# builders.solver.policy
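
These mixin classes describe the kind of policy a solver computes as part of the solving process: `Policies` (stochastic policy), `UncertainPolicies` (stochastic policy with an explicit next-action distribution), and `DeterministicPolicies` (deterministic policy).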

## Policies

A solver must inherit this class if it computes a stochastic policy as part of the solving process.

### is_policy_defined_for (Policies)

```python
is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```

Check whether the solver's current policy is defined for the given observation.

#### Parameters

  • observation: The observation to consider.

#### Returns

True if the policy is defined for the given observation, False otherwise.

### sample_action (Policies)

```python
sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Sample an action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation for which an action must be sampled.

#### Returns

The sampled action.
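
These two methods are typically used together when executing a solved policy. The sketch below is illustrative only: it assumes a trained `Policies` solver and a domain object with a simplified Gym-style `reset()`/`step()` interface (the actual scikit-decide domain API returns richer transition objects, so the step signature here is an assumption).

```python
def rollout(solver, domain, max_steps=100):
    """Run the solver's stochastic policy for one episode.

    Illustrative sketch: `domain` is assumed to expose reset() returning an
    observation and step(action) returning (observation, done).
    """
    observation = domain.reset()
    for _ in range(max_steps):
        if not solver.is_policy_defined_for(observation):
            break  # partial policies may not cover every observation
        action = solver.sample_action(observation)
        observation, done = domain.step(action)
        if done:
            break
    return observation
```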

### _is_policy_defined_for (Policies)

```python
_is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```

Check whether the solver's current policy is defined for the given observation.

#### Parameters

  • observation: The observation to consider.

#### Returns

True if the policy is defined for the given observation, False otherwise.

### _sample_action (Policies)

```python
_sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Sample an action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation for which an action must be sampled.

#### Returns

The sampled action.
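
To make the contract concrete, here is a minimal sketch of a class implementing `Policies`. It assumes scikit-decide's usual public/internal split, in which the public `sample_action` and `is_policy_defined_for` wrappers dispatch to the underscore-prefixed hooks documented above, so a concrete solver only overrides the latter; the class name and the fixed action list are illustrative, not part of the library, and in a full solver this mixin would be combined with the library's `Solver` base class.

```python
import random

from skdecide.builders.solver.policy import Policies


class UniformRandomPolicy(Policies):
    """Illustrative Policies implementation: sample uniformly from a fixed action list."""

    def __init__(self, actions):
        # In a real solver the actions would come from the domain's action space.
        self._actions = list(actions)

    def _sample_action(self, observation):
        # The simplest stochastic policy: a uniform random choice.
        return random.choice(self._actions)

    def _is_policy_defined_for(self, observation) -> bool:
        # A uniform policy is defined for every observation.
        return True
```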

## UncertainPolicies

A solver must inherit this class if it computes a stochastic policy (providing the next-action distribution explicitly) as part of the solving process.

### get_next_action_distribution (UncertainPolicies)

```python
get_next_action_distribution(
    self,
    observation: StrDict[D.T_observation]
) -> Distribution[StrDict[list[D.T_event]]]
```

Get the probability distribution over the next action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation to consider.

#### Returns

The probability distribution over the next action.
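
Sampling from the returned distribution is the natural way to pick an action. A short sketch, assuming the `Distribution` objects expose a `sample()` method (which is presumably how `sample_action` is derived for `UncertainPolicies` solvers):

```python
def pick_action(solver, observation):
    # Draw one action from the solver's explicit next-action distribution;
    # for an UncertainPolicies solver this should agree in law with
    # solver.sample_action(observation).
    distribution = solver.get_next_action_distribution(observation)
    return distribution.sample()  # Distribution.sample() assumed available
```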

### is_policy_defined_for (Policies)

```python
is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```

Check whether the solver's current policy is defined for the given observation.

#### Parameters

  • observation: The observation to consider.

#### Returns

True if the policy is defined for the given observation, False otherwise.

### sample_action (Policies)

```python
sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Sample an action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation for which an action must be sampled.

#### Returns

The sampled action.

### _get_next_action_distribution (UncertainPolicies)

```python
_get_next_action_distribution(
    self,
    observation: StrDict[D.T_observation]
) -> Distribution[StrDict[list[D.T_event]]]
```

Get the probability distribution over the next action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation to consider.

#### Returns

The probability distribution over the next action.

### _is_policy_defined_for (Policies)

```python
_is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```

Check whether the solver's current policy is defined for the given observation.

#### Parameters

  • observation: The observation to consider.

#### Returns

True if the policy is defined for the given observation, False otherwise.

### _sample_action (Policies)

```python
_sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Sample an action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation for which an action must be sampled.

#### Returns

The sampled action.
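
As an illustration, the sketch below implements `UncertainPolicies` with an epsilon-greedy distribution. The `DiscreteDistribution` import path and its constructor taking a list of `(value, probability)` pairs are assumptions about `skdecide.core` rather than library-confirmed API, and the class itself is purely illustrative.

```python
from skdecide.builders.solver.policy import UncertainPolicies
from skdecide.core import DiscreteDistribution  # assumed import path


class EpsilonGreedyPolicy(UncertainPolicies):
    """Illustrative epsilon-greedy policy with an explicit action distribution."""

    def __init__(self, greedy_action, other_actions, epsilon=0.1):
        self._greedy = greedy_action
        self._others = list(other_actions)
        self._epsilon = epsilon

    def _get_next_action_distribution(self, observation):
        # Mass 1 - epsilon on the greedy action, epsilon spread uniformly
        # over the remaining actions (all mass on the greedy action if none).
        if not self._others:
            return DiscreteDistribution([(self._greedy, 1.0)])
        share = self._epsilon / len(self._others)
        pairs = [(self._greedy, 1.0 - self._epsilon)]
        pairs += [(action, share) for action in self._others]
        return DiscreteDistribution(pairs)  # assumed (value, probability) pairs

    def _is_policy_defined_for(self, observation) -> bool:
        return True
```

With the distribution in place, `_sample_action` should not need overriding if `UncertainPolicies` derives it by sampling from `_get_next_action_distribution`, which matches the class's stated purpose.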

## DeterministicPolicies

A solver must inherit this class if it computes a deterministic policy as part of the solving process.

### get_next_action (DeterministicPolicies)

```python
get_next_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Get the next deterministic action (from the solver's current policy).

#### Parameters

  • observation: The observation for which the next action is requested.

#### Returns

The next deterministic action.
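
For a deterministic policy the stochastic entry points collapse: sampling should always return the action that `get_next_action` reports, assuming `sample_action` is derived from a single-valued action distribution. A small illustrative check:

```python
def next_action_checked(solver, observation):
    # Illustrative consistency check for a DeterministicPolicies solver:
    # the sampled action should coincide with the deterministic one
    # (assuming sample_action is derived from a single-valued distribution).
    action = solver.get_next_action(observation)
    assert solver.sample_action(observation) == action
    return action
```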

### get_next_action_distribution (UncertainPolicies)

```python
get_next_action_distribution(
    self,
    observation: StrDict[D.T_observation]
) -> Distribution[StrDict[list[D.T_event]]]
```

Get the probability distribution over the next action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation to consider.

#### Returns

The probability distribution over the next action.

### is_policy_defined_for (Policies)

```python
is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```

Check whether the solver's current policy is defined for the given observation.

#### Parameters

  • observation: The observation to consider.

#### Returns

True if the policy is defined for the given observation, False otherwise.

### sample_action (Policies)

```python
sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Sample an action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation for which an action must be sampled.

#### Returns

The sampled action.

### _get_next_action (DeterministicPolicies)

```python
_get_next_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Get the next deterministic action (from the solver's current policy).

#### Parameters

  • observation: The observation for which the next action is requested.

#### Returns

The next deterministic action.

### _get_next_action_distribution (UncertainPolicies)

```python
_get_next_action_distribution(
    self,
    observation: StrDict[D.T_observation]
) -> Distribution[StrDict[list[D.T_event]]]
```

Get the probability distribution over the next action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation to consider.

#### Returns

The probability distribution over the next action.

### _is_policy_defined_for (Policies)

```python
_is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```

Check whether the solver's current policy is defined for the given observation.

#### Parameters

  • observation: The observation to consider.

#### Returns

True if the policy is defined for the given observation, False otherwise.

### _sample_action (Policies)

```python
_sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```

Sample an action for the given observation (from the solver's current policy).

#### Parameters

  • observation: The observation for which an action must be sampled.

#### Returns

The sampled action.
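
Finally, a minimal sketch of a `DeterministicPolicies` implementation backed by a lookup table, e.g. one produced by value iteration. Only `_get_next_action` and `_is_policy_defined_for` are overridden here, on the assumption that the class derives the distribution and sampling hooks from the deterministic action; the class name and the table representation are illustrative.

```python
from skdecide.builders.solver.policy import DeterministicPolicies


class LookupTablePolicy(DeterministicPolicies):
    """Illustrative deterministic policy backed by a precomputed table."""

    def __init__(self, table):
        # Mapping from observation to action, e.g. computed by value iteration.
        self._table = dict(table)

    def _get_next_action(self, observation):
        return self._table[observation]

    def _is_policy_defined_for(self, observation) -> bool:
        # The policy is partial: defined only for tabulated observations.
        return observation in self._table
```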