# hub.solver.meta_policy_scheduling.meta_policies
# MetaPolicy
Utility policy that implements a meta-policy: at a given state, it launches a rollout for each candidate policy to estimate its cost, then follows, at that state, the policy with the lowest estimated cost.
# Constructor MetaPolicy
```python
MetaPolicy(
    policies: dict[Any, DeterministicPolicies],
    domain: SchedulingDomain,
    nb_rollout_estimation=1,
    verbose=True,
)
```
# Parameters
- policies: dictionary of the candidate policies to evaluate, keyed by arbitrary labels
- domain: domain on which to evaluate the policies
- nb_rollout_estimation: number of rollouts run per policy to estimate its expected cost; values greater than 1 are mainly relevant when the domain is stochastic
- verbose: if True, print information about the rollout evaluations
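A minimal construction sketch (assuming the import path matches the module name above, and that `policy_a` and `policy_b` are `DeterministicPolicies` instances obtained from other solvers on a `SchedulingDomain` called `domain`):

```python
# Sketch only: policy_a, policy_b and domain are assumed to exist already.
from skdecide.hub.solver.meta_policy_scheduling.meta_policies import MetaPolicy

meta_policy = MetaPolicy(
    policies={"solver_a": policy_a, "solver_b": policy_b},  # arbitrary labels as keys
    domain=domain,            # scheduling domain used for the rollouts
    nb_rollout_estimation=5,  # average each policy's cost over 5 rollouts
    verbose=False,
)
```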
# get_next_action DeterministicPolicies
```python
get_next_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```
Get the next deterministic action (from the solver's current policy).
# Parameters
- observation: The observation for which the next action is requested.
# Returns
The next deterministic action.
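A short usage sketch, reusing `meta_policy` and `domain` from the construction example above (assuming the domain exposes `get_initial_state()`):

```python
# Each call runs nb_rollout_estimation rollout(s) per candidate policy
# from this state and returns the action of the policy with the lowest
# estimated cost.
state = domain.get_initial_state()
action = meta_policy.get_next_action(state)
```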
# get_next_action_distribution UncertainPolicies
```python
get_next_action_distribution(
    self,
    observation: StrDict[D.T_observation]
) -> Distribution[StrDict[list[D.T_event]]]
```
Get the probabilistic distribution of the next action for the given observation (from the solver's current policy).
# Parameters
- observation: The observation to consider.
# Returns
The probabilistic distribution of next action.
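Since the meta-policy's choice is deterministic, the returned distribution is typically concentrated on a single action; a sketch reusing `meta_policy` and `state` from above:

```python
# Sampling the distribution returns the selected action.
dist = meta_policy.get_next_action_distribution(state)
action = dist.sample()
```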
# is_policy_defined_for Policies
```python
is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```
Check whether the solver's current policy is defined for the given observation.
# Parameters
- observation: The observation to consider.
# Returns
True if the policy is defined for the given observation (False otherwise).
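A typical guard pattern, reusing the names from the examples above:

```python
# Only query an action where the policy is defined for this observation.
if meta_policy.is_policy_defined_for(state):
    action = meta_policy.get_next_action(state)
```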
# sample_action Policies
```python
sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```
Sample an action for the given observation (from the solver's current policy).
# Parameters
- observation: The observation for which an action must be sampled.
# Returns
The sampled action.
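For a deterministic meta-policy, sampling typically reduces to the deterministic choice; a one-line sketch reusing the names above:

```python
# Equivalent here to get_next_action(state).
action = meta_policy.sample_action(state)
```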
# _get_next_action DeterministicPolicies
```python
_get_next_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```
Get the next deterministic action (from the solver's current policy).
# Parameters
- observation: The observation for which the next action is requested.
# Returns
The next deterministic action.
# _get_next_action_distribution UncertainPolicies
```python
_get_next_action_distribution(
    self,
    observation: StrDict[D.T_observation]
) -> Distribution[StrDict[list[D.T_event]]]
```
Get the probabilistic distribution of the next action for the given observation (from the solver's current policy).
# Parameters
- observation: The observation to consider.
# Returns
The probabilistic distribution of next action.
# _is_policy_defined_for Policies
```python
_is_policy_defined_for(
    self,
    observation: StrDict[D.T_observation]
) -> bool
```
Check whether the solver's current policy is defined for the given observation.
# Parameters
- observation: The observation to consider.
# Returns
True if the policy is defined for the given observation (False otherwise).
# _sample_action Policies
```python
_sample_action(
    self,
    observation: StrDict[D.T_observation]
) -> StrDict[list[D.T_event]]
```
Sample an action for the given observation (from the solver's current policy).
# Parameters
- observation: The observation for which an action must be sampled.
# Returns
The sampled action.