
Add is_on_policy property to agents (#480)

Guy Jacob authored on 2021-05-06 18:02:02 +03:00, committed by GitHub
parent 06bacd9de0
commit 9106b69227
21 changed files with 86 additions and 1 deletion


@@ -111,8 +111,11 @@ class ACERAgent(PolicyOptimizationAgent):
        self.V_Values = self.register_signal('Values')
        self.kl_divergence = self.register_signal('KL Divergence')

    @property
    def is_on_policy(self) -> bool:
        return False

    def _learn_from_batch(self, batch):
        fetches = [self.networks['main'].online_network.output_heads[1].probability_loss,
                   self.networks['main'].online_network.output_heads[1].bias_correction_loss,
                   self.networks['main'].online_network.output_heads[1].kl_divergence]


@@ -100,6 +100,10 @@ class ActorCriticAgent(PolicyOptimizationAgent):
        self.value_loss = self.register_signal('Value Loss')
        self.policy_loss = self.register_signal('Policy Loss')

    @property
    def is_on_policy(self) -> bool:
        return True

    # Discounting function used to calculate discounted returns.
    def discount(self, x, gamma):
        return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]


@@ -152,3 +152,8 @@ class AgentInterface(object):
        :return: None
        """
        raise NotImplementedError("")

    @property
    def is_on_policy(self) -> bool:
        raise NotImplementedError("")
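
The abstract property above is the hook that each concrete agent in the hunks below overrides. A minimal, self-contained usage sketch follows; the stand-in base class, ExampleOffPolicyAgent, and describe_agent are illustrative only and are not code from this commit:

# Self-contained sketch: a stand-in that mirrors the AgentInterface hook above,
# plus a toy consumer of the new property. Nothing here is from the commit itself.
class AgentInterfaceStandIn:
    @property
    def is_on_policy(self) -> bool:
        # Mirrors the abstract property added to AgentInterface in this commit.
        raise NotImplementedError("")


class ExampleOffPolicyAgent(AgentInterfaceStandIn):
    @property
    def is_on_policy(self) -> bool:
        # Concrete agents simply return a constant, as in the hunks below.
        return False


def describe_agent(agent: AgentInterfaceStandIn) -> str:
    # Downstream code can branch on the flag instead of checking agent types.
    if agent.is_on_policy:
        return "on-policy: learns only from data gathered by the current policy"
    return "off-policy: can learn from stored / older transitions"


print(describe_agent(ExampleOffPolicyAgent()))  # -> off-policy: ...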


@@ -63,6 +63,10 @@ class BCAgent(ImitationAgent):
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()


@@ -46,6 +46,10 @@ class BootstrappedDQNAgent(ValueOptimizationAgent):
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)

    @property
    def is_on_policy(self) -> bool:
        return False

    def reset_internal_state(self):
        super().reset_internal_state()
        self.exploration_policy.select_head()


@@ -77,6 +77,10 @@ class CategoricalDQNAgent(ValueOptimizationAgent):
        super().__init__(agent_parameters, parent)
        self.z_values = np.linspace(self.ap.algorithm.v_min, self.ap.algorithm.v_max, self.ap.algorithm.atoms)

    @property
    def is_on_policy(self) -> bool:
        return False

    def distribution_prediction_to_q_values(self, prediction):
        return np.dot(prediction, self.z_values)


@@ -144,6 +144,10 @@ class ClippedPPOAgent(ActorCriticAgent):
        self.likelihood_ratio = self.register_signal('Likelihood Ratio')
        self.clipped_likelihood_ratio = self.register_signal('Clipped Likelihood Ratio')

    @property
    def is_on_policy(self) -> bool:
        return True

    def set_session(self, sess):
        super().set_session(sess)
        if self.ap.algorithm.normalization_stats is not None:


@@ -130,6 +130,10 @@ class DDPGAgent(ActorCriticAgent):
        self.TD_targets_signal = self.register_signal("TD targets")
        self.action_signal = self.register_signal("actions")

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        actor = self.networks['actor']
        critic = self.networks['critic']


@@ -141,6 +141,12 @@ class DFPAgent(Agent):
        self.current_goal = self.ap.algorithm.goal_vector
        self.target_measurements_scale_factors = None

    @property
    def is_on_policy(self) -> bool:
        # This is only somewhat correct, as the algorithm uses a very small (20k) ER keeping only recent samples seen.
        # So it is approximately on-policy (although, to be completely strict, it is off-policy).
        return True

    def learn_from_batch(self, batch):
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
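
The comment above reasons that DFP is only approximately on-policy because its experience replay is small (about 20k transitions) and keeps only recent samples. A schematic sketch of that recency-bounded buffer idea; this is illustrative only, not Coach's actual memory class:

import random
from collections import deque

# Illustrative recency-bounded replay: with a small cap, nearly every sampled
# transition was produced by a recent (hence near-current) policy, which is the
# sense in which DFP's training data is "approximately on-policy".
class RecentOnlyReplay:
    def __init__(self, max_size: int = 20_000):
        self.transitions = deque(maxlen=max_size)  # oldest samples are evicted first

    def store(self, transition) -> None:
        self.transitions.append(transition)

    def sample(self, batch_size: int):
        k = min(batch_size, len(self.transitions))
        return random.sample(list(self.transitions), k)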


@@ -71,6 +71,10 @@ class DQNAgent(ValueOptimizationAgent):
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)

    @property
    def is_on_policy(self) -> bool:
        return False

    def select_actions(self, next_states, q_st_plus_1):
        return np.argmax(q_st_plus_1, 1)


@@ -31,6 +31,10 @@ class ImitationAgent(Agent):
        super().__init__(agent_parameters, parent)
        self.imitation = True

    @property
    def is_on_policy(self) -> bool:
        return False

    def extract_action_values(self, prediction):
        return prediction.squeeze()


@@ -50,6 +50,10 @@ class MixedMonteCarloAgent(ValueOptimizationAgent):
        super().__init__(agent_parameters, parent)
        self.mixing_rate = agent_parameters.algorithm.monte_carlo_mixing_rate

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()


@@ -92,6 +92,10 @@ class NStepQAgent(ValueOptimizationAgent, PolicyOptimizationAgent):
        self.q_values = self.register_signal('Q Values')
        self.value_loss = self.register_signal('Value Loss')

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        # batch contains a list of episodes to learn from
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()


@@ -73,6 +73,10 @@ class NAFAgent(ValueOptimizationAgent):
        self.v_values = self.register_signal("V")
        self.TD_targets = self.register_signal("TD targets")

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()


@@ -120,6 +120,9 @@ class NECAgent(ValueOptimizationAgent):
            Episode(discount=self.ap.algorithm.discount,
                    n_step=self.ap.algorithm.n_step,
                    bootstrap_total_return_from_old_policy=self.ap.algorithm.bootstrap_total_return_from_old_policy)

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        if not self.networks['main'].online_network.output_heads[0].DND.has_enough_entries(self.ap.algorithm.number_of_knn):


@@ -63,6 +63,10 @@ class PALAgent(ValueOptimizationAgent):
        self.persistent = agent_parameters.algorithm.persistent_advantage_learning
        self.monte_carlo_mixing_rate = agent_parameters.algorithm.monte_carlo_mixing_rate

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()


@@ -91,6 +91,10 @@ class PolicyGradientsAgent(PolicyOptimizationAgent):
        self.returns_variance = self.register_signal('Returns Variance')
        self.last_gradient_update_step_idx = 0

    @property
    def is_on_policy(self) -> bool:
        return True

    def learn_from_batch(self, batch):
        # batch contains a list of episodes to learn from
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()


@@ -149,6 +149,10 @@ class PPOAgent(ActorCriticAgent):
        self.total_kl_divergence_during_training_process = 0.0
        self.unclipped_grads = self.register_signal('Grads (unclipped)')

    @property
    def is_on_policy(self) -> bool:
        return True

    def fill_advantages(self, batch):
        batch = Batch(batch)
        network_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()


@@ -67,6 +67,10 @@ class QuantileRegressionDQNAgent(ValueOptimizationAgent):
        super().__init__(agent_parameters, parent)
        self.quantile_probabilities = np.ones(self.ap.algorithm.atoms) / float(self.ap.algorithm.atoms)

    @property
    def is_on_policy(self) -> bool:
        return False

    def get_q_values(self, quantile_values):
        return np.dot(quantile_values, self.quantile_probabilities)


@@ -161,6 +161,10 @@ class SoftActorCriticAgent(PolicyOptimizationAgent):
        self.v_onl_ys = self.register_signal('V_onl_ys')
        self.action_signal = self.register_signal("actions")

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        #########################################
        # need to update the following networks:


@@ -141,6 +141,10 @@ class TD3Agent(DDPGAgent):
        self.TD_targets_signal = self.register_signal("TD targets")
        self.action_signal = self.register_signal("actions")

    @property
    def is_on_policy(self) -> bool:
        return False

    def learn_from_batch(self, batch):
        actor = self.networks['actor']
        critic = self.networks['critic']