diff --git a/agents/agent.py b/agents/agent.py
index 0279efd..6a1038e 100644
--- a/agents/agent.py
+++ b/agents/agent.py
@@ -457,6 +457,11 @@ class Agent:
         while not episode_ended:
             episode_ended = self.act(phase=RunPhase.TEST)
 
+            # Periodically refresh the evaluation agent's network weights from
+            # the trained networks. The `== 0` is essential: a bare modulo is
+            # truthy for every non-multiple step, which would sync on almost
+            # every step instead of once every N steps.
+            if keep_networks_synced \
+                    and self.total_steps_counter % self.tp.agent.update_evaluation_agent_network_after_every_num_steps == 0:
+                for network in self.networks:
+                    network.sync()
+
             if self.tp.visualization.dump_gifs and self.total_reward_in_current_episode > max_reward_achieved:
                 max_reward_achieved = self.total_reward_in_current_episode
                 frame_skipping = int(5/self.tp.env.frame_skip)
diff --git a/configurations.py b/configurations.py
index aff718a..31442f4 100644
--- a/configurations.py
+++ b/configurations.py
@@ -72,6 +72,7 @@ class AgentParameters:
     # Agent parameters
     num_consecutive_playing_steps = 1
     num_consecutive_training_steps = 1
+    # Steps between syncing the evaluation agent's networks with training
+    update_evaluation_agent_network_after_every_num_steps = 3000
     bootstrap_total_return_from_old_policy = False
     n_step = -1
     num_episodes_in_experience_replay = 200