From 39cf78074c75fdd5605fecb0c12b78a87b740ca3 Mon Sep 17 00:00:00 2001
From: Itai Caspi
Date: Wed, 25 Oct 2017 10:28:45 +0300
Subject: [PATCH] preventing the evaluation agent from getting stuck in bad
 policies by updating from the global network during episodes

---
 agents/agent.py   | 5 +++++
 configurations.py | 1 +
 2 files changed, 6 insertions(+)

diff --git a/agents/agent.py b/agents/agent.py
index 0279efd..6a1038e 100644
--- a/agents/agent.py
+++ b/agents/agent.py
@@ -457,6 +457,11 @@ class Agent:
         while not episode_ended:
             episode_ended = self.act(phase=RunPhase.TEST)
 
+            # Periodically pull the latest weights from the global network so a
+            # long evaluation episode does not keep acting on a stale policy.
+            # NOTE: the `== 0` is essential — a bare modulo is truthy on every
+            # step that is NOT a multiple of the interval, which would sync on
+            # ~all steps and skip exactly the intended ones.
+            if keep_networks_synced \
+                    and self.total_steps_counter % self.tp.agent.update_evaluation_agent_network_after_every_num_steps == 0:
+                for network in self.networks:
+                    network.sync()
+
             if self.tp.visualization.dump_gifs and self.total_reward_in_current_episode > max_reward_achieved:
                 max_reward_achieved = self.total_reward_in_current_episode
                 frame_skipping = int(5/self.tp.env.frame_skip)
diff --git a/configurations.py b/configurations.py
index aff718a..31442f4 100644
--- a/configurations.py
+++ b/configurations.py
@@ -72,6 +72,7 @@ class AgentParameters:
     # Agent parameters
     num_consecutive_playing_steps = 1
     num_consecutive_training_steps = 1
+    update_evaluation_agent_network_after_every_num_steps = 3000
     bootstrap_total_return_from_old_policy = False
     n_step = -1
     num_episodes_in_experience_replay = 200