1
0
mirror of https://github.com/gryf/coach.git synced 2025-12-17 11:10:20 +01:00

Prevent the evaluation agent from getting stuck in bad policies by updating from the global network during episodes

This commit is contained in:
Itai Caspi
2017-10-25 10:28:45 +03:00
parent d3c6860421
commit 39cf78074c
2 changed files with 6 additions and 0 deletions

View File

@@ -457,6 +457,11 @@ class Agent:
while not episode_ended:
episode_ended = self.act(phase=RunPhase.TEST)
if keep_networks_synced \
and self.total_steps_counter % self.tp.agent.update_evaluation_agent_network_after_every_num_steps:
for network in self.networks:
network.sync()
if self.tp.visualization.dump_gifs and self.total_reward_in_current_episode > max_reward_achieved:
max_reward_achieved = self.total_reward_in_current_episode
frame_skipping = int(5/self.tp.env.frame_skip)

View File

@@ -72,6 +72,7 @@ class AgentParameters:
# Agent parameters
num_consecutive_playing_steps = 1
num_consecutive_training_steps = 1
update_evaluation_agent_network_after_every_num_steps = 3000
bootstrap_total_return_from_old_policy = False
n_step = -1
num_episodes_in_experience_replay = 200