preventing the evaluation agent from getting stuck in bad policies by updating from the global network during episodes
@@ -457,6 +457,11 @@ class Agent:
         while not episode_ended:
             episode_ended = self.act(phase=RunPhase.TEST)
+
+            if keep_networks_synced \
+                    and self.total_steps_counter % self.tp.agent.update_evaluation_agent_network_after_every_num_steps == 0:
+                for network in self.networks:
+                    network.sync()
 
         if self.tp.visualization.dump_gifs and self.total_reward_in_current_episode > max_reward_achieved:
             max_reward_achieved = self.total_reward_in_current_episode
             frame_skipping = int(5/self.tp.env.frame_skip)
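With this change, the evaluation loop checks after every step whether total_steps_counter has reached a multiple of update_evaluation_agent_network_after_every_num_steps and, if so, pulls fresh weights from the global network via network.sync(). A minimal, self-contained sketch of this periodic-sync pattern, using hypothetical names (PeriodicSync, step, plain lists as weights) rather than Coach's actual classes:

# Sketch of the periodic-sync pattern above (hypothetical names, not
# Coach's API): every sync_interval steps the evaluation copy pulls the
# latest weights from the shared/global network, so a stale policy
# cannot persist for an entire evaluation run.
class PeriodicSync:
    def __init__(self, sync_interval=3000):
        self.sync_interval = sync_interval
        self.total_steps_counter = 0

    def step(self, local_weights, global_weights):
        """Advance one step; refresh local weights every sync_interval steps."""
        self.total_steps_counter += 1
        # Same modulo gate as in the diff above.
        if self.total_steps_counter % self.sync_interval == 0:
            local_weights[:] = global_weights  # in-place copy, same list object

# Usage: after two steps with sync_interval=2, local matches global.
local, shared = [0.0, 0.0], [0.5, -1.2]
syncer = PeriodicSync(sync_interval=2)
for _ in range(2):
    syncer.step(local, shared)
assert local == shared

Copying in place rather than rebinding the name mirrors the intent of network.sync(): every reference to the evaluation weights sees the refreshed values immediately.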
@@ -72,6 +72,7 @@ class AgentParameters:
     # Agent parameters
     num_consecutive_playing_steps = 1
     num_consecutive_training_steps = 1
+    update_evaluation_agent_network_after_every_num_steps = 3000
     bootstrap_total_return_from_old_policy = False
     n_step = -1
     num_episodes_in_experience_replay = 200
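The new parameter defaults to a sync every 3000 total steps. Because AgentParameters exposes these settings as plain class attributes, the interval can be tuned by overriding the field. The sketch below assumes a stub of AgentParameters reduced to the fields shown in the diff; the subclass FastSyncParameters and its value of 500 are purely illustrative:

# Stand-in for the relevant slice of Coach's AgentParameters; only the
# fields visible in the diff above are reproduced here.
class AgentParameters:
    num_consecutive_playing_steps = 1
    num_consecutive_training_steps = 1
    update_evaluation_agent_network_after_every_num_steps = 3000
    bootstrap_total_return_from_old_policy = False
    n_step = -1
    num_episodes_in_experience_replay = 200

# Hypothetical override: sync the evaluation agent six times as often.
class FastSyncParameters(AgentParameters):
    update_evaluation_agent_network_after_every_num_steps = 500

print(FastSyncParameters.update_evaluation_agent_network_after_every_num_steps)  # -> 500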
|||||||