mirror of https://github.com/gryf/coach.git synced 2025-12-18 03:30:19 +01:00

Multiple improvements and bug fixes (#66)

* Multiple improvements and bug fixes:

    * Use lazy stacking of observations to save memory when using a replay buffer (see the sketch after this list)
    * Stop counting total steps during evaluation episodes
    * Reset the game between heatup and training
    * Major bug fixes in NEC (it now reproduces the paper results for Pong)
    * Image input rescaling to 0-1 is now optional
    * Change the terminal title to the experiment name
    * Observation cropping for Atari is now optional
    * Added a random number of no-op actions at the start of gym episodes, to match the DQN paper
    * Fixed a bug where evaluation episodes would not start with the maximum possible ALE lives
    * Added a script for plotting the results of an experiment over all the Atari games
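
A minimal sketch of the lazy stacking idea referenced above. The `LazyStack` name and the deque-of-frames usage come from the diff below; the internals shown here are an assumption, not the repository's actual `utils` code. Each transition stores only references to the shared frames, and the dense stacked array is built only when `np.array()` is called on it:

```python
import numpy as np
from collections import deque


class LazyStack(object):
    """Sketch of a lazy frame stack: keeps references to shared frames and
    materializes the dense stacked array only when np.array() is called."""

    def __init__(self, frames, axis=-1):
        self.frames = list(frames)  # shallow copy: frame references, no pixel copy
        self.axis = axis

    def __array__(self, dtype=None):
        # invoked by np.array(lazy_stack), e.g. when a training batch is built
        stacked = np.stack(self.frames, axis=self.axis)
        if dtype is not None:
            stacked = stacked.astype(dtype)
        return stacked


# usage: keep a rolling window of preprocessed frames, store only a cheap view
stack_size = 4
frame = np.zeros((84, 84), dtype=np.uint8)            # e.g. a preprocessed Atari frame
curr_stack = deque([frame] * stack_size, maxlen=stack_size)
observation = LazyStack(curr_stack, axis=-1)          # cheap to keep in a transition
dense = np.array(observation)                         # shape (84, 84, 4), built on demand
```

Since consecutive transitions share most of their frames, storing views instead of stacked copies cuts the replay buffer's memory use roughly by the stack size.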
Author: Itai Caspi
Date: 2018-02-26 12:29:07 +02:00
Committed by: GitHub
Parent: 4fe9cba445
Commit: a7206ed702
20 changed files with 465 additions and 158 deletions

View File

@@ -24,6 +24,8 @@ except:
import copy
from renderer import Renderer
from configurations import Preset
from collections import deque
from utils import LazyStack
from collections import OrderedDict
from utils import RunPhase, Signal, is_empty, RunningStat
from architectures import *
@@ -214,6 +216,8 @@ class Agent(object):
network.online_network.curr_rnn_c_in = network.online_network.middleware_embedder.c_init
network.online_network.curr_rnn_h_in = network.online_network.middleware_embedder.h_init
self.prepare_initial_state()
def preprocess_observation(self, observation):
"""
Preprocesses the given observation.
@@ -291,9 +295,8 @@ class Agent(object):
"""
current_states = {}
next_states = {}
current_states['observation'] = np.array([transition.state['observation'] for transition in batch])
next_states['observation'] = np.array([transition.next_state['observation'] for transition in batch])
current_states['observation'] = np.array([np.array(transition.state['observation']) for transition in batch])
next_states['observation'] = np.array([np.array(transition.next_state['observation']) for transition in batch])
actions = np.array([transition.action for transition in batch])
rewards = np.array([transition.reward for transition in batch])
game_overs = np.array([transition.game_over for transition in batch])
@@ -348,6 +351,23 @@ class Agent(object):
for input_name in self.tp.agent.input_types.keys():
input_state[input_name] = np.expand_dims(np.array(curr_state[input_name]), 0)
return input_state
def prepare_initial_state(self):
"""
Create an initial state when starting a new episode
:return: None
"""
observation = self.preprocess_observation(self.env.state['observation'])
self.curr_stack = deque([observation]*self.tp.env.observation_stack_size, maxlen=self.tp.env.observation_stack_size)
observation = LazyStack(self.curr_stack, -1)
self.curr_state = {
'observation': observation
}
if self.tp.agent.use_measurements:
self.curr_state['measurements'] = self.env.measurements
if self.tp.agent.use_accumulated_reward_as_measurement:
self.curr_state['measurements'] = np.append(self.curr_state['measurements'], 0)
def act(self, phase=RunPhase.TRAIN):
"""
@@ -356,34 +376,12 @@ class Agent(object):
:return: A boolean value that signals an episode termination
"""
self.total_steps_counter += 1
if phase != RunPhase.TEST:
self.total_steps_counter += 1
self.current_episode_steps_counter += 1
# get new action
action_info = {"action_probability": 1.0 / self.env.action_space_size, "action_value": 0}
is_first_transition_in_episode = (self.curr_state == {})
if is_first_transition_in_episode:
if not isinstance(self.env.state, dict):
raise ValueError((
'expected state to be a dictionary, found {}'
).format(type(self.env.state)))
state = self.env.state
# TODO: modify preprocess_observation to modify the entire state
# for now, only preprocess the observation
state['observation'] = self.preprocess_observation(state['observation'])
# TODO: provide option to stack more than just the observation
# TODO: this should probably be happening in an environment wrapper anyway
state['observation'] = stack_observation([], state['observation'], self.tp.env.observation_stack_size)
self.curr_state = state
if self.tp.agent.use_measurements:
# TODO: this should be handled in the environment
self.curr_state['measurements'] = self.env.measurements
if self.tp.agent.use_accumulated_reward_as_measurement:
self.curr_state['measurements'] = np.append(self.curr_state['measurements'], 0)
action_info = {"action_probability": 1.0 / self.env.action_space_size, "action_value": 0, "max_action_value": 0}
if phase == RunPhase.HEATUP and not self.tp.heatup_using_network_decisions:
action = self.env.get_random_action()
@@ -409,8 +407,10 @@ class Agent(object):
# initialize the next state
# TODO: provide option to stack more than just the observation
next_state['observation'] = stack_observation(self.curr_state['observation'], next_state['observation'], self.tp.env.observation_stack_size)
self.curr_stack.append(next_state['observation'])
observation = LazyStack(self.curr_stack, -1)
next_state['observation'] = observation
if self.tp.agent.use_measurements and 'measurements' in result.keys():
next_state['measurements'] = result['state']['measurements']
if self.tp.agent.use_accumulated_reward_as_measurement:
@@ -516,6 +516,7 @@ class Agent(object):
self.exploration_policy.change_phase(RunPhase.TRAIN)
training_start_time = time.time()
model_snapshots_periods_passed = -1
self.reset_game()
while self.training_iteration < self.tp.num_training_iterations:
# evaluate
@@ -526,7 +527,7 @@ class Agent(object):
self.training_iteration % self.tp.evaluate_every_x_training_iterations == 0)
if evaluate_agent:
self.env.reset()
self.env.reset(force_environment_reset=True)
self.last_episode_evaluation_ran = self.current_episode
self.evaluate(self.tp.evaluation_episodes)
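
The extract_batch hunk above is where these lazy stacks finally become dense arrays: the inner `np.array(...)` call materializes each transition's observation only when a batch is sampled. A self-contained sketch of that pattern; `Transition` here is a hypothetical stand-in for the repository's transition class, not its actual definition:

```python
import numpy as np
from collections import namedtuple

# hypothetical stand-in for the repository's transition class
Transition = namedtuple('Transition', 'state action reward next_state game_over')


def extract_batch(batch):
    """Mirrors the extract_batch hunk above: the inner np.array(...) call turns a
    lazily stacked observation into a dense array, so the copy happens once per
    sampled transition rather than once per stored transition."""
    current_states = {'observation': np.array(
        [np.array(t.state['observation']) for t in batch])}
    next_states = {'observation': np.array(
        [np.array(t.next_state['observation']) for t in batch])}
    actions = np.array([t.action for t in batch])
    rewards = np.array([t.reward for t in batch])
    game_overs = np.array([t.game_over for t in batch])
    return current_states, next_states, actions, rewards, game_overs


# any array-like observation works, e.g. the LazyStack sketched earlier or a plain array
obs = {'observation': np.zeros((84, 84, 4))}
batch = [Transition(obs, 0, 1.0, obs, False)] * 2
current_states, next_states, actions, rewards, game_overs = extract_batch(batch)
print(current_states['observation'].shape)  # (2, 84, 84, 4)
```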

View File

@@ -27,10 +27,7 @@ class NECAgent(ValueOptimizationAgent):
ValueOptimizationAgent.__init__(self, env, tuning_parameters, replicated_device, thread_id,
create_target_network=False)
self.current_episode_state_embeddings = []
self.current_episode_actions = []
self.training_started = False
# if self.tp.checkpoint_restore_dir:
# self.load_dnd(self.tp.checkpoint_restore_dir)
def learn_from_batch(self, batch):
if not self.main_network.online_network.output_heads[0].DND.has_enough_entries(self.tp.agent.number_of_knn):
@@ -41,83 +38,57 @@ class NECAgent(ValueOptimizationAgent):
screen.log_title("Finished collecting initial entries in DND. Starting to train network...")
current_states, next_states, actions, rewards, game_overs, total_return = self.extract_batch(batch)
result = self.main_network.train_and_sync_networks(current_states, total_return)
TD_targets = self.main_network.online_network.predict(current_states)
# only update the action that we have actually done in this transition
for i in range(self.tp.batch_size):
TD_targets[i, actions[i]] = total_return[i]
# train the neural network
result = self.main_network.train_and_sync_networks(current_states, TD_targets)
total_loss = result[0]
return total_loss
def choose_action(self, curr_state, phase=RunPhase.TRAIN):
"""
this method modifies the superclass's behavior in only 3 ways:
def act(self, phase=RunPhase.TRAIN):
if self.in_heatup:
# get embedding in heatup (otherwise we get it through choose_action)
embedding = self.main_network.online_network.predict(
self.tf_input_state(self.curr_state),
outputs=self.main_network.online_network.state_embedding)
self.current_episode_state_embeddings.append(embedding)
1) the embedding is saved and stored in self.current_episode_state_embeddings
2) the dnd output head is only called if it has a minimum number of entries in it
ideally, the dnd head would do this on its own, but in my attempt at encoding this
behavior in tensorflow, I ran into problems. Would definitely be worth
revisiting in the future
3) during training, actions are saved and stored in self.current_episode_actions
if behaviors 1 and 2 were handled elsewhere, this could easily be implemented
as a wrapper around super instead of overriding this method entirely
"""
return super().act(phase)
# get embedding
embedding = self.main_network.online_network.predict(
def get_prediction(self, curr_state):
# get the actions q values and the state embedding
embedding, actions_q_values = self.main_network.online_network.predict(
self.tf_input_state(curr_state),
outputs=self.main_network.online_network.state_embedding)
self.current_episode_state_embeddings.append(embedding)
outputs=[self.main_network.online_network.state_embedding,
self.main_network.online_network.output_heads[0].output]
)
# TODO: support additional heads. Right now all other heads are ignored
if self.main_network.online_network.output_heads[0].DND.has_enough_entries(self.tp.agent.number_of_knn):
# if there are enough entries in the DND then we can query it to get the action values
# actions_q_values = []
feed_dict = {
self.main_network.online_network.state_embedding: [embedding],
}
actions_q_values = self.main_network.sess.run(
self.main_network.online_network.output_heads[0].output, feed_dict=feed_dict)
else:
# get only the embedding so we can insert it to the DND
actions_q_values = [0] * self.action_space_size
# choose action according to the exploration policy and the current phase (evaluating or training the agent)
if phase == RunPhase.TRAIN:
action = self.exploration_policy.get_action(actions_q_values)
# NOTE: this next line is not in the parent implementation
# NOTE: it could be implemented as a wrapper around the parent since action is returned
self.current_episode_actions.append(action)
else:
action = np.argmax(actions_q_values)
# store the q values statistics for logging
self.q_values.add_sample(actions_q_values)
# store information for plotting interactively (actual plotting is done in agent)
if self.tp.visualization.plot_action_values_online:
for idx, action_name in enumerate(self.env.actions_description):
self.episode_running_info[action_name].append(actions_q_values[idx])
action_value = {"action_value": actions_q_values[action]}
return action, action_value
# store the state embedding for inserting it to the DND later
self.current_episode_state_embeddings.append(embedding.squeeze())
actions_q_values = actions_q_values[0][0]
return actions_q_values
def reset_game(self, do_not_reset_env=False):
ValueOptimizationAgent.reset_game(self, do_not_reset_env)
super().reset_game(do_not_reset_env)
# make sure we already have at least one episode
if self.memory.num_complete_episodes() >= 1 and not self.in_heatup:
# get the last full episode that we have collected
episode = self.memory.get(-2)
returns = []
for i in range(episode.length()):
returns.append(episode.get_transition(i).total_return)
# Just to deal with the end of heatup, which might fall in the middle of an episode; in that case the
# episode fetched from the ER is a complete one, whereas the other statistics collected here are
# collected only during training.
returns = returns[-len(self.current_episode_actions):]
# get the last full episode that we have collected
episode = self.memory.get_last_complete_episode()
if episode is not None:
# the indexing is only necessary because the heatup can end in the middle of an episode
# this won't be required after fixing this so that when the heatup is ended, the episode is closed
returns = episode.get_transitions_attribute('total_return')[:len(self.current_episode_state_embeddings)]
actions = episode.get_transitions_attribute('action')[:len(self.current_episode_state_embeddings)]
self.main_network.online_network.output_heads[0].DND.add(self.current_episode_state_embeddings,
self.current_episode_actions, returns)
actions, returns)
self.current_episode_state_embeddings = []
self.current_episode_actions = []
def save_model(self, model_id):
self.main_network.save_model(model_id)
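
A minimal sketch of the fixed NEC target construction in learn_from_batch above; the per-sample for-loop from the diff is expressed here with vectorized indexing, and the names are illustrative rather than the repository's API. Only the Q value of the action actually taken is regressed toward the observed episodic return, so the other actions' outputs are left untouched:

```python
import numpy as np


def nec_targets(q_predictions, actions, total_returns):
    """Start from the network's own predictions and overwrite only the taken
    action's Q value with the episodic return observed for that transition."""
    targets = np.copy(q_predictions)                  # shape: (batch_size, num_actions)
    batch_indices = np.arange(targets.shape[0])
    targets[batch_indices, actions] = total_returns   # update the taken actions only
    return targets


# made-up numbers: two transitions, two actions
q = np.array([[0.2, 0.5],
              [0.1, -0.3]])
print(nec_targets(q, actions=np.array([1, 0]), total_returns=np.array([1.0, 0.0])))
# [[ 0.2  1. ]
#  [ 0.  -0.3]]
```

At episode end, reset_game above then pairs the embeddings collected during the episode with the taken actions and per-transition returns of the last complete episode and inserts them into the DND.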

View File

@@ -73,5 +73,5 @@ class ValueOptimizationAgent(Agent):
for idx, action_name in enumerate(self.env.actions_description):
self.episode_running_info[action_name].append(actions_q_values[idx])
action_value = {"action_value": actions_q_values[action]}
action_value = {"action_value": actions_q_values[action], "max_action_value": np.max(actions_q_values)}
return action, action_value
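
The hunk above adds the greedy Q value to the logged action info. A tiny sketch of that logging, with illustrative names; since the chosen action may come from an exploration policy, its value can differ from the new "max_action_value" field:

```python
import numpy as np


def action_info(actions_q_values, action):
    """Log both the chosen action's value and the greedy (maximum) value."""
    return {
        "action_value": float(actions_q_values[action]),
        "max_action_value": float(np.max(actions_q_values)),
    }


# e.g. an epsilon-greedy policy picked action 0 even though action 1 is greedy
print(action_info(np.array([0.1, 0.7, -0.2]), action=0))
# {'action_value': 0.1, 'max_action_value': 0.7}
```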