mirror of https://github.com/gryf/coach.git synced 2025-12-18 03:30:19 +01:00

temp commit

Zach Dwiel
2018-02-16 09:35:58 -05:00
parent 16c5032735
commit 85afb86893
14 changed files with 244 additions and 127 deletions

View File

@@ -20,6 +20,17 @@ from utils import *
 import scipy.signal


+def last_sample(state):
+    """
+    given a batch of states, return the last sample of the batch with a
+    length-1 batch axis.
+    """
+    return {
+        k: np.expand_dims(v[-1], 0)
+        for k, v in state.items()
+    }
+
+
 # Actor Critic - https://arxiv.org/abs/1602.01783
 class ActorCriticAgent(PolicyOptimizationAgent):
     def __init__(self, env, tuning_parameters, replicated_device=None, thread_id=0, create_target_network=False):
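
For context: last_sample takes a dict of batched arrays (one entry per state component) and keeps only the final sample, re-adding a batch axis of length 1 so the result can be fed straight into predict(). A minimal standalone sketch with toy shapes (the keys and shapes here are illustrative, not taken from the repo):

    import numpy as np

    def last_sample(state):
        # keep the last element of every array and restore a length-1 batch axis
        return {k: np.expand_dims(v[-1], 0) for k, v in state.items()}

    batch = {
        'observation': np.zeros((32, 84, 84, 4)),   # 32 stacked-frame observations
        'measurements': np.zeros((32, 3)),          # 32 measurement vectors
    }
    single = last_sample(batch)
    print(single['observation'].shape)   # (1, 84, 84, 4)
    print(single['measurements'].shape)  # (1, 3)
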
@@ -76,7 +87,7 @@ class ActorCriticAgent(PolicyOptimizationAgent):
         if game_overs[-1]:
             R = 0
         else:
-            R = self.main_network.online_network.predict(np.expand_dims(next_states[-1], 0))[0]
+            R = self.main_network.online_network.predict(last_sample(next_states))[0]

         for i in reversed(range(num_transitions)):
             R = rewards[i] + self.tp.agent.discount * R
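
The loop above is the usual bootstrapped n-step return: R starts from the value predicted for the state after the last transition (or 0 on episode end) and is folded backwards through the rewards. A small self-contained sketch of that recursion, with toy rewards and an assumed discount of 0.99:

    import numpy as np

    rewards = np.array([1.0, 0.0, 0.0, 2.0])  # toy rewards for 4 transitions
    discount = 0.99                            # stand-in for self.tp.agent.discount
    bootstrap = 0.5                            # stand-in for the value predicted at the last next_state

    R = bootstrap
    returns = np.zeros_like(rewards)
    for i in reversed(range(len(rewards))):
        R = rewards[i] + discount * R          # same recursion as in the training loop
        returns[i] = R
    print(returns)
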
@@ -85,7 +96,7 @@ class ActorCriticAgent(PolicyOptimizationAgent):
         elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:
             # get bootstraps
-            bootstrapped_value = self.main_network.online_network.predict(np.expand_dims(next_states[-1], 0))[0]
+            bootstrapped_value = self.main_network.online_network.predict(last_sample(next_states))[0]
             values = np.append(current_state_values, bootstrapped_value)
             if game_overs[-1]:
                 values[-1] = 0
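
Here the bootstrapped value is appended so that values has one entry per state plus the final next_state, and that last entry is zeroed on terminal episodes. The generalized advantage estimate itself (Schulman et al., 2015) is a discounted sum of one-step TD errors; a compact sketch under the usual gamma/lambda formulation (parameter names illustrative, not the repo's code):

    import numpy as np

    def gae(rewards, values, gamma=0.99, lam=0.95):
        # values has len(rewards) + 1 entries: V(s_0..s_{T-1}) plus the bootstrap V(s_T)
        deltas = rewards + gamma * values[1:] - values[:-1]   # one-step TD errors
        advantages = np.zeros_like(rewards)
        gae_t = 0.0
        for t in reversed(range(len(rewards))):
            gae_t = deltas[t] + gamma * lam * gae_t
            advantages[t] = gae_t
        return advantages

    print(gae(np.array([1.0, 0.0, 2.0]), np.array([0.5, 0.4, 0.3, 0.2])))
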
@@ -101,7 +112,9 @@ class ActorCriticAgent(PolicyOptimizationAgent):
             actions = np.expand_dims(actions, -1)

         # train
-        result = self.main_network.online_network.accumulate_gradients([current_states, actions],
+        inputs = copy.copy(current_states)
+        inputs['output_1_0'] = actions
+        result = self.main_network.online_network.accumulate_gradients(inputs,
                                                                        [state_value_head_targets, action_advantages])

         # logging
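
Instead of a positional list of [current_states, actions], the network now receives a single dict whose keys name each input; 'output_1_0' appears to be the key the policy head expects for the chosen actions. A sketch of how such an input dict is assembled (shapes and key names illustrative):

    import copy
    import numpy as np

    current_states = {'observation': np.zeros((32, 84, 84, 4))}
    actions = np.zeros((32, 1), dtype=np.int64)

    # shallow-copy so the batch dict itself is not mutated, then attach the action input
    inputs = copy.copy(current_states)
    inputs['output_1_0'] = actions
    # inputs can now be passed as the first argument to accumulate_gradients(inputs, targets)
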
@@ -114,11 +127,17 @@ class ActorCriticAgent(PolicyOptimizationAgent):
         return total_loss

     def choose_action(self, curr_state, phase=RunPhase.TRAIN):
+        # TODO: rename curr_state -> state
         # convert to batch so we can run it through the network
-        observation = np.expand_dims(np.array(curr_state['observation']), 0)
+        curr_state = {
+            k: np.expand_dims(np.array(curr_state[k]), 0)
+            for k in curr_state.keys()
+        }
+
         if self.env.discrete_controls:
             # DISCRETE
-            state_value, action_probabilities = self.main_network.online_network.predict(observation)
+            state_value, action_probabilities = self.main_network.online_network.predict(curr_state)
             action_probabilities = action_probabilities.squeeze()
             if phase == RunPhase.TRAIN:
                 action = self.exploration_policy.get_action(action_probabilities)
@@ -128,7 +147,7 @@ class ActorCriticAgent(PolicyOptimizationAgent):
                 self.entropy.add_sample(-np.sum(action_probabilities * np.log(action_probabilities + eps)))
         else:
             # CONTINUOUS
-            state_value, action_values_mean, action_values_std = self.main_network.online_network.predict(observation)
+            state_value, action_values_mean, action_values_std = self.main_network.online_network.predict(curr_state)
             action_values_mean = action_values_mean.squeeze()
             action_values_std = action_values_std.squeeze()
             if phase == RunPhase.TRAIN:
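
In both branches, choose_action now batches every component of the incoming state dict rather than only the observation, so the same predict() call works whether the state carries just pixels or pixels plus measurements. A minimal sketch of that conversion (keys and shapes illustrative):

    import numpy as np

    curr_state = {
        'observation': np.zeros((84, 84, 4)),  # a single, un-batched state
        'measurements': np.zeros(3),
    }
    batched = {k: np.expand_dims(np.array(v), 0) for k, v in curr_state.items()}
    print(batched['observation'].shape)   # (1, 84, 84, 4)
    print(batched['measurements'].shape)  # (1, 3)
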

View File

@@ -93,7 +93,7 @@ class Agent(object):
         self.running_reward = None
         self.training_iteration = 0
         self.current_episode = self.tp.current_episode = 0
-        self.curr_state = []
+        self.curr_state = {}
         self.current_episode_steps_counter = 0
         self.episode_running_info = {}
         self.last_episode_evaluation_ran = 0
@@ -194,7 +194,7 @@ class Agent(object):
         for signal in self.signals:
             signal.reset()
         self.total_reward_in_current_episode = 0
-        self.curr_state = []
+        self.curr_state = {}
         self.last_episode_images = []
         self.current_episode_steps_counter = 0
         self.episode_running_info = {}
@@ -289,23 +289,20 @@ class Agent(object):
         :param batch: An array of transitions
         :return: For each transition element, returns a numpy array of all the transitions in the batch
         """
-        current_observations = np.array([transition.state['observation'] for transition in batch])
-        next_observations = np.array([transition.next_state['observation'] for transition in batch])
+        current_states = {}
+        next_states = {}
+        current_states['observation'] = np.array([transition.state['observation'] for transition in batch])
+        next_states['observation'] = np.array([transition.next_state['observation'] for transition in batch])
         actions = np.array([transition.action for transition in batch])
         rewards = np.array([transition.reward for transition in batch])
         game_overs = np.array([transition.game_over for transition in batch])
         total_return = np.array([transition.total_return for transition in batch])
-        current_states = current_observations
-        next_states = next_observations

         # get the entire state including measurements if available
         if self.tp.agent.use_measurements:
-            current_measurements = np.array([transition.state['measurements'] for transition in batch])
-            next_measurements = np.array([transition.next_state['measurements'] for transition in batch])
-            current_states = [current_observations, current_measurements]
-            next_states = [next_observations, next_measurements]
+            current_states['measurements'] = np.array([transition.state['measurements'] for transition in batch])
+            next_states['measurements'] = np.array([transition.next_state['measurements'] for transition in batch])

         return current_states, next_states, actions, rewards, game_overs, total_return
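
extract_batch now returns the states as dicts of stacked numpy arrays keyed by component, so downstream code no longer has to care whether measurements are present. A toy illustration using a hypothetical Transition namedtuple (not the repo's transition class):

    from collections import namedtuple
    import numpy as np

    Transition = namedtuple('Transition', ['state', 'next_state', 'action', 'reward', 'game_over'])
    batch = [
        Transition({'observation': np.ones(4)}, {'observation': np.zeros(4)}, 0, 1.0, False),
        Transition({'observation': np.ones(4)}, {'observation': np.zeros(4)}, 1, 0.0, True),
    ]

    current_states = {'observation': np.array([t.state['observation'] for t in batch])}
    next_states = {'observation': np.array([t.next_state['observation'] for t in batch])}
    print(current_states['observation'].shape)  # (2, 4)
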
@@ -353,12 +350,24 @@ class Agent(object):
         # get new action
         action_info = {"action_probability": 1.0 / self.env.action_space_size, "action_value": 0}
-        is_first_transition_in_episode = (self.curr_state == [])
+        is_first_transition_in_episode = (self.curr_state == {})
         if is_first_transition_in_episode:
-            observation = self.preprocess_observation(self.env.observation)
-            observation = stack_observation([], observation, self.tp.env.observation_stack_size)
-            self.curr_state = {'observation': observation}
+            if not isinstance(self.env.state, dict):
+                raise ValueError((
+                    'expected state to be a dictionary, found {}'
+                ).format(type(self.env.state)))
+
+            state = self.env.state
+            # TODO: modify preprocess_observation to modify the entire state
+            # for now, only preprocess the observation
+            state['observation'] = self.preprocess_observation(state['observation'])
+
+            # TODO: provide option to stack more than just the observation
+            # TODO: this should probably be happening in an environment wrapper anyway
+            state['observation'] = stack_observation([], state['observation'], self.tp.env.observation_stack_size)
+
+            self.curr_state = state
+
+            # TODO: this should be handled in the environment
             if self.tp.agent.use_measurements:
                 self.curr_state['measurements'] = self.env.measurements
                 if self.tp.agent.use_accumulated_reward_as_measurement:
@@ -373,22 +382,25 @@ class Agent(object):
         if type(action) == np.ndarray:
             action = action.squeeze()
         result = self.env.step(action)

         shaped_reward = self.preprocess_reward(result['reward'])
         if 'action_intrinsic_reward' in action_info.keys():
             shaped_reward += action_info['action_intrinsic_reward']
+        # TODO: should total_reward_in_current_episode include shaped_reward?
         self.total_reward_in_current_episode += result['reward']
-        observation = self.preprocess_observation(result['observation'])
+        next_state = result['state']
+        next_state['observation'] = self.preprocess_observation(next_state['observation'])

         # plot action values online
         if self.tp.visualization.plot_action_values_online and phase != RunPhase.HEATUP:
             self.plot_action_values_online()

         # initialize the next state
-        observation = stack_observation(self.curr_state['observation'], observation, self.tp.env.observation_stack_size)
-        next_state = {'observation': observation}
+        # TODO: provide option to stack more than just the observation
+        next_state['observation'] = stack_observation(self.curr_state['observation'], next_state['observation'], self.tp.env.observation_stack_size)

         if self.tp.agent.use_measurements and 'measurements' in result.keys():
-            next_state['measurements'] = result['measurements']
+            next_state['measurements'] = result['state']['measurements']
             if self.tp.agent.use_accumulated_reward_as_measurement:
                 next_state['measurements'] = np.append(next_state['measurements'], self.total_reward_in_current_episode)
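
stack_observation keeps a rolling window of the last observation_stack_size frames; the actual helper lives in utils, but a plausible sketch of the behavior relied on here (an assumed implementation, not the repo's code):

    import numpy as np

    def stack_observation(curr_stack, observation, stack_size):
        # assumed behavior: start a fresh stack when empty, otherwise drop the
        # oldest frame and append the newest, keeping the last `stack_size` frames
        if len(curr_stack) == 0:
            return np.stack([observation] * stack_size, axis=-1)
        return np.concatenate([curr_stack[..., 1:], np.expand_dims(observation, -1)], axis=-1)

    frame = np.zeros((84, 84))
    stack = stack_observation([], frame, 4)     # first frame of an episode
    stack = stack_observation(stack, frame, 4)  # later steps reuse the running stack
    print(stack.shape)                          # (84, 84, 4)
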

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.