From ea294de7fde341f5a533c80d86afe43b3b7254dc Mon Sep 17 00:00:00 2001
From: Gal Leibovich
Date: Thu, 30 Aug 2018 18:15:59 +0300
Subject: [PATCH] adding dueling support for rainbow dqn (now only missing n-step)

---
 rl_coach/agents/rainbow_dqn_agent.py          | 20 ++++------
 .../heads/categorical_q_head.py               |  3 --
 ...{rainbow_dqn_head.py => rainbow_q_head.py} | 39 +++++++++++++++----
 3 files changed, 40 insertions(+), 22 deletions(-)
 rename rl_coach/architectures/tensorflow_components/heads/{rainbow_dqn_head.py => rainbow_q_head.py} (52%)

diff --git a/rl_coach/agents/rainbow_dqn_agent.py b/rl_coach/agents/rainbow_dqn_agent.py
index 8fef370..b45415e 100644
--- a/rl_coach/agents/rainbow_dqn_agent.py
+++ b/rl_coach/agents/rainbow_dqn_agent.py
@@ -18,25 +18,20 @@ from typing import Union
 
 import numpy as np
 
-from rl_coach.agents.categorical_dqn_agent import CategoricalDQNNetworkParameters, CategoricalDQNAlgorithmParameters, \
+from rl_coach.agents.categorical_dqn_agent import CategoricalDQNAlgorithmParameters, \
     CategoricalDQNAgent, CategoricalDQNAgentParameters
-from rl_coach.agents.dqn_agent import DQNNetworkParameters, DQNAlgorithmParameters
-from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
-from rl_coach.architectures.tensorflow_components.heads.categorical_q_head import CategoricalQHeadParameters
-from rl_coach.base_parameters import AgentParameters
+from rl_coach.agents.dqn_agent import DQNNetworkParameters
+from rl_coach.architectures.tensorflow_components.heads.rainbow_q_head import RainbowQHeadParameters
+
 from rl_coach.exploration_policies.parameter_noise import ParameterNoiseParameters
-from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters
 from rl_coach.memories.non_episodic.prioritized_experience_replay import PrioritizedExperienceReplayParameters, \
     PrioritizedExperienceReplay
-from rl_coach.schedules import LinearSchedule
-
-from rl_coach.core_types import StateType
-from rl_coach.exploration_policies.e_greedy import EGreedyParameters
 
 
-class RainbowDQNNetworkParameters(CategoricalDQNNetworkParameters):
+class RainbowDQNNetworkParameters(DQNNetworkParameters):
     def __init__(self):
         super().__init__()
+        self.heads_parameters = [RainbowQHeadParameters()]
 
 
 class RainbowDQNAlgorithmParameters(CategoricalDQNAlgorithmParameters):
@@ -68,10 +63,11 @@ class RainbowDQNAgentParameters(CategoricalDQNAgentParameters):
 # 2. C51
 # 3. Prioritized ER
 # 4. DDQN
+# 5. Dueling DQN
 #
 # still missing:
 # 1. N-Step
-# 2. Dueling DQN
+
 class RainbowDQNAgent(CategoricalDQNAgent):
     def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
         super().__init__(agent_parameters, parent)
diff --git a/rl_coach/architectures/tensorflow_components/heads/categorical_q_head.py b/rl_coach/architectures/tensorflow_components/heads/categorical_q_head.py
index 4a299c0..17573b0 100644
--- a/rl_coach/architectures/tensorflow_components/heads/categorical_q_head.py
+++ b/rl_coach/architectures/tensorflow_components/heads/categorical_q_head.py
@@ -42,9 +42,6 @@ class CategoricalQHead(Head):
         self.return_type = QActionStateValue
 
     def _build_module(self, input_layer):
-        self.actions = tf.placeholder(tf.int32, [None], name="actions")
-        self.input = [self.actions]
-
         values_distribution = self.dense_layer(self.num_actions * self.num_atoms)(input_layer, name='output')
         values_distribution = tf.reshape(values_distribution, (tf.shape(values_distribution)[0],
                                                                self.num_actions, self.num_atoms))
diff --git a/rl_coach/architectures/tensorflow_components/heads/rainbow_dqn_head.py b/rl_coach/architectures/tensorflow_components/heads/rainbow_q_head.py
similarity index 52%
rename from rl_coach/architectures/tensorflow_components/heads/rainbow_dqn_head.py
rename to rl_coach/architectures/tensorflow_components/heads/rainbow_q_head.py
index d559894..d24b723 100644
--- a/rl_coach/architectures/tensorflow_components/heads/rainbow_dqn_head.py
+++ b/rl_coach/architectures/tensorflow_components/heads/rainbow_q_head.py
@@ -17,11 +17,10 @@
 import tensorflow as tf
 
 from rl_coach.architectures.tensorflow_components.architecture import Dense
+from rl_coach.architectures.tensorflow_components.heads.head import HeadParameters, Head
 from rl_coach.base_parameters import AgentParameters
-from rl_coach.spaces import SpacesDefinition
-
-from rl_coach.architectures.tensorflow_components.heads.head import Head, HeadParameters
 from rl_coach.core_types import QActionStateValue
+from rl_coach.spaces import SpacesDefinition
 
 
 class RainbowQHeadParameters(HeadParameters):
@@ -30,15 +29,41 @@ class RainbowQHeadParameters(HeadParameters):
                          dense_layer=dense_layer)
 
 
-class RainbowQHead():
+class RainbowQHead(Head):
     def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
-                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str ='relu',
+                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                  dense_layer=Dense):
         super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                          dense_layer=dense_layer)
-        self.name = 'rainbow_dqn_head'
         self.num_actions = len(self.spaces.action.actions)
+        self.num_atoms = agent_parameters.algorithm.atoms
         self.return_type = QActionStateValue
+        self.name = 'rainbow_q_values_head'
 
     def _build_module(self, input_layer):
-        pass
\ No newline at end of file
+        # state value tower - V
+        with tf.variable_scope("state_value"):
+            state_value = self.dense_layer(self.num_atoms)(input_layer, name='fc1')
+            state_value = tf.expand_dims(state_value, axis=1)
+
+        # action advantage tower - A
+        with tf.variable_scope("action_advantage"):
+            action_advantage = self.dense_layer(self.num_actions * self.num_atoms)(input_layer, name='fc1')
+            action_advantage = tf.reshape(action_advantage, (tf.shape(input_layer)[0], self.num_actions,
+                                                             self.num_atoms))
+            action_mean = tf.reduce_mean(action_advantage, axis=1, keepdims=True)
+            action_advantage = action_advantage - action_mean
+
+        # merge to state-action value function Q
+        values_distribution = tf.add(state_value, action_advantage, name='output')
+
+        # softmax on atoms dimension
+        self.output = tf.nn.softmax(values_distribution)
+
+        # calculate cross entropy loss
+        self.distributions = tf.placeholder(tf.float32, shape=(None, self.num_actions, self.num_atoms),
+                                            name="distributions")
+        self.target = self.distributions
+        self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.target, logits=values_distribution)
+        tf.losses.add_loss(self.loss)
+
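Note on the dueling aggregation added in RainbowQHead._build_module: the state-value stream produces one logit per atom, the advantage stream produces per-action atom logits with their mean over actions subtracted, and the sum is normalized over the atoms dimension. The shapes can be sanity-checked outside TensorFlow with the minimal NumPy sketch below; it is not part of the patch, and the batch/action/atom sizes are arbitrary examples.

# Standalone NumPy sketch (not part of the patch); sizes are arbitrary examples.
import numpy as np

batch_size, num_actions, num_atoms = 4, 3, 51

# state value tower - V: one logit per atom, broadcast across actions
state_value = np.random.randn(batch_size, num_atoms)[:, np.newaxis, :]   # (B, 1, atoms)

# action advantage tower - A: per-action atom logits, mean over actions removed
action_advantage = np.random.randn(batch_size, num_actions, num_atoms)   # (B, actions, atoms)
action_advantage -= action_advantage.mean(axis=1, keepdims=True)

# merge and normalize over the atoms dimension
logits = state_value + action_advantage                                  # (B, actions, atoms)
probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs /= probs.sum(axis=-1, keepdims=True)

assert probs.shape == (batch_size, num_actions, num_atoms)
assert np.allclose(probs.sum(axis=-1), 1.0)

In the patch itself the same normalization is done by tf.nn.softmax on values_distribution, and the cross-entropy loss compares these logits against the projected target distributions fed through the distributions placeholder.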