diff --git a/rl_coach/agents/ddpg_agent.py b/rl_coach/agents/ddpg_agent.py
index fa56b56..0288f00 100644
--- a/rl_coach/agents/ddpg_agent.py
+++ b/rl_coach/agents/ddpg_agent.py
@@ -23,7 +23,7 @@ import numpy as np
 from rl_coach.agents.actor_critic_agent import ActorCriticAgent
 from rl_coach.agents.agent import Agent
 from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
-from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, VHeadParameters
+from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, DDPGVHeadParameters
 from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
 from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
     AgentParameters, EmbedderScheme
@@ -39,8 +39,10 @@ class DDPGCriticNetworkParameters(NetworkParameters):
         self.input_embedders_parameters = {'observation': InputEmbedderParameters(batchnorm=True),
                                            'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow)}
         self.middleware_parameters = FCMiddlewareParameters()
-        self.heads_parameters = [VHeadParameters()]
+        self.heads_parameters = [DDPGVHeadParameters()]
         self.optimizer_type = 'Adam'
+        self.adam_optimizer_beta2 = 0.999
+        self.optimizer_epsilon = 1e-8
         self.batch_size = 64
         self.async_training = False
         self.learning_rate = 0.001
@@ -56,6 +58,8 @@ class DDPGActorNetworkParameters(NetworkParameters):
         self.middleware_parameters = FCMiddlewareParameters(batchnorm=True)
         self.heads_parameters = [DDPGActorHeadParameters()]
         self.optimizer_type = 'Adam'
+        self.adam_optimizer_beta2 = 0.999
+        self.optimizer_epsilon = 1e-8
         self.batch_size = 64
         self.async_training = False
         self.learning_rate = 0.0001
@@ -140,7 +144,7 @@ class DDPGAgent(ActorCriticAgent):
 
         critic_inputs = copy.copy(batch.next_states(critic_keys))
         critic_inputs['action'] = next_actions
-        q_st_plus_1 = critic.target_network.predict(critic_inputs)
+        q_st_plus_1 = critic.target_network.predict(critic_inputs)[0]
 
         # calculate the bootstrapped TD targets while discounting terminal states according to
         # use_non_zero_discount_for_terminal_states
@@ -160,7 +164,7 @@ class DDPGAgent(ActorCriticAgent):
         critic_inputs = copy.copy(batch.states(critic_keys))
         critic_inputs['action'] = actions_mean
         action_gradients = critic.online_network.predict(critic_inputs,
-                                                          outputs=critic.online_network.gradients_wrt_inputs[0]['action'])
+                                                          outputs=critic.online_network.gradients_wrt_inputs[1]['action'])
 
         # train the critic
         critic_inputs = copy.copy(batch.states(critic_keys))
diff --git a/rl_coach/architectures/head_parameters.py b/rl_coach/architectures/head_parameters.py
index b947549..8879647 100644
--- a/rl_coach/architectures/head_parameters.py
+++ b/rl_coach/architectures/head_parameters.py
@@ -57,6 +57,17 @@ class VHeadParameters(HeadParameters):
         self.initializer = initializer
 
 
+class DDPGVHeadParameters(HeadParameters):
+    def __init__(self, activation_function: str ='relu', name: str='ddpg_v_head_params',
+                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
+                 loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns'):
+        super().__init__(parameterized_class_name="DDPGVHead", activation_function=activation_function, name=name,
+                         dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
+                         rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
+                         loss_weight=loss_weight)
+        self.initializer = initializer
+
+
 class CategoricalQHeadParameters(HeadParameters):
     def __init__(self, activation_function: str ='relu', name: str='categorical_q_head_params',
                  num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
diff --git a/rl_coach/architectures/tensorflow_components/heads/__init__.py b/rl_coach/architectures/tensorflow_components/heads/__init__.py
index ade92f2..91c1b79 100644
--- a/rl_coach/architectures/tensorflow_components/heads/__init__.py
+++ b/rl_coach/architectures/tensorflow_components/heads/__init__.py
@@ -16,6 +16,7 @@ from .sac_head import SACPolicyHead
 from .sac_q_head import SACQHead
 from .classification_head import ClassificationHead
 from .cil_head import RegressionHead
+from .ddpg_v_head import DDPGVHead
 
 __all__ = [
     'CategoricalQHead',
@@ -35,5 +36,6 @@ __all__ = [
     'SACPolicyHead',
     'SACQHead',
     'ClassificationHead',
-    'RegressionHead'
+    'RegressionHead',
+    'DDPGVHead'
 ]
diff --git a/rl_coach/architectures/tensorflow_components/heads/ddpg_v_head.py b/rl_coach/architectures/tensorflow_components/heads/ddpg_v_head.py
new file mode 100644
index 0000000..4c30829
--- /dev/null
+++ b/rl_coach/architectures/tensorflow_components/heads/ddpg_v_head.py
@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+
+from rl_coach.architectures.tensorflow_components.heads import VHead
+from rl_coach.architectures.tensorflow_components.layers import Dense
+from rl_coach.base_parameters import AgentParameters
+from rl_coach.spaces import SpacesDefinition
+
+
+class DDPGVHead(VHead):
+    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
+                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
+                 dense_layer=Dense, initializer='normalized_columns'):
+        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
+                         dense_layer=dense_layer, initializer=initializer)
+
+    def _build_module(self, input_layer):
+        super()._build_module(input_layer)
+        self.output = [self.output, tf.reduce_mean(self.output)]
+
+    def __str__(self):
+        result = [
+            "Dense (num outputs = 1)"
+        ]
+        return '\n'.join(result)
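
For reference, here is a minimal standalone sketch (plain TensorFlow, not Coach's actual head machinery) of the output structure the new `DDPGVHead._build_module` produces: the parent V head's per-sample values plus their batch mean. This two-element output is also why the agent now takes element `[0]` of `critic.target_network.predict(...)` when building the TD targets.

```python
import tensorflow as tf

# Per-sample critic values, shape (batch_size, 1) -- stand-in for the parent V head's output.
q_values = tf.constant([[1.0], [2.0], [3.0]])

# Mirrors DDPGVHead._build_module: the head now exposes [per-sample values, their mean].
head_output = [q_values, tf.reduce_mean(q_values)]

per_sample_q = head_output[0]  # what the TD-target computation consumes, hence predict(...)[0]
mean_q = head_output[1]        # scalar mean Q, the extra output added by the new head

print(per_sample_q.shape, float(mean_q))  # (3, 1) 2.0
```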
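The switch from `gradients_wrt_inputs[0]['action']` to `gradients_wrt_inputs[1]['action']` presumably follows from the head now exposing two outputs: with the mean Q as the second output, index 1 selects the gradients of that mean with respect to the 'action' input. Independent of Coach's internals, the quantity the DDPG actor update needs is the gradient of the batch-mean critic value with respect to the actions, as in this standalone TensorFlow sketch (the toy critic below is purely illustrative):

```python
import tensorflow as tf

# Batch of actions produced by the actor, shape (batch_size, action_dim).
actions = tf.Variable([[0.1, 0.2], [0.3, 0.4]])

with tf.GradientTape() as tape:
    # Toy differentiable critic Q(s, a); in Coach this role is played by the critic's online network.
    q = tf.reduce_sum(actions * tf.constant([1.0, -1.0]), axis=1, keepdims=True)
    mean_q = tf.reduce_mean(q)  # corresponds to the head's new second output

# dQ_mean/da -- the action gradients the actor update is built from.
action_gradients = tape.gradient(mean_q, actions)
print(action_gradients.numpy())  # [[0.5, -0.5], [0.5, -0.5]]
```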
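The two new network-parameter fields make the remaining Adam hyper-parameters explicit. Assuming they are forwarded to the optimizer, the equivalent plain-TensorFlow construction would look roughly like the line below; note that beta_2 = 0.999 and epsilon = 1e-8 are also TensorFlow's defaults, so presets that do not override them keep the previous behaviour.

```python
import tensorflow as tf

# Hypothetical direct construction with the values from DDPGCriticNetworkParameters;
# Coach builds its optimizers internally, so this is only an illustration.
critic_optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_2=0.999, epsilon=1e-8)
```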