DDPG Critic Head Bug Fix (#344)
* A bug fix for DDPG, where the update to the policy network was based on the sum of the critic's Q-value predictions over the batch instead of their mean.
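The gradient of a batch sum is exactly batch_size times the gradient of the batch mean, so the buggy update effectively multiplied the actor's learning rate by the batch size. A minimal NumPy sketch of that scaling (a toy linear critic and policy, not Coach code):

    import numpy as np

    # Toy setup: critic Q(s, a) = w * a, policy a = theta * s, so the
    # per-sample policy gradient is dQ_i/dtheta = w * s_i.
    np.random.seed(0)
    batch_size = 64
    states = np.random.randn(batch_size)
    w = 0.5

    per_sample_grads = w * states              # dQ_i/dtheta for each sample i
    grad_of_sum = np.sum(per_sample_grads)     # what the buggy update used
    grad_of_mean = np.mean(per_sample_grads)   # what the fix uses

    assert np.isclose(grad_of_sum, batch_size * grad_of_mean)
    print(grad_of_sum / grad_of_mean)          # -> 64.0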
rl_coach/agents/ddpg_agent.py

@@ -23,7 +23,7 @@ import numpy as np
 from rl_coach.agents.actor_critic_agent import ActorCriticAgent
 from rl_coach.agents.agent import Agent
 from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
-from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, VHeadParameters
+from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, DDPGVHeadParameters
 from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
 from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
     AgentParameters, EmbedderScheme
@@ -39,8 +39,10 @@ class DDPGCriticNetworkParameters(NetworkParameters):
         self.input_embedders_parameters = {'observation': InputEmbedderParameters(batchnorm=True),
                                            'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow)}
         self.middleware_parameters = FCMiddlewareParameters()
-        self.heads_parameters = [VHeadParameters()]
+        self.heads_parameters = [DDPGVHeadParameters()]
         self.optimizer_type = 'Adam'
+        self.adam_optimizer_beta2 = 0.999
+        self.optimizer_epsilon = 1e-8
         self.batch_size = 64
         self.async_training = False
         self.learning_rate = 0.001
@@ -56,6 +58,8 @@ class DDPGActorNetworkParameters(NetworkParameters):
         self.middleware_parameters = FCMiddlewareParameters(batchnorm=True)
         self.heads_parameters = [DDPGActorHeadParameters()]
         self.optimizer_type = 'Adam'
+        self.adam_optimizer_beta2 = 0.999
+        self.optimizer_epsilon = 1e-8
         self.batch_size = 64
         self.async_training = False
         self.learning_rate = 0.0001
@@ -140,7 +144,7 @@ class DDPGAgent(ActorCriticAgent):
 
         critic_inputs = copy.copy(batch.next_states(critic_keys))
         critic_inputs['action'] = next_actions
-        q_st_plus_1 = critic.target_network.predict(critic_inputs)
+        q_st_plus_1 = critic.target_network.predict(critic_inputs)[0]
 
         # calculate the bootstrapped TD targets while discounting terminal states according to
         # use_non_zero_discount_for_terminal_states
@@ -160,7 +164,7 @@ class DDPGAgent(ActorCriticAgent):
         critic_inputs = copy.copy(batch.states(critic_keys))
         critic_inputs['action'] = actions_mean
         action_gradients = critic.online_network.predict(critic_inputs,
-                                                         outputs=critic.online_network.gradients_wrt_inputs[0]['action'])
+                                                         outputs=critic.online_network.gradients_wrt_inputs[1]['action'])
 
         # train the critic
         critic_inputs = copy.copy(batch.states(critic_keys))
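Note on the two index changes above: with the new head, the critic network exposes two outputs, the per-sample Q values (index 0) and their batch mean (index 1). That is why predict(...)[0] is needed to keep the TD targets per-sample, and why the actor's action gradients come from gradients_wrt_inputs[1], the mean. A standalone graph-mode sketch of that layout (illustrative tensors, not Coach's network class):

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()  # graph mode, as in Coach's TF1-era code

    # Stand-in critic: with the new head, the outputs list gains a second
    # entry holding the batch mean of the per-sample Q values.
    action = tf.compat.v1.placeholder(tf.float32, shape=(None, 1), name='action')
    q_per_sample = 2.0 * action              # outputs[0]: per-sample Q values
    q_mean = tf.reduce_mean(q_per_sample)    # outputs[1]: their batch mean

    # gradients_wrt_inputs[i] corresponds to d(outputs[i])/d(inputs), so the
    # actor update must index the mean output (1), not the raw Q values (0).
    grads = [tf.gradients(out, action)[0] for out in (q_per_sample, q_mean)]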
rl_coach/architectures/head_parameters.py

@@ -57,6 +57,17 @@ class VHeadParameters(HeadParameters):
         self.initializer = initializer
 
 
+class DDPGVHeadParameters(HeadParameters):
+    def __init__(self, activation_function: str ='relu', name: str='ddpg_v_head_params',
+                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
+                 loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns'):
+        super().__init__(parameterized_class_name="DDPGVHead", activation_function=activation_function, name=name,
+                         dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
+                         rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
+                         loss_weight=loss_weight)
+        self.initializer = initializer
+
+
 class CategoricalQHeadParameters(HeadParameters):
     def __init__(self, activation_function: str ='relu', name: str='categorical_q_head_params',
                  num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
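DDPGVHeadParameters is what agent presets instantiate; parameterized_class_name="DDPGVHead" maps it to the TensorFlow head class added below. A quick sanity check one could run (assuming DDPGAgentParameters keeps its 'critic' network wrapper; names are from the repo, but the check itself is hypothetical):

    from rl_coach.agents.ddpg_agent import DDPGAgentParameters

    agent_params = DDPGAgentParameters()
    critic_params = agent_params.network_wrappers['critic']
    # Expected after this change: DDPGVHeadParameters
    print(type(critic_params.heads_parameters[0]).__name__)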
rl_coach/architectures/tensorflow_components/heads/__init__.py

@@ -16,6 +16,7 @@ from .sac_head import SACPolicyHead
 from .sac_q_head import SACQHead
 from .classification_head import ClassificationHead
 from .cil_head import RegressionHead
+from .ddpg_v_head import DDPGVHead
 
 __all__ = [
     'CategoricalQHead',
@@ -35,5 +36,6 @@ __all__ = [
     'SACPolicyHead',
     'SACQHead',
     'ClassificationHead',
-    'RegressionHead'
+    'RegressionHead',
+    'DDPGVHead'
 ]
rl_coach/architectures/tensorflow_components/heads/ddpg_v_head.py (new file)

@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+
+from rl_coach.architectures.tensorflow_components.heads import VHead
+from rl_coach.architectures.tensorflow_components.layers import Dense
+from rl_coach.base_parameters import AgentParameters
+from rl_coach.spaces import SpacesDefinition
+
+
+class DDPGVHead(VHead):
+    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
+                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
+                 dense_layer=Dense, initializer='normalized_columns'):
+        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
+                         dense_layer=dense_layer, initializer=initializer)
+
+    def _build_module(self, input_layer):
+        super()._build_module(input_layer)
+        self.output = [self.output, tf.reduce_mean(self.output)]
+
+    def __str__(self):
+        result = [
+            "Dense (num outputs = 1)"
+        ]
+        return '\n'.join(result)
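The whole fix hinges on the one line in _build_module: the head's output becomes a pair of the per-sample values and their batch mean, and the mean is what the actor differentiates. An eager-mode illustration of that output structure (standalone, not Coach code):

    import tensorflow as tf

    # Four fake per-sample Q values, shaped (batch, 1) like a V-head output.
    q = tf.constant([[0.0], [1.0], [2.0], [3.0]])
    output = [q, tf.reduce_mean(q)]   # mirrors [self.output, tf.reduce_mean(self.output)]

    print(output[0].shape)            # (4, 1) -> per-sample Q values, output index 0
    print(float(output[1]))           # 1.5    -> batch mean, output index 1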