DDPG Critic Head Bug Fix (#344)
* A bug fix for DDPG, where the update to the policy network was based on the sum of the critic's Q predictions on the batch instead of their mean
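For intuition, here is a minimal NumPy sketch of the effect described above (illustrative names only, not Coach's API): an actor update driven by the sum of the critic's Q predictions over a batch produces a gradient exactly batch_size times larger than one driven by their mean, which silently inflates the effective actor learning rate.

import numpy as np

batch_size = 64
# Hypothetical per-sample action gradients dQ/da returned by a critic
# for a batch of (state, action) pairs; shape: (batch_size, action_dim).
dq_da = np.random.randn(batch_size, 1)

# Actor gradient when the objective is the SUM of Q over the batch (the bug):
grad_from_sum = dq_da.sum(axis=0)

# Actor gradient when the objective is the MEAN of Q over the batch (the fix):
grad_from_mean = dq_da.mean(axis=0)

# The summed variant is larger by exactly the batch size, so the actor's
# effective learning rate would be scaled up by a factor of batch_size.
assert np.allclose(grad_from_sum, batch_size * grad_from_mean)

With the default batch_size of 64 used in the parameters below, the mis-scaled update would behave like a 64x larger actor learning rate, which the diff appears to address by switching the critic to a dedicated DDPGVHead.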
@@ -23,7 +23,7 @@ import numpy as np
 from rl_coach.agents.actor_critic_agent import ActorCriticAgent
 from rl_coach.agents.agent import Agent
 from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
-from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, VHeadParameters
+from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, DDPGVHeadParameters
 from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
 from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
     AgentParameters, EmbedderScheme
@@ -39,8 +39,10 @@ class DDPGCriticNetworkParameters(NetworkParameters):
         self.input_embedders_parameters = {'observation': InputEmbedderParameters(batchnorm=True),
                                            'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow)}
         self.middleware_parameters = FCMiddlewareParameters()
-        self.heads_parameters = [VHeadParameters()]
+        self.heads_parameters = [DDPGVHeadParameters()]
         self.optimizer_type = 'Adam'
+        self.adam_optimizer_beta2 = 0.999
+        self.optimizer_epsilon = 1e-8
         self.batch_size = 64
         self.async_training = False
         self.learning_rate = 0.001
@@ -56,6 +58,8 @@ class DDPGActorNetworkParameters(NetworkParameters):
         self.middleware_parameters = FCMiddlewareParameters(batchnorm=True)
         self.heads_parameters = [DDPGActorHeadParameters()]
         self.optimizer_type = 'Adam'
+        self.adam_optimizer_beta2 = 0.999
+        self.optimizer_epsilon = 1e-8
         self.batch_size = 64
         self.async_training = False
         self.learning_rate = 0.0001
@@ -140,7 +144,7 @@ class DDPGAgent(ActorCriticAgent):
 
         critic_inputs = copy.copy(batch.next_states(critic_keys))
         critic_inputs['action'] = next_actions
-        q_st_plus_1 = critic.target_network.predict(critic_inputs)
+        q_st_plus_1 = critic.target_network.predict(critic_inputs)[0]
 
         # calculate the bootstrapped TD targets while discounting terminal states according to
         # use_non_zero_discount_for_terminal_states
@@ -160,7 +164,7 @@ class DDPGAgent(ActorCriticAgent):
         critic_inputs = copy.copy(batch.states(critic_keys))
         critic_inputs['action'] = actions_mean
         action_gradients = critic.online_network.predict(critic_inputs,
-                                                         outputs=critic.online_network.gradients_wrt_inputs[0]['action'])
+                                                         outputs=critic.online_network.gradients_wrt_inputs[1]['action'])
 
         # train the critic
         critic_inputs = copy.copy(batch.states(critic_keys))