Stars: 1 | Forks: 0 — mirror of https://github.com/gryf/coach.git, synced 2025-12-17 19:20:19 +01:00

DDPG Critic Head Bug Fix (#344)

* A bug fix for DDPG, where the update to the policy network was based on the sum of the critic's Q predictions on the batch instead of their mean
This commit is contained in:
Gal Leibovich
2019-06-05 17:47:56 +03:00
committed by GitHub
parent 0aa5359d63
commit a1bb8eef89
4 changed files with 61 additions and 5 deletions

View File

@@ -57,6 +57,17 @@ class VHeadParameters(HeadParameters):
self.initializer = initializer
class DDPGVHeadParameters(HeadParameters):
    """Configuration for the DDPG critic (V) head.

    Mirrors ``VHeadParameters`` but registers the head under the
    ``DDPGVHead`` parameterized class name, so DDPG agents pick up the
    dedicated critic head implementation.

    :param activation_function: activation used inside the head layers.
    :param name: identifier for this parameter set.
    :param num_output_head_copies: number of duplicated output heads.
    :param rescale_gradient_from_head_by_factor: gradient rescaling factor
        applied to gradients flowing back from this head.
    :param loss_weight: weight of this head's loss in the total loss.
    :param dense_layer: dense layer type to build the head with
        (``None`` selects the framework default).
    :param initializer: weight-initialization scheme for the head's layers.
    """

    def __init__(self, activation_function: str = 'relu', name: str = 'ddpg_v_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns'):
        # Collect the shared head configuration, then hand it to the base class.
        base_kwargs = dict(
            parameterized_class_name="DDPGVHead",
            activation_function=activation_function,
            name=name,
            dense_layer=dense_layer,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
        )
        super().__init__(**base_kwargs)
        # The initializer is head-specific and not part of the base-class contract.
        self.initializer = initializer
class CategoricalQHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='categorical_q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,