SAC algorithm (#282)

* SAC algorithm

* SAC - updates to the agent (learn_from_batch), sac_head, and sac_q_head to fix a problem in the gradient calculation; the SAC agent is now able to train (see the loss sketch after this list).
gym_environment - fix an error in accessing gym.spaces

* Soft Actor Critic - code cleanup

* code cleanup

* V-head initialization fix

* SAC benchmarks

* SAC Documentation

* typo fix

* documentation fixes

* documentation and version update

* README typo
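
For context, here is a minimal sketch of the soft actor-critic losses that a learn_from_batch step of this kind computes. The tensor names and TensorFlow wiring are assumptions for illustration only, not Coach's actual code; it follows the original SAC formulation with twin Q heads, a separate V head, and a target V network.

# Minimal SAC loss sketch (hypothetical names, not Coach's learn_from_batch).
import tensorflow as tf

def sac_losses(q1, q2, v, v_target_next, q_at_sampled_action, log_pi,
               rewards, dones, discount=0.99, alpha=0.2):
    # Soft Bellman target for both Q heads: r + gamma * V_target(s')
    q_target = tf.stop_gradient(rewards + discount * (1.0 - dones) * v_target_next)
    q_loss = tf.reduce_mean((q1 - q_target) ** 2) + tf.reduce_mean((q2 - q_target) ** 2)

    # V regresses towards Q(s, a) - alpha * log pi(a|s) for a sampled from pi
    v_target = tf.stop_gradient(q_at_sampled_action - alpha * log_pi)
    v_loss = tf.reduce_mean((v - v_target) ** 2)

    # Policy maximizes Q - alpha * log pi, i.e. minimizes the negation
    policy_loss = tf.reduce_mean(alpha * log_pi - q_at_sampled_action)
    return q_loss, v_loss, policy_loss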
guyk1971
2019-05-01 18:37:49 +03:00
committed by shadiendrawis
parent 33dc29ee99
commit 74db141d5e
92 changed files with 2812 additions and 402 deletions


@@ -36,7 +36,6 @@ class HeadParameters(NetworkComponentParameters):
         return 'rl_coach.architectures.tensorflow_components.heads:' + self.parameterized_class_name


 class PPOHeadParameters(HeadParameters):
     def __init__(self, activation_function: str ='tanh', name: str='ppo_head_params',
                  num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
@@ -50,11 +49,12 @@ class PPOHeadParameters(HeadParameters):
 class VHeadParameters(HeadParameters):
     def __init__(self, activation_function: str ='relu', name: str='v_head_params',
                  num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
-                 loss_weight: float = 1.0, dense_layer=None):
+                 loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns'):
         super().__init__(parameterized_class_name="VHead", activation_function=activation_function, name=name,
                          dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
                          rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
                          loss_weight=loss_weight)
+        self.initializer = initializer


 class CategoricalQHeadParameters(HeadParameters):
@@ -196,3 +196,17 @@ class ACERPolicyHeadParameters(HeadParameters):
                          dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
                          rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
                          loss_weight=loss_weight)
+
+
+class SACPolicyHeadParameters(HeadParameters):
+    def __init__(self, activation_function: str ='relu', name: str='sac_policy_head_params', dense_layer=None):
+        super().__init__(parameterized_class_name='SACPolicyHead', activation_function=activation_function, name=name,
+                         dense_layer=dense_layer)
+
+
+class SACQHeadParameters(HeadParameters):
+    def __init__(self, activation_function: str ='relu', name: str='sac_q_head_params', dense_layer=None,
+                 layers_sizes: tuple = (256, 256)):
+        super().__init__(parameterized_class_name='SACQHead', activation_function=activation_function, name=name,
+                         dense_layer=dense_layer)
+        self.network_layers_sizes = layers_sizes
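
As a usage illustration, a hypothetical sketch of constructing the new head parameters follows. The import path and the surrounding preset wiring are assumptions; only the class names and keyword arguments come from the diff above.

# Hypothetical usage sketch (import path and preset wiring assumed).
from rl_coach.architectures.head_parameters import (
    VHeadParameters, SACPolicyHeadParameters, SACQHeadParameters)

# The V head now exposes an initializer argument; a 'normalized_columns'
# initializer typically scales each output column of the weight matrix
# to a fixed norm.
v_head = VHeadParameters(initializer='normalized_columns')

# New SAC heads: a policy head and a state-action Q head that builds
# two 256-unit layers by default.
policy_head = SACPolicyHeadParameters(activation_function='relu')
q_head = SACQHeadParameters(layers_sizes=(256, 256))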