
Batch RL Tutorial (#372)

Gal Leibovich
2019-07-14 18:43:48 +03:00
committed by GitHub
parent b82414138d
commit 19ad2d60a7
40 changed files with 1155 additions and 182 deletions

rl_coach/architectures/tensorflow_components/heads/q_head.py

@@ -26,7 +26,7 @@ from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpac
 class QHead(Head):
     def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                  head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
-                 dense_layer=Dense):
+                 dense_layer=Dense, output_bias_initializer=None):
         super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                          dense_layer=dense_layer)
         self.name = 'q_values_head'
@@ -46,9 +46,12 @@ class QHead(Head):
         else:
             self.loss_type = tf.losses.mean_squared_error
 
+        self.output_bias_initializer = output_bias_initializer
+
     def _build_module(self, input_layer):
         # Standard Q Network
-        self.q_values = self.output = self.dense_layer(self.num_actions)(input_layer, name='output')
+        self.q_values = self.output = self.dense_layer(self.num_actions)\
+            (input_layer, name='output', bias_initializer=self.output_bias_initializer)
 
         # used in batch-rl to estimate a probability distribution over actions
         self.softmax = self.add_softmax_with_temperature()
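
What the change accomplishes: the initializer passed in through output_bias_initializer lands on the output layer's biases, so a freshly built Q head starts from Q-values centered on a chosen constant instead of near zero. A minimal TF 1.x sketch of the idea; the -100 constant and the toy input/action sizes are illustrative assumptions, not values taken from this commit:

import numpy as np
import tensorflow as tf  # TF 1.x API (or tf.compat.v1), matching this file

# Illustrative choice: start every Q-value estimate at -100, so that
# actions poorly covered by the fixed batch look pessimistic rather than
# promising. A batch RL agent cannot explore to correct over-optimism,
# so a pessimistic starting point is the safer default.
output_bias_initializer = tf.constant_initializer(-100.)

state = tf.placeholder(tf.float32, shape=(None, 4), name='state')
q_values = tf.layers.dense(state, units=2, name='output',
                           bias_initializer=output_bias_initializer)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # For an all-zero input the kernel contributes nothing, so the
    # output equals the bias exactly: [[-100., -100.]]
    print(sess.run(q_values, feed_dict={state: np.zeros((1, 4))}))

Note that shifting only the bias moves all initial estimates by the same amount, leaving the head's relative action preferences at initialization unchanged.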