mirror of
https://github.com/gryf/coach.git
synced 2026-02-16 05:55:46 +01:00
TD3 (#338)
This commit is contained in:
@@ -203,7 +203,6 @@ class TensorFlowArchitecture(Architecture):
|
||||
self._create_gradient_accumulators()
|
||||
|
||||
# gradients of the outputs w.r.t. the inputs
|
||||
# at the moment, this is only used by ddpg
|
||||
self.gradients_wrt_inputs = [{name: tf.gradients(output, input_ph) for name, input_ph in
|
||||
self.inputs.items()} for output in self.outputs]
|
||||
self.gradients_weights_ph = [tf.placeholder('float32', self.outputs[i].shape, 'output_gradient_weights')
|
||||
|
||||
@@ -16,6 +16,7 @@ from .sac_head import SACPolicyHead
|
||||
from .sac_q_head import SACQHead
|
||||
from .classification_head import ClassificationHead
|
||||
from .cil_head import RegressionHead
|
||||
from .td3_v_head import TD3VHead
|
||||
from .ddpg_v_head import DDPGVHead
|
||||
|
||||
__all__ = [
|
||||
@@ -37,5 +38,6 @@ __all__ = [
|
||||
'SACQHead',
|
||||
'ClassificationHead',
|
||||
'RegressionHead',
|
||||
'TD3VHead',
|
||||
'DDPGVHead'
|
||||
]
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from typing import Type
|
||||
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
@@ -22,7 +21,7 @@ from rl_coach.architectures.tensorflow_components.layers import Dense, convert_l
|
||||
from rl_coach.base_parameters import AgentParameters
|
||||
from rl_coach.spaces import SpacesDefinition
|
||||
from rl_coach.utils import force_list
|
||||
|
||||
from rl_coach.architectures.tensorflow_components.utils import squeeze_tensor
|
||||
|
||||
# Used to initialize weights for policy and value output layers
|
||||
def normalized_columns_initializer(std=1.0):
|
||||
@@ -72,8 +71,9 @@ class Head(object):
|
||||
:param input_layer: the input to the graph
|
||||
:return: the output of the last layer and the target placeholder
|
||||
"""
|
||||
|
||||
with tf.variable_scope(self.get_name(), initializer=tf.contrib.layers.xavier_initializer()):
|
||||
self._build_module(input_layer)
|
||||
self._build_module(squeeze_tensor(input_layer))
|
||||
|
||||
self.output = force_list(self.output)
|
||||
self.target = force_list(self.target)
|
||||
|
||||
@@ -0,0 +1,67 @@
|
||||
#
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import tensorflow as tf
|
||||
|
||||
from rl_coach.architectures.tensorflow_components.layers import Dense
|
||||
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
|
||||
from rl_coach.base_parameters import AgentParameters
|
||||
from rl_coach.core_types import VStateValue
|
||||
from rl_coach.spaces import SpacesDefinition
|
||||
|
||||
|
||||
class TD3VHead(Head):
    """TD3 critic head: two Q-value output streams plus their element-wise
    minimum (clipped double-Q learning, Fujimoto et al. 2018).

    Outputs, in order:
      * one Q-value tensor per critic stream,
      * the element-wise minimum over the streams (used to build targets),
      * the mean of the first stream's Q-values (used by the policy update).
    The head's loss is the sum of per-stream MSE losses against a single
    shared target placeholder.
    """

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str = 'relu',
                 dense_layer=Dense, initializer='xavier'):
        """
        :param initializer: weight initializer for the Q output layers; one of
            'normalized_columns', 'xavier', or None (treated as 'xavier').
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'td3_v_values_head'
        self.return_type = VStateValue
        # Losses are assembled manually in _build_module, so no declared loss type.
        self.loss_type = []
        self.initializer = initializer
        self.loss = []
        self.output = []

    def _build_module(self, input_layer):
        # input_layer is assumed to stack one feature tensor per critic
        # network along axis 0 (TD3 uses two critics) — TODO confirm against
        # the middleware that feeds this head.
        q_outputs = []
        # Shared regression target: min over the target critics' Q-values.
        self.target = tf.placeholder(tf.float32, shape=(None, 1), name="q_networks_min_placeholder")

        for i in range(input_layer.shape[0]):
            if self.initializer == 'normalized_columns':
                q_outputs.append(self.dense_layer(1)(input_layer[i], name='q_output_{}'.format(i + 1),
                                                     kernel_initializer=normalized_columns_initializer(1.0)))
            elif self.initializer == 'xavier' or self.initializer is None:
                q_outputs.append(self.dense_layer(1)(input_layer[i], name='q_output_{}'.format(i + 1)))
            else:
                # Fail fast: the original silent fall-through would raise an
                # opaque IndexError at q_outputs[i] below.
                raise ValueError("Unknown initializer: {}".format(self.initializer))

            self.output.append(q_outputs[i])
            # Per-critic MSE against the shared target placeholder.
            self.loss.append(tf.reduce_mean((self.target - q_outputs[i]) ** 2))

        # Minimum over critics counters Q-value overestimation.
        self.output.append(tf.reduce_min(q_outputs, axis=0))
        # Mean of the first critic's Q-values, consumed by the policy update.
        self.output.append(tf.reduce_mean(self.output[0]))
        self.loss = sum(self.loss)
        tf.losses.add_loss(self.loss)

    def __str__(self):
        result = [
            "Q1 Action-Value Stream",
            "\tDense (num outputs = 1)",
            "Q2 Action-Value Stream",
            "\tDense (num outputs = 1)",
            "Min (Q1, Q2)"
        ]
        return '\n'.join(result)
|
||||
@@ -28,23 +28,28 @@ class FCMiddleware(Middleware):
|
||||
def __init__(self, activation_function=tf.nn.relu,
|
||||
scheme: MiddlewareScheme = MiddlewareScheme.Medium,
|
||||
batchnorm: bool = False, dropout_rate: float = 0.0,
|
||||
name="middleware_fc_embedder", dense_layer=Dense, is_training=False):
|
||||
name="middleware_fc_embedder", dense_layer=Dense, is_training=False, num_streams: int = 1):
|
||||
super().__init__(activation_function=activation_function, batchnorm=batchnorm,
|
||||
dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
|
||||
is_training=is_training)
|
||||
self.return_type = Middleware_FC_Embedding
|
||||
self.layers = []
|
||||
|
||||
assert(isinstance(num_streams, int) and num_streams >= 1)
|
||||
self.num_streams = num_streams
|
||||
|
||||
def _build_module(self):
|
||||
self.layers.append(self.input)
|
||||
self.output = []
|
||||
|
||||
for idx, layer_params in enumerate(self.layers_params):
|
||||
self.layers.extend(force_list(
|
||||
layer_params(self.layers[-1], name='{}_{}'.format(layer_params.__class__.__name__, idx),
|
||||
is_training=self.is_training)
|
||||
))
|
||||
for stream_idx in range(self.num_streams):
|
||||
layers = [self.input]
|
||||
|
||||
self.output = self.layers[-1]
|
||||
for idx, layer_params in enumerate(self.layers_params):
|
||||
layers.extend(force_list(
|
||||
layer_params(layers[-1], name='{}_{}'.format(layer_params.__class__.__name__,
|
||||
idx + stream_idx * len(self.layers_params)),
|
||||
is_training=self.is_training)
|
||||
))
|
||||
self.output.append((layers[-1]))
|
||||
|
||||
@property
|
||||
def schemes(self):
|
||||
@@ -72,3 +77,15 @@ class FCMiddleware(Middleware):
|
||||
]
|
||||
}
|
||||
|
||||
def __str__(self):
    """Describe the middleware layers, grouped per stream when multi-stream."""
    if not self.layers_params:
        return 'No layers'

    layer_descriptions = [str(layer) for layer in self.layers_params]
    if self.num_streams > 1:
        # Each stream renders as a 'Stream i' banner followed by its
        # tab-indented layer descriptions.
        chunk = [''] + ['\t' + desc for desc in layer_descriptions]
        lines = chunk * self.num_streams
        # Overwrite each chunk's leading placeholder with its banner.
        lines[0::len(chunk)] = ['Stream {}'.format(i) for i in range(self.num_streams)]
    else:
        lines = layer_descriptions
    return '\n'.join(lines)
|
||||
|
||||
@@ -38,3 +38,10 @@ def get_activation_function(activation_function_string: str):
|
||||
"Activation function must be one of the following {}. instead it was: {}" \
|
||||
.format(activation_functions.keys(), activation_function_string)
|
||||
return activation_functions[activation_function_string]
|
||||
|
||||
|
||||
def squeeze_tensor(tensor):
    """Drop a leading singleton dimension if present; otherwise return the
    tensor unchanged. Only the first axis is considered (unlike a full
    squeeze), so trailing singleton dimensions are preserved.
    """
    return tensor[0] if tensor.shape[0] == 1 else tensor
|
||||
Reference in New Issue
Block a user