
fixing the dropout rate code (#72)

addresses issue #53
Itai Caspi authored 2018-11-08 16:53:47 +02:00, committed by GitHub
parent 389c65cbbe
commit 3a0a1159e9
11 changed files with 33 additions and 33 deletions
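
In short: before this commit, the embedders and middleware accepted a boolean dropout flag but hardcoded self.dropout_rate = 0, so the dropout layers they inserted dropped nothing. The commit removes the flag and passes a single dropout_rate float through instead; dropout is applied only when the rate is greater than zero. A rough sketch of the new calling convention follows (the import paths and scheme value are my assumptions; they are not visible in this diff):

import tensorflow as tf

# Assumed import paths -- the changed file names are not shown on this page.
from rl_coach.architectures.tensorflow_components.embedders.vector_embedder import VectorEmbedder
from rl_coach.base_parameters import EmbedderScheme

# Old API: dropout=True was accepted, but the rate stayed at 0, so dropout was a no-op.
# New API (this commit): one float both enables and configures dropout.
embedder = VectorEmbedder(input_size=[10],
                          activation_function=tf.nn.relu,
                          scheme=EmbedderScheme.Medium,
                          batchnorm=False,
                          dropout_rate=0.1,  # > 0 means the dropout layers are actually active
                          is_training=True)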


@@ -34,15 +34,14 @@ class InputEmbedder(object):
     can be multiple embedders in a single network
     """
     def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
-                 scheme: EmbedderScheme=None, batchnorm: bool=False, dropout: bool=False,
+                 scheme: EmbedderScheme=None, batchnorm: bool=False, dropout_rate: float=0.0,
                  name: str= "embedder", input_rescaling=1.0, input_offset=0.0, input_clipping=None, dense_layer=Dense,
                  is_training=False):
         self.name = name
         self.input_size = input_size
         self.activation_function = activation_function
         self.batchnorm = batchnorm
-        self.dropout = dropout
-        self.dropout_rate = 0
+        self.dropout_rate = dropout_rate
         self.input = None
         self.output = None
         self.scheme = scheme
@@ -68,7 +67,7 @@ class InputEmbedder(object):
         # we allow adding batchnorm, dropout or activation functions after each layer.
         # The motivation is to simplify the transition between a network with batchnorm and a network without
         # batchnorm to a single flag (the same applies to activation function and dropout)
-        if self.batchnorm or self.activation_function or self.dropout:
+        if self.batchnorm or self.activation_function or self.dropout_rate > 0:
             for layer_idx in reversed(range(len(self.layers_params))):
                 self.layers_params.insert(layer_idx+1,
                                           BatchnormActivationDropout(batchnorm=self.batchnorm,
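
The condition change above is the behavioural core of the fix: a BatchnormActivationDropout op is interleaved after every layer whenever batchnorm, an activation function, or a positive dropout rate is requested, and the dropout part of that op is now driven by the rate alone. A self-contained sketch of the interleaving pattern, with plain strings standing in for the real layer parameter objects:

# Stand-ins for self.layers_params and the BatchnormActivationDropout parameters.
layers_params = ["conv_1", "conv_2", "dense_3"]
batchnorm, activation_function, dropout_rate = False, "relu", 0.1

# Same pattern as the diff: walk the list backwards so earlier indices are not
# shifted by the inserts, and place a post-processing op after every layer.
if batchnorm or activation_function or dropout_rate > 0:
    for layer_idx in reversed(range(len(layers_params))):
        layers_params.insert(layer_idx + 1, ("post_op", batchnorm, activation_function, dropout_rate))

print(layers_params)  # each original layer is now followed by a post_op entry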


@@ -32,10 +32,10 @@ class ImageEmbedder(InputEmbedder):
     """
     def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
-                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout: bool=False,
+                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout_rate: float=0.0,
                  name: str= "embedder", input_rescaling: float=255.0, input_offset: float=0.0, input_clipping=None,
                  dense_layer=Dense, is_training=False):
-        super().__init__(input_size, activation_function, scheme, batchnorm, dropout, name, input_rescaling,
+        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate, name, input_rescaling,
                          input_offset, input_clipping, dense_layer=dense_layer, is_training=is_training)
         self.return_type = InputImageEmbedding
         if len(input_size) != 3 and scheme != EmbedderScheme.Empty:


@@ -31,10 +31,10 @@ class VectorEmbedder(InputEmbedder):
     """
     def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
-                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout: bool=False,
-                 name: str= "embedder", input_rescaling: float=1.0, input_offset:float=0.0, input_clipping=None,
+                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout_rate: float=0.0,
+                 name: str= "embedder", input_rescaling: float=1.0, input_offset: float=0.0, input_clipping=None,
                  dense_layer=Dense, is_training=False):
-        super().__init__(input_size, activation_function, scheme, batchnorm, dropout, name,
+        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate, name,
                          input_rescaling, input_offset, input_clipping, dense_layer=dense_layer,
                          is_training=is_training)


@@ -8,7 +8,7 @@ from rl_coach.architectures import layers
 from rl_coach.architectures.tensorflow_components import utils


-def batchnorm_activation_dropout(input_layer, batchnorm, activation_function, dropout, dropout_rate, is_training, name):
+def batchnorm_activation_dropout(input_layer, batchnorm, activation_function, dropout_rate, is_training, name):
     layers = [input_layer]

     # batchnorm
@@ -26,7 +26,7 @@ def batchnorm_activation_dropout(input_layer, batchnorm, activation_function, dropout, dropout_rate, is_training, name):
         )

     # dropout
-    if dropout:
+    if dropout_rate > 0:
         layers.append(
             tf.layers.dropout(layers[-1], dropout_rate, name="{}_dropout".format(name), training=is_training)
         )
@@ -100,7 +100,7 @@ class BatchnormActivationDropout(layers.BatchnormActivationDropout):
         """
         return batchnorm_activation_dropout(input_layer, batchnorm=self.batchnorm,
                                             activation_function=self.activation_function,
-                                            dropout=self.dropout_rate > 0, dropout_rate=self.dropout_rate,
+                                            dropout_rate=self.dropout_rate,
                                             is_training=is_training, name=name)

     @staticmethod
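
For context, tf.layers.dropout in TF 1.x treats its second argument as the fraction of units to drop and only applies it when training=True; with a rate of 0 it returns the input unchanged, which is exactly why the old hardcoded dropout_rate = 0 silently disabled dropout. A minimal standalone check of that behaviour:

import tensorflow as tf  # TF 1.x, matching the code in this diff

x = tf.ones([1, 8])
# rate=0.0 reproduces the old bug: a no-op even with training=True.
noop = tf.layers.dropout(x, 0.0, training=True)
# rate=0.5 drops roughly half the units during training and rescales the survivors.
active = tf.layers.dropout(x, 0.5, training=True)

with tf.Session() as sess:
    print(sess.run(noop))    # all ones
    print(sess.run(active))  # a mix of 0.0 and 2.0 (survivors scaled by 1 / (1 - rate))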


@@ -27,10 +27,11 @@ from rl_coach.utils import force_list
 class FCMiddleware(Middleware):
     def __init__(self, activation_function=tf.nn.relu,
                  scheme: MiddlewareScheme = MiddlewareScheme.Medium,
-                 batchnorm: bool = False, dropout: bool = False,
+                 batchnorm: bool = False, dropout_rate: float = 0.0,
                  name="middleware_fc_embedder", dense_layer=Dense, is_training=False):
         super().__init__(activation_function=activation_function, batchnorm=batchnorm,
-                         dropout=dropout, scheme=scheme, name=name, dense_layer=dense_layer, is_training=is_training)
+                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
+                         is_training=is_training)
         self.return_type = Middleware_FC_Embedding
         self.layers = []


@@ -28,10 +28,11 @@ from rl_coach.utils import force_list
 class LSTMMiddleware(Middleware):
     def __init__(self, activation_function=tf.nn.relu, number_of_lstm_cells: int=256,
                  scheme: MiddlewareScheme = MiddlewareScheme.Medium,
-                 batchnorm: bool = False, dropout: bool = False,
+                 batchnorm: bool = False, dropout_rate: float = 0.0,
                  name="middleware_lstm_embedder", dense_layer=Dense, is_training=False):
         super().__init__(activation_function=activation_function, batchnorm=batchnorm,
-                         dropout=dropout, scheme=scheme, name=name, dense_layer=dense_layer, is_training=is_training)
+                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
+                         is_training=is_training)
         self.return_type = Middleware_LSTM_Embedding
         self.number_of_lstm_cells = number_of_lstm_cells
         self.layers = []
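
Both middleware classes get the same treatment: the boolean is dropped and dropout_rate is forwarded to the Middleware base class shown below. A hedged usage sketch (the import paths are my assumption and are not visible in this diff):

import tensorflow as tf

# Assumed import paths -- not shown on this page.
from rl_coach.architectures.tensorflow_components.middlewares.fc_middleware import FCMiddleware
from rl_coach.base_parameters import MiddlewareScheme

middleware = FCMiddleware(activation_function=tf.nn.relu,
                          scheme=MiddlewareScheme.Medium,
                          batchnorm=False,
                          dropout_rate=0.2,  # forwarded to Middleware.__init__ as of this commit
                          is_training=True)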


@@ -31,15 +31,14 @@ class Middleware(object):
     """
     def __init__(self, activation_function=tf.nn.relu,
                  scheme: MiddlewareScheme = MiddlewareScheme.Medium,
-                 batchnorm: bool = False, dropout: bool = False, name="middleware_embedder", dense_layer=Dense,
+                 batchnorm: bool = False, dropout_rate: float = 0.0, name="middleware_embedder", dense_layer=Dense,
                  is_training=False):
         self.name = name
         self.input = None
         self.output = None
         self.activation_function = activation_function
         self.batchnorm = batchnorm
-        self.dropout = dropout
-        self.dropout_rate = 0
+        self.dropout_rate = dropout_rate
         self.scheme = scheme
         self.return_type = MiddlewareEmbedding
         self.dense_layer = dense_layer
@@ -58,7 +57,7 @@ class Middleware(object):
         # we allow adding batchnorm, dropout or activation functions after each layer.
         # The motivation is to simplify the transition between a network with batchnorm and a network without
         # batchnorm to a single flag (the same applies to activation function and dropout)
-        if self.batchnorm or self.activation_function or self.dropout:
+        if self.batchnorm or self.activation_function or self.dropout_rate > 0:
             for layer_idx in reversed(range(len(self.layers_params))):
                 self.layers_params.insert(layer_idx+1,
                                           BatchnormActivationDropout(batchnorm=self.batchnorm,