
fixing the dropout rate code (#72)

addresses issue #53

Author: Itai Caspi (committed via GitHub)
Date:   2018-11-08 16:53:47 +02:00
Commit: 3a0a1159e9
Parent: 389c65cbbe

11 changed files with 33 additions and 33 deletions
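In the old API, the middlewares took a boolean dropout flag while the base class pinned self.dropout_rate = 0, so enabling dropout had no effect; this commit replaces the flag with a dropout_rate float end to end. A minimal sketch of the new call site follows; the import path is an assumption based on the rl_coach package layout, not something shown in this diff:

    import tensorflow as tf
    # Assumed import path; adjust to the installed rl_coach version.
    from rl_coach.architectures.tensorflow_components.middlewares.fc_middleware import FCMiddleware

    # dropout_rate is the fraction of activations to drop during training;
    # the default of 0.0 leaves dropout disabled.
    middleware = FCMiddleware(activation_function=tf.nn.relu,
                              dropout_rate=0.3,
                              is_training=True)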


@@ -27,10 +27,11 @@ from rl_coach.utils import force_list
 class FCMiddleware(Middleware):
     def __init__(self, activation_function=tf.nn.relu,
                  scheme: MiddlewareScheme = MiddlewareScheme.Medium,
-                 batchnorm: bool = False, dropout: bool = False,
+                 batchnorm: bool = False, dropout_rate: float = 0.0,
                  name="middleware_fc_embedder", dense_layer=Dense, is_training=False):
         super().__init__(activation_function=activation_function, batchnorm=batchnorm,
-                         dropout=dropout, scheme=scheme, name=name, dense_layer=dense_layer, is_training=is_training)
+                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
+                         is_training=is_training)
         self.return_type = Middleware_FC_Embedding
         self.layers = []


@@ -28,10 +28,11 @@ from rl_coach.utils import force_list
 class LSTMMiddleware(Middleware):
     def __init__(self, activation_function=tf.nn.relu, number_of_lstm_cells: int=256,
                  scheme: MiddlewareScheme = MiddlewareScheme.Medium,
-                 batchnorm: bool = False, dropout: bool = False,
+                 batchnorm: bool = False, dropout_rate: float = 0.0,
                  name="middleware_lstm_embedder", dense_layer=Dense, is_training=False):
         super().__init__(activation_function=activation_function, batchnorm=batchnorm,
-                         dropout=dropout, scheme=scheme, name=name, dense_layer=dense_layer, is_training=is_training)
+                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
+                         is_training=is_training)
         self.return_type = Middleware_LSTM_Embedding
         self.number_of_lstm_cells = number_of_lstm_cells
         self.layers = []
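The LSTM middleware receives the identical rename. A corresponding sketch of its new call site (import path again assumed, not shown in this diff):

    from rl_coach.architectures.tensorflow_components.middlewares.lstm_middleware import LSTMMiddleware

    # Only the dropout parameter changes; LSTM-specific arguments are untouched.
    lstm_middleware = LSTMMiddleware(number_of_lstm_cells=256,
                                     dropout_rate=0.3,
                                     is_training=True)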


@@ -31,15 +31,14 @@ class Middleware(object):
     """
     def __init__(self, activation_function=tf.nn.relu,
                  scheme: MiddlewareScheme = MiddlewareScheme.Medium,
-                 batchnorm: bool = False, dropout: bool = False, name="middleware_embedder", dense_layer=Dense,
+                 batchnorm: bool = False, dropout_rate: float = 0.0, name="middleware_embedder", dense_layer=Dense,
                  is_training=False):
         self.name = name
         self.input = None
         self.output = None
         self.activation_function = activation_function
         self.batchnorm = batchnorm
-        self.dropout = dropout
-        self.dropout_rate = 0
+        self.dropout_rate = dropout_rate
         self.scheme = scheme
         self.return_type = MiddlewareEmbedding
         self.dense_layer = dense_layer
@@ -58,7 +57,7 @@ class Middleware(object):
         # we allow adding batchnorm, dropout or activation functions after each layer.
         # The motivation is to simplify the transition between a network with batchnorm and a network without
         # batchnorm to a single flag (the same applies to activation function and dropout)
-        if self.batchnorm or self.activation_function or self.dropout:
+        if self.batchnorm or self.activation_function or self.dropout_rate > 0:
             for layer_idx in reversed(range(len(self.layers_params))):
                 self.layers_params.insert(layer_idx+1,
                                           BatchnormActivationDropout(batchnorm=self.batchnorm,
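With the rate stored on the middleware, the gating condition becomes self.dropout_rate > 0 rather than a boolean test. As a rough illustration of how a post-layer wrapper like BatchnormActivationDropout could consume the rate (a sketch under assumptions, not the repository's actual implementation), using the TF 1.x layers API the surrounding code targets:

    import tensorflow as tf

    def batchnorm_activation_dropout(x, batchnorm, activation_function,
                                     dropout_rate, is_training):
        # Illustrative post-layer block: batchnorm, then activation, then dropout.
        if batchnorm:
            x = tf.layers.batch_normalization(x, training=is_training)
        if activation_function:
            x = activation_function(x)
        if dropout_rate > 0:
            # `rate` is the drop probability; dropout is active only in training.
            x = tf.layers.dropout(x, rate=dropout_rate, training=is_training)
        return x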