Mirror of https://github.com/gryf/coach.git, synced 2025-12-18 19:50:17 +01:00

This commit replaces the boolean dropout flag in the embedder constructors (InputEmbedder, ImageEmbedder, VectorEmbedder) with a single dropout_rate float. Previously, InputEmbedder took a dropout: bool parameter while hardcoding self.dropout_rate = 0; now one float both enables dropout and sets its rate, with the default 0.0 keeping it disabled.
@@ -34,15 +34,14 @@ class InputEmbedder(object):
     can be multiple embedders in a single network
     """
     def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
-                 scheme: EmbedderScheme=None, batchnorm: bool=False, dropout: bool=False,
+                 scheme: EmbedderScheme=None, batchnorm: bool=False, dropout_rate: float=0.0,
                  name: str= "embedder", input_rescaling=1.0, input_offset=0.0, input_clipping=None, dense_layer=Dense,
                  is_training=False):
         self.name = name
         self.input_size = input_size
         self.activation_function = activation_function
         self.batchnorm = batchnorm
-        self.dropout = dropout
-        self.dropout_rate = 0
+        self.dropout_rate = dropout_rate
         self.input = None
         self.output = None
         self.scheme = scheme
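
The diff does not show where dropout_rate is consumed, but the reason a single float can replace the old (dropout, dropout_rate) pair is that a rate of 0.0 makes dropout a no-op. A minimal sketch of that convention, using a hypothetical apply_dropout helper and the TF 1.x tf.layers.dropout call (neither appears in this commit):

    import tensorflow as tf

    # Hypothetical helper (not from this commit): a rate of 0.0 disables
    # dropout entirely, so no separate boolean on/off flag is needed.
    def apply_dropout(x, dropout_rate: float = 0.0, is_training: bool = False):
        if dropout_rate > 0:
            # Drops units with probability `rate` when training=True and is
            # the identity when training=False.
            return tf.layers.dropout(x, rate=dropout_rate, training=is_training)
        return x
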
@@ -68,7 +67,7 @@ class InputEmbedder(object):
         # we allow adding batchnorm, dropout or activation functions after each layer.
         # The motivation is to simplify the transition between a network with batchnorm and a network without
         # batchnorm to a single flag (the same applies to activation function and dropout)
-        if self.batchnorm or self.activation_function or self.dropout:
+        if self.batchnorm or self.activation_function or self.dropout_rate > 0:
            for layer_idx in reversed(range(len(self.layers_params))):
                self.layers_params.insert(layer_idx+1,
                                          BatchnormActivationDropout(batchnorm=self.batchnorm,
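
Because self.layers_params is mutated while being walked, the loop above iterates in reverse: each insert at layer_idx + 1 lands right after the current layer without shifting the indices still to be visited. A self-contained sketch of the same interleaving pattern (plain Python, placeholder layer names, no Coach imports):

    layers_params = ["conv_1", "conv_2", "dense"]
    batchnorm, activation_function, dropout_rate = False, "relu", 0.3

    # Walk backwards so inserting at layer_idx + 1 never disturbs the
    # positions of the layers that are yet to be processed.
    if batchnorm or activation_function or dropout_rate > 0:
        for layer_idx in reversed(range(len(layers_params))):
            layers_params.insert(layer_idx + 1,
                                 ("batchnorm_activation_dropout",
                                  batchnorm, activation_function, dropout_rate))

    # Every original layer is now followed by one wrapper entry:
    # ['conv_1', (...), 'conv_2', (...), 'dense', (...)]
    print(layers_params)
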
@@ -32,10 +32,10 @@ class ImageEmbedder(InputEmbedder):
     """

     def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
-                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout: bool=False,
+                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout_rate: float=0.0,
                  name: str= "embedder", input_rescaling: float=255.0, input_offset: float=0.0, input_clipping=None,
                  dense_layer=Dense, is_training=False):
-        super().__init__(input_size, activation_function, scheme, batchnorm, dropout, name, input_rescaling,
+        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate, name, input_rescaling,
                          input_offset, input_clipping, dense_layer=dense_layer, is_training=is_training)
         self.return_type = InputImageEmbedding
         if len(input_size) != 3 and scheme != EmbedderScheme.Empty:
@@ -31,10 +31,10 @@ class VectorEmbedder(InputEmbedder):
     """

     def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
-                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout: bool=False,
-                 name: str= "embedder", input_rescaling: float=1.0, input_offset:float=0.0, input_clipping=None,
+                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout_rate: float=0.0,
+                 name: str= "embedder", input_rescaling: float=1.0, input_offset: float=0.0, input_clipping=None,
                  dense_layer=Dense, is_training=False):
-        super().__init__(input_size, activation_function, scheme, batchnorm, dropout, name,
+        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate, name,
                          input_rescaling, input_offset, input_clipping, dense_layer=dense_layer,
                          is_training=is_training)
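
For callers, migration is mechanical: replace the boolean flag with a rate, where 0.0 (the new default) means no dropout. A hedged usage sketch; only the class name, parameter names, and defaults come from the diff, and the import path below is an assumption:

    # Import path is an assumption, not shown in the diff.
    from rl_coach.architectures.tensorflow_components.embedders.image_embedder import ImageEmbedder

    # Before this change: ImageEmbedder([84, 84, 4], dropout=True),
    # alongside a rate that InputEmbedder hardcoded to 0.
    embedder = ImageEmbedder(input_size=[84, 84, 4],
                             dropout_rate=0.3,
                             is_training=True)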