Mirror of https://github.com/gryf/coach.git (synced 2026-01-29 03:25:47 +01:00)
Cleanup imports.
Until now, most of the modules imported all of another module's objects (variables, classes, functions, and even its own imports) into their namespace, which could cause, and in practice did cause, unintentional use of classes or methods that were only imported indirectly. With this patch, all star imports are replaced with an import of the top-level module that provides the desired class or function. In addition, all imports were sorted (where possible) the way PEP 8 [1] suggests: imports from the standard library first, then third-party imports (numpy, tensorflow, etc.), and finally coach modules, with each section separated by one empty line.

[1] https://www.python.org/dev/peps/pep-0008/#imports
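To illustrate the pattern this patch applies, here is a minimal before/after sketch. The module and function names (utils, force_list, numpy) are borrowed from the diff below; the surrounding statements are made up for the example and are not part of the commit.

# Before: the star import pulls every public name from utils into this
# module's namespace, so nothing at the call site says where
# force_list() comes from, and a same-named local can shadow it.
from utils import *

targets = force_list(targets)

# After: one import per providing module, grouped as PEP 8 suggests
# (standard library, then third party, then coach modules, separated
# by one empty line), and the call site is qualified by module.
import copy
import sys

import numpy as np

import utils

targets = utils.force_list(targets)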
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,19 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #

 import sys
 import copy
-from ngraph.frontends.neon import *
 import ngraph as ng
-from architectures.architecture import *
 import numpy as np
-from utils import *

+from architectures import architecture
+import utils


-class NeonArchitecture(Architecture):
+class NeonArchitecture(architecture.Architecture):
     def __init__(self, tuning_parameters, name="", global_network=None, network_is_local=True):
-        Architecture.__init__(self, tuning_parameters, name)
+        architecture.Architecture.__init__(self, tuning_parameters, name)
         assert tuning_parameters.agent.neon_support, 'Neon is not supported for this agent'
         self.clip_error = tuning_parameters.clip_gradients
         self.total_loss = None
@@ -113,8 +110,8 @@ class NeonArchitecture(Architecture):
     def accumulate_gradients(self, inputs, targets):
         # Neon doesn't currently allow separating the grads calculation and grad apply operations
         # so this feature is not currently available. instead we do a full training iteration
-        inputs = force_list(inputs)
-        targets = force_list(targets)
+        inputs = utils.force_list(inputs)
+        targets = utils.force_list(targets)

         for idx, input in enumerate(inputs):
             inputs[idx] = input.swapaxes(0, -1)
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,10 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #

-import ngraph.frontends.neon as neon
 import ngraph as ng
-from ngraph.util.names import name_scope
+import ngraph.frontends.neon as neon
+import ngraph.util.names as ngraph_names


 class InputEmbedder(object):
@@ -31,7 +30,7 @@ class InputEmbedder(object):
         self.output = None

     def __call__(self, prev_input_placeholder=None):
-        with name_scope(self.get_name()):
+        with ngraph_names.name_scope(self.get_name()):
             # create the input axes
             axes = []
             if len(self.input_size) == 2:
@@ -13,15 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import ngraph as ng
+from ngraph.frontends import neon
+from ngraph.util import names as ngraph_names

-from architectures.neon_components.embedders import *
-from architectures.neon_components.heads import *
-from architectures.neon_components.middleware import *
-from architectures.neon_components.architecture import *
-from configurations import InputTypes, OutputTypes, MiddlewareTypes
+from architectures.neon_components import architecture
+from architectures.neon_components import embedders
+from architectures.neon_components import middleware
+from architectures.neon_components import heads
+import configurations as conf


-class GeneralNeonNetwork(NeonArchitecture):
+class GeneralNeonNetwork(architecture.NeonArchitecture):
     def __init__(self, tuning_parameters, name="", global_network=None, network_is_local=True):
         self.global_network = global_network
         self.network_is_local = network_is_local
@@ -34,7 +37,7 @@ class GeneralNeonNetwork(NeonArchitecture):
         self.activation_function = self.get_activation_function(
             tuning_parameters.agent.hidden_layers_activation_function)

-        NeonArchitecture.__init__(self, tuning_parameters, name, global_network, network_is_local)
+        architecture.NeonArchitecture.__init__(self, tuning_parameters, name, global_network, network_is_local)

     def get_activation_function(self, activation_function_string):
         activation_functions = {
@@ -53,36 +56,36 @@ class GeneralNeonNetwork(NeonArchitecture):
         # the observation can be either an image or a vector
         def get_observation_embedding(with_timestep=False):
             if self.input_height > 1:
-                return ImageEmbedder((self.input_depth, self.input_height, self.input_width), self.batch_size,
-                                     name="observation")
+                return embedders.ImageEmbedder((self.input_depth, self.input_height, self.input_width), self.batch_size,
+                                               name="observation")
             else:
-                return VectorEmbedder((self.input_depth, self.input_width + int(with_timestep)), self.batch_size,
-                                      name="observation")
+                return embedders.VectorEmbedder((self.input_depth, self.input_width + int(with_timestep)), self.batch_size,
+                                                name="observation")

         input_mapping = {
-            InputTypes.Observation: get_observation_embedding(),
-            InputTypes.Measurements: VectorEmbedder(self.measurements_size, self.batch_size, name="measurements"),
-            InputTypes.GoalVector: VectorEmbedder(self.measurements_size, self.batch_size, name="goal_vector"),
-            InputTypes.Action: VectorEmbedder((self.num_actions,), self.batch_size, name="action"),
-            InputTypes.TimedObservation: get_observation_embedding(with_timestep=True),
+            conf.InputTypes.Observation: get_observation_embedding(),
+            conf.InputTypes.Measurements: embedders.VectorEmbedder(self.measurements_size, self.batch_size, name="measurements"),
+            conf.InputTypes.GoalVector: embedders.VectorEmbedder(self.measurements_size, self.batch_size, name="goal_vector"),
+            conf.InputTypes.Action: embedders.VectorEmbedder((self.num_actions,), self.batch_size, name="action"),
+            conf.InputTypes.TimedObservation: get_observation_embedding(with_timestep=True),
         }
         return input_mapping[embedder_type]

     def get_middleware_embedder(self, middleware_type):
-        return {MiddlewareTypes.LSTM: None,  # LSTM over Neon is currently not supported in Coach
-                MiddlewareTypes.FC: FC_Embedder}.get(middleware_type)(self.activation_function)
+        return {conf.MiddlewareTypes.LSTM: None,  # LSTM over Neon is currently not supported in Coach
+                conf.MiddlewareTypes.FC: middleware.FC_Embedder}.get(middleware_type)(self.activation_function)

     def get_output_head(self, head_type, head_idx, loss_weight=1.):
         output_mapping = {
-            OutputTypes.Q: QHead,
-            OutputTypes.DuelingQ: DuelingQHead,
-            OutputTypes.V: None,  # Policy Optimization algorithms over Neon are currently not supported in Coach
-            OutputTypes.Pi: None,  # Policy Optimization algorithms over Neon are currently not supported in Coach
-            OutputTypes.MeasurementsPrediction: None,  # DFP over Neon is currently not supported in Coach
-            OutputTypes.DNDQ: None,  # NEC over Neon is currently not supported in Coach
-            OutputTypes.NAF: None,  # NAF over Neon is currently not supported in Coach
-            OutputTypes.PPO: None,  # PPO over Neon is currently not supported in Coach
-            OutputTypes.PPO_V: None  # PPO over Neon is currently not supported in Coach
+            conf.OutputTypes.Q: heads.QHead,
+            conf.OutputTypes.DuelingQ: heads.DuelingQHead,
+            conf.OutputTypes.V: None,  # Policy Optimization algorithms over Neon are currently not supported in Coach
+            conf.OutputTypes.Pi: None,  # Policy Optimization algorithms over Neon are currently not supported in Coach
+            conf.OutputTypes.MeasurementsPrediction: None,  # DFP over Neon is currently not supported in Coach
+            conf.OutputTypes.DNDQ: None,  # NEC over Neon is currently not supported in Coach
+            conf.OutputTypes.NAF: None,  # NAF over Neon is currently not supported in Coach
+            conf.OutputTypes.PPO: None,  # PPO over Neon is currently not supported in Coach
+            conf.OutputTypes.PPO_V: None  # PPO over Neon is currently not supported in Coach
         }
         return output_mapping[head_type](self.tp, head_idx, loss_weight, self.network_is_local)
@@ -104,7 +107,7 @@ class GeneralNeonNetwork(NeonArchitecture):
         done_creating_input_placeholders = False

         for network_idx in range(self.num_networks):
-            with name_scope('network_{}'.format(network_idx)):
+            with ngraph_names.name_scope('network_{}'.format(network_idx)):
                 ####################
                 # Input Embeddings #
                 ####################
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #

 import ngraph as ng
-from ngraph.util.names import name_scope
-import ngraph.frontends.neon as neon
+from ngraph.frontends import neon
+from ngraph.util import names as ngraph_names
 import numpy as np
-from utils import force_list
-from architectures.neon_components.losses import *

+import utils
+from architectures.neon_components import losses


 class Head(object):
@@ -30,7 +29,7 @@ class Head(object):
         self.loss = []
         self.loss_type = []
         self.regularizations = []
-        self.loss_weight = force_list(loss_weight)
+        self.loss_weight = utils.force_list(loss_weight)
         self.weights_init = neon.GlorotInit()
         self.biases_init = neon.ConstantInit()
         self.target = []
@@ -44,15 +43,15 @@ class Head(object):
         :param input_layer: the input to the graph
         :return: the output of the last layer and the target placeholder
         """
-        with name_scope(self.get_name()):
+        with ngraph_names.name_scope(self.get_name()):
             self._build_module(input_layer)

-        self.output = force_list(self.output)
-        self.target = force_list(self.target)
-        self.input = force_list(self.input)
-        self.loss_type = force_list(self.loss_type)
-        self.loss = force_list(self.loss)
-        self.regularizations = force_list(self.regularizations)
+        self.output = utils.force_list(self.output)
+        self.target = utils.force_list(self.target)
+        self.input = utils.force_list(self.input)
+        self.loss_type = utils.force_list(self.loss_type)
+        self.loss = utils.force_list(self.loss)
+        self.regularizations = utils.force_list(self.regularizations)
         if self.is_local:
             self.set_loss()
@@ -106,7 +105,7 @@ class QHead(Head):
         if tuning_parameters.agent.replace_mse_with_huber_loss:
             raise Exception("huber loss is not supported in neon")
         else:
-            self.loss_type = mean_squared_error
+            self.loss_type = losses.mean_squared_error

     def _build_module(self, input_layer):
         # Standard Q Network
@@ -159,7 +158,7 @@ class MeasurementsPredictionHead(Head):
         if tuning_parameters.agent.replace_mse_with_huber_loss:
             raise Exception("huber loss is not supported in neon")
         else:
-            self.loss_type = mean_squared_error
+            self.loss_type = losses.mean_squared_error

     def _build_module(self, input_layer):
         # This is almost exactly the same as Dueling Network but we predict the future measurements for each action
@@ -167,7 +166,7 @@ class MeasurementsPredictionHead(Head):
         multistep_measurements_size = self.measurements_size[0] * self.num_predicted_steps_ahead

         # actions expectation tower (expectation stream) - E
-        with name_scope("expectation_stream"):
+        with ngraph_names.name_scope("expectation_stream"):
             expectation_stream = neon.Sequential([
                 neon.Affine(nout=256, activation=neon.Rectlin(),
                             weight_init=self.weights_init, bias_init=self.biases_init),
@@ -176,7 +175,7 @@ class MeasurementsPredictionHead(Head):
             ])(input_layer)

         # action fine differences tower (action stream) - A
-        with name_scope("action_stream"):
+        with ngraph_names.name_scope("action_stream"):
             action_stream_unnormalized = neon.Sequential([
                 neon.Affine(nout=256, activation=neon.Rectlin(),
                             weight_init=self.weights_init, bias_init=self.biases_init),
@@ -191,4 +190,3 @@ class MeasurementsPredictionHead(Head):

         # merge to future measurements predictions
         self.output = repeated_expectation_stream + action_stream
-
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,15 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #

 import ngraph as ng
-import ngraph.frontends.neon as neon
-from ngraph.util.names import name_scope
-import numpy as np
+from ngraph.util import names as ngraph_names


 def mean_squared_error(targets, outputs, weights=1.0, scope=""):
-    with name_scope(scope):
+    with ngraph_names.name_scope(scope):
         # TODO: reduce mean over the action axis
         loss = ng.squared_L2(targets - outputs)
         weighted_loss = loss * weights
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,11 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #

-import ngraph as ng
 import ngraph.frontends.neon as neon
-from ngraph.util.names import name_scope
-import numpy as np
+from ngraph.util import names as ngraph_names


 class MiddlewareEmbedder(object):
@@ -30,7 +27,7 @@ class MiddlewareEmbedder(object):
         self.activation_function = activation_function

     def __call__(self, input_layer):
-        with name_scope(self.get_name()):
+        with ngraph_names.name_scope(self.get_name()):
             self.input = input_layer
             self._build_module()