From 389c65cbbea933b253882d5d5581ecf4a54774fa Mon Sep 17 00:00:00 2001
From: Itai Caspi <30383381+itaicaspi-intel@users.noreply.github.com>
Date: Thu, 8 Nov 2018 16:52:48 +0200
Subject: [PATCH] fix for a bug in distributed training that was introduced
 lately (#75)

---
 rl_coach/graph_managers/graph_manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rl_coach/graph_managers/graph_manager.py b/rl_coach/graph_managers/graph_manager.py
index 778e4ed..b20a12c 100644
--- a/rl_coach/graph_managers/graph_manager.py
+++ b/rl_coach/graph_managers/graph_manager.py
@@ -195,7 +195,7 @@ class GraphManager(object):
     @staticmethod
     def create_worker_or_parameters_server(task_parameters: DistributedTaskParameters):
         if task_parameters.framework_type == Frameworks.tensorflow:
-            GraphManager._create_worker_or_parameters_server_tf(task_parameters)
+            return GraphManager._create_worker_or_parameters_server_tf(task_parameters)
         elif task_parameters.framework_type == Frameworks.mxnet:
             raise NotImplementedError('Distributed training not implemented for MXNet')
         else:
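
Note on the fix: the original dispatcher called the TensorFlow helper but dropped its
result, so the method implicitly returned None to every caller in the distributed
code path; adding `return` propagates the helper's value. Below is a minimal,
self-contained sketch of the bug pattern, not Coach's actual classes: the
`Frameworks` enum, `TaskParameters` stand-in, and the helper's return value are
simplified assumptions for illustration only.

    from enum import Enum


    class Frameworks(Enum):
        tensorflow = 1
        mxnet = 2


    class TaskParameters:
        # Hypothetical stand-in for DistributedTaskParameters.
        framework_type = Frameworks.tensorflow


    class GraphManager:
        @staticmethod
        def _create_worker_or_parameters_server_tf(task_parameters):
            # In Coach this would build the worker / parameter-server handle;
            # a placeholder object is returned here for the sketch.
            return object()

        @staticmethod
        def create_buggy(task_parameters):
            if task_parameters.framework_type == Frameworks.tensorflow:
                # Bug: the helper's result is discarded, so this method
                # falls off the end and implicitly returns None.
                GraphManager._create_worker_or_parameters_server_tf(task_parameters)

        @staticmethod
        def create_fixed(task_parameters):
            if task_parameters.framework_type == Frameworks.tensorflow:
                # Fix: forward the helper's return value to the caller.
                return GraphManager._create_worker_or_parameters_server_tf(task_parameters)


    print(GraphManager.create_buggy(TaskParameters()))   # None
    print(GraphManager.create_fixed(TaskParameters()))   # <object object at 0x...>

Any caller that unpacked or used the returned handle would fail (or silently
misbehave) under the buggy version, which is consistent with a one-word `return`
being the entire patch.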