diff --git a/agents/agent.py b/agents/agent.py
index a541fa5..bd34d16 100644
--- a/agents/agent.py
+++ b/agents/agent.py
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation 
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,12 @@
 #
 
 import scipy.ndimage
-import matplotlib.pyplot as plt
+try:
+    import matplotlib.pyplot as plt
+except ImportError:
+    from logger import failed_imports
+    failed_imports.append("matplotlib")
+
 import copy
 from configurations import Preset
 from collections import OrderedDict
@@ -206,8 +211,8 @@ class Agent(object):
 
     def preprocess_observation(self, observation):
         """
-        Preprocesses the given observation. 
-        For images - convert to grayscale, resize and convert to int. 
+        Preprocesses the given observation.
+        For images - convert to grayscale, resize and convert to int.
         For measurements vectors - normalize by a running average and std.
         :param observation: The agents observation
         :return: A processed version of the observation
@@ -308,8 +313,8 @@ class Agent(object):
         """
        choose an action to act with in the current episode being played.
        Different behavior might be exhibited when training or testing.
-        
-        :param curr_state: the current state to act upon. 
+
+        :param curr_state: the current state to act upon.
         :param phase: the current phase: training or testing.
         :return: chosen action, some action value describing the action (q-value, probability, etc)
         """
diff --git a/agents/policy_gradients_agent.py b/agents/policy_gradients_agent.py
index 11cef75..3746bba 100644
--- a/agents/policy_gradients_agent.py
+++ b/agents/policy_gradients_agent.py
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation 
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,11 @@ from agents.policy_optimization_agent import *
 import numpy as np
 from logger import *
 import tensorflow as tf
-import matplotlib.pyplot as plt
+try:
+    import matplotlib.pyplot as plt
+except ImportError:
+    from logger import failed_imports
+    failed_imports.append("matplotlib")
 from utils import *
 
 
diff --git a/environments/carla_environment_wrapper.py b/environments/carla_environment_wrapper.py
index ee9c3ed..115bdbf 100644
--- a/environments/carla_environment_wrapper.py
+++ b/environments/carla_environment_wrapper.py
@@ -2,7 +2,8 @@ import sys
 from os import path, environ
 
 try:
-    sys.path.append(path.join(environ.get('CARLA_ROOT'), 'PythonClient'))
+    if 'CARLA_ROOT' in environ:
+        sys.path.append(path.join(environ.get('CARLA_ROOT'), 'PythonClient'))
     from carla.client import CarlaClient
     from carla.settings import CarlaSettings
     from carla.tcp import TCPConnectionError
@@ -126,7 +127,7 @@ class CarlaEnvironmentWrapper(EnvironmentWrapper):
             if action == key:
                 self.key_to_action[key_map[key]] = idx
         self.num_speedup_steps = 30
-        
+
         # measurements
         self.measurements_size = (1,)
         self.autopilot = None
@@ -227,4 +228,3 @@ class CarlaEnvironmentWrapper(EnvironmentWrapper):
         self.observation = observation
 
         return observation
-
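
Note on the import fallback added in agents/agent.py and agents/policy_gradients_agent.py: rather than letting a missing matplotlib abort module import, the failure is recorded in logger.failed_imports so the program can start and report the problem later. A minimal standalone sketch of the pattern (failed_imports is inlined here instead of living in logger.py, and warn_on_failed_imports is a hypothetical consumer, not part of the repo):

    # Record optional-import failures instead of crashing at import time.
    failed_imports = []

    try:
        import matplotlib.pyplot as plt
    except ImportError:
        failed_imports.append("matplotlib")

    def warn_on_failed_imports():
        # Hypothetical consumer: surface the optional dependencies that
        # could not be loaded, so plotting features can be skipped gracefully.
        for module in failed_imports:
            print("Warning: failed to import {}; related features are disabled."
                  .format(module))

    if __name__ == '__main__':
        warn_on_failed_imports()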
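
Note on the CARLA_ROOT guard in environments/carla_environment_wrapper.py: when the variable is unset, environ.get('CARLA_ROOT') returns None, and path.join(None, 'PythonClient') raises an exception (TypeError on Python 3) rather than ImportError, so an except clause aimed at import failures would not catch it. The membership test keeps startup working for users without CARLA installed. A sketch of the guarded form in isolation (assuming the surrounding except clause, which is outside this hunk, handles import errors only):

    import sys
    from os import path, environ

    # Extend sys.path only when CARLA_ROOT is actually set; joining None
    # with a path segment raises an exception that an import-error
    # fallback around the carla imports would not handle.
    if 'CARLA_ROOT' in environ:
        sys.path.append(path.join(environ['CARLA_ROOT'], 'PythonClient'))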