
allow missing carla environment and missing matplotlib package

Zach Dwiel
2017-12-19 23:03:19 -05:00
committed by Itai Caspi
parent 125c7ee38d
commit 37e317682b
3 changed files with 20 additions and 11 deletions

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +15,12 @@
#
import scipy.ndimage
-import matplotlib.pyplot as plt
+try:
+    import matplotlib.pyplot as plt
+except:
+    from logger import failed_imports
+    failed_imports.append("matplotlib")
import copy
from configurations import Preset
from collections import OrderedDict
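
The hunk above is the heart of the change in this file: instead of failing at import time when matplotlib is not installed, the import is wrapped in try/except and the package name is recorded in the logger module's failed_imports list. Below is a minimal standalone sketch of this optional-import pattern, assuming a local failed_imports list and an illustrative plot_values helper that are not part of the commit (it also catches ImportError explicitly rather than using a bare except):

failed_imports = []  # stand-in for the list kept in coach's logger module

try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None  # remember that plotting is unavailable
    failed_imports.append("matplotlib")

def plot_values(values):
    # Illustrative helper, not part of the commit: degrade gracefully
    # by skipping the plot when matplotlib could not be imported.
    if plt is None:
        print("matplotlib is missing; skipping plot")
        return
    plt.plot(values)
    plt.show()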
@@ -206,8 +211,8 @@ class Agent(object):
def preprocess_observation(self, observation):
"""
-Preprocesses the given observation.
-For images - convert to grayscale, resize and convert to int.
+Preprocesses the given observation.
+For images - convert to grayscale, resize and convert to int.
For measurements vectors - normalize by a running average and std.
:param observation: The agents observation
:return: A processed version of the observation
@@ -308,8 +313,8 @@ class Agent(object):
"""
choose an action to act with in the current episode being played. Different behavior might be exhibited when training
or testing.
-:param curr_state: the current state to act upon.
+:param curr_state: the current state to act upon.
:param phase: the current phase: training or testing.
:return: chosen action, some action value describing the action (q-value, probability, etc)
"""

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,7 +18,11 @@ from agents.policy_optimization_agent import *
import numpy as np
from logger import *
import tensorflow as tf
-import matplotlib.pyplot as plt
+try:
+    import matplotlib.pyplot as plt
+except:
+    from logger import failed_imports
+    failed_imports.append("matplotlib")
from utils import *
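
The same guard is repeated in this second agent module, which also imports pyplot at module level. With the bare except, plt is simply left undefined when matplotlib is absent, so any later plotting call would raise NameError unless the caller checks first; the failed_imports list makes that check possible (and the logger can presumably report the missing packages at startup). A hedged sketch of such a check, where render_loss_plot is a hypothetical helper rather than a function in this repository:

from logger import failed_imports

def render_loss_plot(losses):
    # Hypothetical helper: plot only if matplotlib imported successfully at startup.
    if "matplotlib" in failed_imports:
        return
    import matplotlib.pyplot as plt
    plt.plot(losses)
    plt.title("training loss")
    plt.show()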

View File

@@ -2,7 +2,8 @@ import sys
from os import path, environ
try:
-    sys.path.append(path.join(environ.get('CARLA_ROOT'), 'PythonClient'))
+    if 'CARLA_ROOT' in environ:
+        sys.path.append(path.join(environ.get('CARLA_ROOT'), 'PythonClient'))
    from carla.client import CarlaClient
    from carla.settings import CarlaSettings
    from carla.tcp import TCPConnectionError
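
The CARLA half of the commit applies the same defensive style to the environment wrapper: sys.path is only extended when CARLA_ROOT is actually set, so an unset variable no longer makes path.join(None, 'PythonClient') raise before the carla imports are even attempted. Here is a self-contained sketch of the guard; the except branch and its failed_imports bookkeeping are assumptions for illustration, since this hunk does not show how the failure is handled:

import sys
from os import path, environ

failed_imports = []  # stand-in for the list kept in coach's logger module

try:
    # Extend sys.path only when the CARLA install location is known.
    if 'CARLA_ROOT' in environ:
        sys.path.append(path.join(environ.get('CARLA_ROOT'), 'PythonClient'))
    from carla.client import CarlaClient
    from carla.settings import CarlaSettings
    from carla.tcp import TCPConnectionError
except ImportError:
    # Assumed handling: record the missing simulator instead of crashing.
    failed_imports.append("CARLA")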
@@ -126,7 +127,7 @@ class CarlaEnvironmentWrapper(EnvironmentWrapper):
if action == key:
    self.key_to_action[key_map[key]] = idx
self.num_speedup_steps = 30
# measurements
self.measurements_size = (1,)
self.autopilot = None
@@ -227,4 +228,3 @@ class CarlaEnvironmentWrapper(EnvironmentWrapper):
self.observation = observation
return observation