mirror of https://github.com/gryf/coach.git, synced 2025-12-18 11:40:18 +01:00

temp commit
@@ -161,7 +161,6 @@ class CarlaEnvironmentWrapper(EnvironmentWrapper):
         measurements = []
         while type(measurements) == list:
             measurements, sensor_data = self.game.read_data()
-        self.observation = sensor_data['CameraRGB'].data
 
         self.location = (measurements.player_measurements.transform.location.x,
                          measurements.player_measurements.transform.location.y,
@@ -181,7 +180,10 @@ class CarlaEnvironmentWrapper(EnvironmentWrapper):
                       - np.abs(self.control.steer) * 10
 
         # update measurements
-        self.measurements = [measurements.player_measurements.forward_speed]
+        self.observation = {
+            'observation': sensor_data['CameraRGB'].data,
+            'measurements': [measurements.player_measurements.forward_speed],
+        }
         self.autopilot = measurements.player_measurements.autopilot_control
 
         # action_p = ['%.2f' % member for member in [self.control.throttle, self.control.steer]]
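The CARLA hunk above replaces the flat `self.measurements` list with a single composite dict carrying both the camera frame and the measurements vector. A minimal sketch of what a consumer of that dict sees; the frame shape and speed value here are illustrative stand-ins, since CARLA's resolution is configurable:

```python
import numpy as np

# Illustrative stand-ins for what the wrapper packs together above; the real
# frame comes from sensor_data['CameraRGB'].data and the real speed from
# measurements.player_measurements.forward_speed.
observation = {
    'observation': np.zeros((600, 800, 3), dtype=np.uint8),  # camera frame
    'measurements': [7.5],                                    # forward speed
}

frame = observation['observation']       # image input to the agent
speed = observation['measurements'][0]   # measurements input to the agent
```

The DoomEnvironmentWrapper hunk further down applies the same packing with `state.screen_buffer` and `state.game_variables`.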
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -135,8 +135,10 @@ class DoomEnvironmentWrapper(EnvironmentWrapper):
         # extract all data from the current state
         state = self.game.get_state()
         if state is not None and state.screen_buffer is not None:
-            self.observation = state.screen_buffer
-            self.measurements = state.game_variables
+            self.observation = {
+                'observation': state.screen_buffer,
+                'measurements': state.game_variables,
+            }
         self.reward = self.game.get_last_reward()
         self.done = self.game.is_episode_finished()
 
@@ -157,5 +159,3 @@ class DoomEnvironmentWrapper(EnvironmentWrapper):
 
     def _restart_environment_episode(self, force_environment_reset=False):
         self.game.new_episode()
-
-
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -31,14 +31,13 @@ class EnvironmentWrapper(object):
         # env initialization
         self.game = []
         self.actions = {}
-        self.observation = []
+        self.state = []
         self.reward = 0
         self.done = False
         self.default_action = 0
         self.last_action_idx = 0
         self.episode_idx = 0
         self.last_episode_time = time.time()
-        self.measurements = []
         self.info = []
         self.action_space_low = 0
         self.action_space_high = 0
@@ -65,6 +64,22 @@ class EnvironmentWrapper(object):
         self.game_is_open = True
         self.renderer = Renderer()
 
+    @property
+    def measurements(self):
+        assert False
+
+    @measurements.setter
+    def measurements(self, value):
+        assert False
+
+    @property
+    def observation(self):
+        assert False
+
+    @observation.setter
+    def observation(self, value):
+        assert False
+
     def _idx_to_action(self, action_idx):
         """
         Convert an action index to one of the environment available actions.
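The sixteen added lines turn `measurements` and `observation` into properties whose getters and setters immediately `assert False`: any code still touching the retired attributes fails loudly on first access instead of silently reading or writing stale data. A standalone sketch of the pattern; the message strings are my own additions:

```python
class LegacyAttributeGuard(object):
    """Fail fast when code still uses an attribute retired by a refactor."""

    @property
    def observation(self):
        assert False, "retired: read self.state['observation'] instead"

    @observation.setter
    def observation(self, value):
        assert False, "retired: write to self.state['observation'] instead"


guard = LegacyAttributeGuard()
# guard.observation          # would raise AssertionError
# guard.observation = None   # would raise AssertionError
```

Note that the CARLA and Doom wrappers in this same commit still assign `self.observation`, so they would trip these assertions as soon as they run; consistent with the "temp commit" message, the migration is only partial here.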
@@ -108,7 +123,7 @@ class EnvironmentWrapper(object):
         for env_keys in self.key_to_action.keys():
             if set(env_keys) == set(self.renderer.pressed_keys):
                 return self.key_to_action[env_keys]
 
 
         # return the default action 0 so that the environment will continue running
         return self.default_action
@@ -116,7 +131,7 @@ class EnvironmentWrapper(object):
         """
         Perform a single step on the environment using the given action
         :param action_idx: the action to perform on the environment
-        :return: A dictionary containing the observation, reward, done flag, action and measurements
+        :return: A dictionary containing the state, reward, done flag and action
         """
         self.last_action_idx = action_idx
 
@@ -127,13 +142,12 @@ class EnvironmentWrapper(object):
         if self.is_rendered:
             self.render()
 
-        self.observation = self._preprocess_observation(self.observation)
+        self.state = self._preprocess_state(self.state)
 
-        return {'observation': self.observation,
+        return {'state': self.state,
                 'reward': self.reward,
                 'done': self.done,
                 'action': self.last_action_idx,
-                'measurements': self.measurements,
                 'info': self.info}
 
     def render(self):
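With this hunk, `step()` returns the keyed state under `'state'` and drops the separate `'measurements'` entry. A hypothetical driver loop against the new contract; the names `run_episode`, `env` and `max_steps` are mine, and `reset(True)` mirrors how the gym wrapper below calls it:

```python
def run_episode(env, max_steps=1000):
    # reset() and step() now return the same transition dict shape:
    # {'state': {...}, 'reward': r, 'done': d, 'action': a, 'info': i}
    transition = env.reset(True)
    for _ in range(max_steps):
        action_idx = env.get_random_action()
        transition = env.step(action_idx)
        frame = transition['state']['observation']  # keyed access everywhere
        if transition['done']:
            break
    return transition
```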
@@ -146,7 +160,7 @@ class EnvironmentWrapper(object):
         """
         Reset the environment and all the variables of the wrapper
         :param force_environment_reset: forces environment reset even when the game did not end
-        :return: A dictionary containing the observation, reward, done flag, action and measurements
+        :return: A dictionary containing the state, reward, done flag and action
         """
         self._restart_environment_episode(force_environment_reset)
         self.last_episode_time = time.time()
@@ -156,17 +170,18 @@ class EnvironmentWrapper(object):
         self.last_action_idx = 0
         self._update_state()
 
-        # render before the preprocessing of the observation, so that the image will be in its original quality
+        # render before the preprocessing of the state, so that the image will be in its original quality
         if self.is_rendered:
             self.render()
 
-        self.observation = self._preprocess_observation(self.observation)
+        # TODO BUG: if the environment has not been reset, _preprocessed_state will be running on an already preprocessed state
+        # TODO: see also _update_state above
+        self.state = self._preprocess_state(self.state)
 
-        return {'observation': self.observation,
+        return {'state': self.state,
                 'reward': self.reward,
                 'done': self.done,
                 'action': self.last_action_idx,
-                'measurements': self.measurements,
                 'info': self.info}
 
     def get_random_action(self):
@@ -181,7 +196,7 @@ class EnvironmentWrapper(object):
 
     def change_phase(self, phase):
         """
-        Change the current phase of the run.
+        Change the current phase of the run.
         This is useful when different behavior is expected when testing and training
         :param phase: The running phase of the algorithm
         :type phase: RunPhase
@@ -216,19 +231,19 @@ class EnvironmentWrapper(object):
         """
         pass
 
-    def _preprocess_observation(self, observation):
+    def _preprocess_state(self, state):
         """
-        Do initial observation preprocessing such as cropping, rgb2gray, rescale etc.
+        Do initial state preprocessing such as cropping, rgb2gray, rescale etc.
         Implementing this function is optional.
-        :param observation: a raw observation from the environment
-        :return: the preprocessed observation
+        :param state: a raw state from the environment
+        :return: the preprocessed state
         """
-        return observation
+        return state
 
     def _update_state(self):
         """
         Updates the state from the environment.
-        Should update self.observation, self.reward, self.done, self.measurements and self.info
+        Should update self.state, self.reward, self.done and self.info
         :return: None
         """
         pass
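The renamed hooks define the subclass contract: `_update_state()` refreshes `self.state`, `self.reward`, `self.done` and `self.info` from the backend, and `_preprocess_state()` optionally cleans the keyed state each step. A hypothetical minimal subclass; the `self.game.*` calls are placeholders, not a real backend API:

```python
import numpy as np

class ToyEnvironmentWrapper(EnvironmentWrapper):
    def _update_state(self):
        # placeholder backend calls; a real wrapper reads its own engine here
        frame = self.game.grab_frame()
        self.state = {'observation': frame}
        self.reward = self.game.last_reward()
        self.done = self.game.episode_finished()
        self.info = {}

    def _preprocess_state(self, state):
        # example preprocessing: rgb2gray, as the docstring above suggests
        obs = state['observation']
        if obs.ndim == 3:
            state['observation'] = obs.mean(axis=2).astype(np.uint8)
        return state
```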
@@ -243,7 +258,8 @@ class EnvironmentWrapper(object):
     def get_rendered_image(self):
         """
         Return a numpy array containing the image that will be rendered to the screen.
-        This can be different from the observation. For example, mujoco's observation is a measurements vector.
+        This can be different from the state. For example, mujoco's state is a measurements vector.
         :return: numpy array containing the image that will be rendered to the screen
         """
-        return self.observation
+        # TODO: probably needs revisiting
+        return self.state
@@ -60,7 +60,7 @@ class GymEnvironmentWrapper(EnvironmentWrapper):
         self.env.frameskip = self.frame_skip
         self.discrete_controls = type(self.env.action_space) != gym.spaces.box.Box
 
-        self.observation = self.reset(True)['observation']
+        self.state = self.reset(True)['state']
 
         # render
         if self.is_rendered:
@@ -70,12 +70,13 @@ class GymEnvironmentWrapper(EnvironmentWrapper):
             scale = 2
             self.renderer.create_screen(image.shape[1]*scale, image.shape[0]*scale)
 
-        self.is_state_type_image = len(self.observation.shape) > 1
+        # TODO: collect and store this as observation space instead
+        self.is_state_type_image = len(self.state['observation'].shape) > 1
         if self.is_state_type_image:
-            self.width = self.observation.shape[1]
-            self.height = self.observation.shape[0]
+            self.width = self.state['observation'].shape[1]
+            self.height = self.state['observation'].shape[0]
         else:
-            self.width = self.observation.shape[0]
+            self.width = self.state['observation'].shape[0]
 
         # action space
         self.actions_description = {}
@@ -101,6 +102,12 @@ class GymEnvironmentWrapper(EnvironmentWrapper):
         self.timestep_limit = None
         self.measurements_size = len(self.step(0)['info'].keys())
 
+    def _wrap_state(self, state):
+        if isinstance(self.env.observation_space, gym.spaces.Dict):
+            return state
+        else:
+            return {'observation': state}
+
     def _update_state(self):
         if hasattr(self.env, 'env') and hasattr(self.env.env, 'ale'):
             if self.phase == RunPhase.TRAIN and hasattr(self, 'current_ale_lives'):
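The new `_wrap_state()` normalizes gym's two observation conventions: envs with a `gym.spaces.Dict` observation space already return a keyed dict, everything else gets wrapped so downstream code can always index `state['observation']`. A standalone sketch of the same logic, assuming a gym version that provides `gym.spaces.Dict`:

```python
import gym
import numpy as np

def wrap_state(observation_space, raw):
    # mirror of the _wrap_state logic above, outside the class
    if isinstance(observation_space, gym.spaces.Dict):
        return raw
    return {'observation': raw}

box = gym.spaces.Box(low=0, high=255, shape=(84, 84, 3))
wrapped = wrap_state(box, np.zeros((84, 84, 3)))
assert list(wrapped.keys()) == ['observation']
```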
@@ -131,28 +138,30 @@ class GymEnvironmentWrapper(EnvironmentWrapper):
             action = np.squeeze(action)
             action = np.clip(action, self.action_space_low, self.action_space_high)
 
-        self.observation, self.reward, self.done, self.info = self.env.step(action)
+        state, self.reward, self.done, self.info = self.env.step(action)
+        self.state = self._wrap_state(state)
 
-    def _preprocess_observation(self, observation):
+    def _preprocess_state(self, state):
+        # TODO: move this into wrapper
         if any(env in self.env_id for env in ["Breakout", "Pong"]):
             # crop image
-            observation = observation[34:195, :, :]
-        return observation
+            state['observation'] = state['observation'][34:195, :, :]
+        return state
 
     def _restart_environment_episode(self, force_environment_reset=False):
         # prevent reset of environment if there are ale lives left
         if (hasattr(self.env, 'env') and hasattr(self.env.env, 'ale') and self.env.env.ale.lives() > 0) \
                 and not force_environment_reset and not self.env._past_limit():
-            return self.observation
+            return self.state
 
         if self.seed:
             self.env.seed(self.seed)
 
-        self.observation = self.env.reset()
-        while self.observation is None:
+        self.state = self._wrap_state(self.env.reset())
+        while self.state is None:
             self.step(0)
 
-        return self.observation
+        return self.state
 
     def get_rendered_image(self):
         return self.env.render(mode='rgb_array')
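The Breakout/Pong branch of `_preprocess_state()` keeps rows 34..194 of the raw 210x160 ALE frame, dropping the score area at the top and the border at the bottom. A quick check of the arithmetic:

```python
import numpy as np

frame = np.zeros((210, 160, 3), dtype=np.uint8)  # raw Atari screen
cropped = frame[34:195, :, :]                    # same slice as in the hunk
assert cropped.shape == (161, 160, 3)            # 195 - 34 = 161 rows kept
```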