
Adding target reward and target success (#58)

* Adding target reward

* Adding target success

* Addressing comments

* Using custom_reward_threshold and target_success_rate

* Adding exit message

* Moving success rate to environment

* Making target_success_rate optional
Authored by Ajay Deshpande on 2018-11-12 15:03:43 -08:00, committed by Balaji Subramaniam
Parent 0fe583186e, commit 875d6ef017
17 changed files with 162 additions and 74 deletions
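
Taken together, the hunks below add the same three-part pattern to every environment: a target_success_rate keyword argument that defaults to 1.0, an instance attribute that stores it, and a get_target_success_rate() getter. As a rough, self-contained sketch of that pattern (the classes here are illustrative stand-ins, not rl_coach imports; only the target_success_rate handling mirrors the diff):

# Illustrative stand-ins for the rl_coach classes changed below.
class EnvironmentParameters:
    def __init__(self):
        # New field from this commit; 1.0 means every episode must succeed.
        self.target_success_rate = 1.0

class Environment:
    def __init__(self, target_success_rate: float = 1.0, **kwargs):
        # Stored on the instance so callers can query it later.
        self.target_success_rate = target_success_rate

    def get_target_success_rate(self) -> float:
        return self.target_success_rate

# A hypothetical preset could relax the bar to 90% successful episodes:
params = EnvironmentParameters()
params.target_success_rate = 0.9
env = Environment(target_success_rate=params.target_success_rate)
assert env.get_target_success_rate() == 0.9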

View File

@@ -133,8 +133,8 @@ class CarlaEnvironment(Environment):
                  allow_braking: bool, quality: CarlaEnvironmentParameters.Quality,
                  cameras: List[CameraTypes], weather_id: List[int], experiment_path: str,
                  separate_actions_for_throttle_and_brake: bool,
-                 num_speedup_steps: int, max_speed: float, **kwargs):
-        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters)
+                 num_speedup_steps: int, max_speed: float, target_success_rate: float = 1.0, **kwargs):
+        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters, target_success_rate)

         # server configuration
         self.server_height = server_height
@@ -261,6 +261,8 @@ class CarlaEnvironment(Environment):
             image = self.get_rendered_image()
             self.renderer.create_screen(image.shape[1], image.shape[0])

+        self.target_success_rate = target_success_rate
+
     def _add_cameras(self, settings, cameras, camera_width, camera_height):
         # add a front facing camera
         if CameraTypes.FRONT in cameras:
@@ -461,3 +463,6 @@ class CarlaEnvironment(Environment):
         image = [self.state[camera.name] for camera in self.scene.sensors]
         image = np.vstack(image)
         return image
+
+    def get_target_success_rate(self) -> float:
+        return self.target_success_rate

View File

@@ -66,10 +66,10 @@ control_suite_envs = {':'.join(env): ':'.join(env) for env in suite.BENCHMARKING
 # Environment
 class ControlSuiteEnvironment(Environment):
     def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters,
-                 seed: Union[None, int]=None, human_control: bool=False,
+                 target_success_rate: float=1.0, seed: Union[None, int]=None, human_control: bool=False,
                  observation_type: ObservationType=ObservationType.Measurements,
                  custom_reward_threshold: Union[int, float]=None, **kwargs):
-        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters)
+        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters, target_success_rate)

         self.observation_type = observation_type
@@ -126,6 +126,8 @@ class ControlSuiteEnvironment(Environment):
         if not self.native_rendering:
             self.renderer.create_screen(image.shape[1]*scale, image.shape[0]*scale)

+        self.target_success_rate = target_success_rate
+
     def _update_state(self):
         self.state = {}
@@ -160,3 +162,6 @@ class ControlSuiteEnvironment(Environment):
     def get_rendered_image(self):
         return self.env.physics.render(camera_id=0)
+
+    def get_target_success_rate(self) -> float:
+        return self.target_success_rate

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017 Intel Corporation 
+# Copyright (c) 2017 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -124,8 +124,8 @@ class DoomEnvironment(Environment):
     def __init__(self, level: LevelSelection, seed: int, frame_skip: int, human_control: bool,
                  custom_reward_threshold: Union[int, float], visualization_parameters: VisualizationParameters,
-                 cameras: List[CameraTypes], **kwargs):
-        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters)
+                 cameras: List[CameraTypes], target_success_rate: float=1.0, **kwargs):
+        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters, target_success_rate)

         self.cameras = cameras
@@ -196,6 +196,8 @@ class DoomEnvironment(Environment):
             image = self.get_rendered_image()
             self.renderer.create_screen(image.shape[1], image.shape[0])

+        self.target_success_rate = target_success_rate
+
     def _update_state(self):
         # extract all data from the current state
         state = self.game.get_state()
@@ -227,3 +229,6 @@ class DoomEnvironment(Environment):
         image = [self.state[camera.value[0]] for camera in self.cameras]
         image = np.vstack(image)
         return image
+
+    def get_target_success_rate(self) -> float:
+        return self.target_success_rate

View File

@@ -103,6 +103,9 @@ class EnvironmentParameters(Parameters):
         self.default_output_filter = None
         self.experiment_path = None

+        # Set target reward and target_success if present
+        self.target_success_rate = 1.0
+
     @property
     def path(self):
         return 'rl_coach.environments.environment:Environment'
@@ -111,7 +114,7 @@ class EnvironmentParameters(Parameters):
 class Environment(EnvironmentInterface):
     def __init__(self, level: LevelSelection, seed: int, frame_skip: int, human_control: bool,
                  custom_reward_threshold: Union[int, float], visualization_parameters: VisualizationParameters,
-                 **kwargs):
+                 target_success_rate: float=1.0, **kwargs):
         """
         :param level: The environment level. Each environment can have multiple levels
         :param seed: a seed for the random number generator of the environment
@@ -166,6 +169,9 @@ class Environment(EnvironmentInterface):
         if not self.native_rendering:
             self.renderer = Renderer()

+        # Set target reward and target_success if present
+        self.target_success_rate = target_success_rate
+
     @property
     def action_space(self) -> Union[List[ActionSpace], ActionSpace]:
         """
@@ -469,3 +475,5 @@ class Environment(EnvironmentInterface):
         """
         return np.transpose(self.state['observation'], [1, 2, 0])

+    def get_target_success_rate(self) -> float:
+        return self.target_success_rate
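
The base class now owns the default value and the getter; the commit message's "Adding exit message" step presumably happens in the training loop, which is not part of the hunks shown here. A hedged sketch of how such a check could use the new getter (the helper name, the success bookkeeping, and the message text are assumptions, not code from this commit):

def reached_target_success(env, episode_successes) -> bool:
    """Illustrative check: stop once the fraction of successful episodes
    reaches env.get_target_success_rate(). Not taken from this commit."""
    if not episode_successes:
        return False
    measured = sum(1 for s in episode_successes if s) / len(episode_successes)
    if measured >= env.get_target_success_rate():
        print("Reached the target success rate ({:.2f}) - exiting".format(measured))
        return True
    return False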

View File

@@ -178,11 +178,11 @@ class MaxOverFramesAndFrameskipEnvWrapper(gym.Wrapper):
 # Environment
 class GymEnvironment(Environment):
     def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters,
-                 additional_simulator_parameters: Dict[str, Any] = {}, seed: Union[None, int]=None,
+                 target_success_rate: float=1.0, additional_simulator_parameters: Dict[str, Any] = {}, seed: Union[None, int]=None,
                  human_control: bool=False, custom_reward_threshold: Union[int, float]=None,
                  random_initialization_steps: int=1, max_over_num_frames: int=1, **kwargs):
         super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold,
-                         visualization_parameters)
+                         visualization_parameters, target_success_rate)

         self.random_initialization_steps = random_initialization_steps
         self.max_over_num_frames = max_over_num_frames
@@ -221,7 +221,7 @@ class GymEnvironment(Environment):
             try:
                 self.env = env_class(**self.additional_simulator_parameters)
             except:
-                screen.error("Failed to instantiate Gym environment class %s with arguments %s" % 
+                screen.error("Failed to instantiate Gym environment class %s with arguments %s" %
                              (env_class, self.additional_simulator_parameters), crash=False)
                 raise
             else:
@@ -337,6 +337,8 @@ class GymEnvironment(Environment):
             self.reward_success_threshold = self.env.spec.reward_threshold
             self.reward_space = RewardSpace(1, reward_success_threshold=self.reward_success_threshold)

+        self.target_success_rate = target_success_rate
+
     def _wrap_state(self, state):
         if not isinstance(self.env.observation_space, gym.spaces.Dict):
             return {'observation': state}
@@ -434,3 +436,6 @@ class GymEnvironment(Environment):
         if self.is_mujoco_env:
             self._set_mujoco_camera(0)
         return image
+
+    def get_target_success_rate(self) -> float:
+        return self.target_success_rate
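
On the preset side, the new EnvironmentParameters.target_success_rate field can simply be overridden before the environment is instantiated. A small sketch assuming the usual Coach preset idiom with GymVectorEnvironment (the level name is arbitrary, and whether a given preset actually tracks success is environment-specific):

from rl_coach.environments.gym_environment import GymVectorEnvironment

env_params = GymVectorEnvironment(level='CartPole-v0')
# Override the new field added by this commit (defaults to 1.0).
env_params.target_success_rate = 0.95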

View File

@@ -107,14 +107,14 @@ class StarCraft2EnvironmentParameters(EnvironmentParameters):
 # Environment
 class StarCraft2Environment(Environment):
     def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters,
-                 seed: Union[None, int]=None, human_control: bool=False,
+                 target_success_rate: float=1.0, seed: Union[None, int]=None, human_control: bool=False,
                  custom_reward_threshold: Union[int, float]=None,
                  screen_size: int=84, minimap_size: int=64,
                  feature_minimap_maps_to_use: List=range(7), feature_screen_maps_to_use: List=range(17),
                  observation_type: StarcraftObservationType=StarcraftObservationType.Features,
                  disable_fog: bool=False, auto_select_all_army: bool=True,
                  use_full_action_space: bool=False, **kwargs):
-        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters)
+        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters, target_success_rate)

         self.screen_size = screen_size
         self.minimap_size = minimap_size
@@ -163,11 +163,11 @@ class StarCraft2Environment(Environment):
         """
         feature_screen: [height_map, visibility_map, creep, power, player_id, player_relative, unit_type, selected,
-                         unit_hit_points, unit_hit_points_ratio, unit_energy, unit_energy_ratio, unit_shields, 
+                         unit_hit_points, unit_hit_points_ratio, unit_energy, unit_energy_ratio, unit_shields,
                          unit_shields_ratio, unit_density, unit_density_aa, effects]
         feature_minimap: [height_map, visibility_map, creep, camera, player_id, player_relative, selected]
-        player: [player_id, minerals, vespene, food_cap, food_army, food_workers, idle_worker_dount, 
+        player: [player_id, minerals, vespene, food_cap, food_army, food_workers, idle_worker_dount,
                  army_count, warp_gate_count, larva_count]
         """
         self.screen_shape = np.array(self.env.observation_spec()[0]['feature_screen'])
@@ -192,6 +192,8 @@ class StarCraft2Environment(Environment):
         self.action_space = BoxActionSpace(2, 0, self.screen_size - 1, ["X-Axis, Y-Axis"],
                                            default_action=np.array([self.screen_size/2, self.screen_size/2]))

+        self.target_success_rate = target_success_rate
+
     def _update_state(self):
         timestep = 0
         self.screen = self.last_result[timestep].observation.feature_screen
@@ -244,3 +246,6 @@ class StarCraft2Environment(Environment):
             self.env._run_config.replay_dir = experiment_path
             self.env.save_replay('replays')
         super().dump_video_of_last_episode()
+
+    def get_target_success_rate(self):
+        return self.target_success_rate