1 star · 0 forks — mirror of https://github.com/gryf/coach.git, synced 2025-12-18 19:50:17 +01:00

Merge branch 'master' into imports

This commit is contained in:
Roman Dobosz
2018-04-24 07:43:04 +02:00
124 changed files with 10828 additions and 17 deletions

View File

@@ -177,7 +177,7 @@ class CarlaEnvironmentWrapper(ew.EnvironmentWrapper):
                 - np.abs(self.control.steer) * 10
         # update measurements
-        self.observation = {
+        self.state = {
             'observation': sensor_data['CameraRGB'].data,
             'measurements': [measurements.player_measurements.forward_speed],
         }
@@ -221,9 +221,9 @@ class CarlaEnvironmentWrapper(ew.EnvironmentWrapper):
         self.game.start_episode(self.iterator_start_positions)
         # start the game with some initial speed
-        observation = None
+        state = None
         for i in range(self.num_speedup_steps):
-            observation = self.step([1.0, 0])['observation']
-            self.observation = observation
+            state = self.step([1.0, 0])['state']
+            self.state = state
-        return observation
+        return state

View File

@@ -137,7 +137,7 @@ class DoomEnvironmentWrapper(ew.EnvironmentWrapper):
         # extract all data from the current state
         state = self.game.get_state()
         if state is not None and state.screen_buffer is not None:
-            self.observation = {
+            self.state = {
                 'observation': state.screen_buffer,
                 'measurements': state.game_variables,
             }

View File

@@ -91,7 +91,7 @@ class GymEnvironmentWrapper(ew.EnvironmentWrapper):
             self.timestep_limit = self.env.spec.timestep_limit
         else:
             self.timestep_limit = None
-        self.measurements_size = len(self.step(0)['info'].keys())
+        self.measurements_size = (len(self.step(0)['info'].keys()),)
         self.random_initialization_steps = self.tp.env.random_initialization_steps
     def _wrap_state(self, state):