mirror of https://github.com/gryf/coach.git (synced 2026-02-18 15:35:56 +01:00)
pre-release 0.10.0
0	rl_coach/environments/toy_problems/__init__.py	Normal file
82	rl_coach/environments/toy_problems/bit_flip.py	Normal file
@@ -0,0 +1,82 @@
import numpy as np
import gym
from gym import spaces
import random


class BitFlip(gym.Env):
    """Bit-flipping environment: flip one bit per step until the state matches the goal."""
    metadata = {
        'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30
    }

    def __init__(self, bit_length=16, max_steps=None, mean_zero=False):
        super(BitFlip, self).__init__()
        if bit_length < 1:
            raise ValueError('bit_length must be >= 1, found {}'.format(bit_length))
        self.bit_length = bit_length
        self.mean_zero = mean_zero

        if max_steps is None:
            # default to bit_length
            self.max_steps = bit_length
        elif max_steps == 0:
            # 0 means no step limit
            self.max_steps = None
        else:
            self.max_steps = max_steps

        # spaces documentation: https://gym.openai.com/docs/
        self.action_space = spaces.Discrete(bit_length)
        self.observation_space = spaces.Dict({
            'state': spaces.Box(low=0, high=1, shape=(bit_length, )),
            'desired_goal': spaces.Box(low=0, high=1, shape=(bit_length, )),
            'achieved_goal': spaces.Box(low=0, high=1, shape=(bit_length, ))
        })

        self.reset()

    def _terminate(self):
        # the episode ends when the goal is reached or when the step budget (if any) is exhausted
        if (self.state == self.goal).all():
            return True
        return self.max_steps is not None and self.steps >= self.max_steps

    def _reward(self):
        # sparse reward: -1 until the goal is reached, 0 afterwards
        return -1 if (self.state != self.goal).any() else 0

    def step(self, action):
        # action is an int in the range [0, self.bit_length) selecting the bit to flip
        self.state[action] = int(not self.state[action])
        self.steps += 1

        return self._get_obs(), self._reward(), self._terminate(), {}

    def reset(self):
        self.steps = 0

        self.state = np.array([random.choice([1, 0]) for _ in range(self.bit_length)])

        # make sure the goal is not the initial state
        self.goal = self.state
        while (self.goal == self.state).all():
            self.goal = np.array([random.choice([1, 0]) for _ in range(self.bit_length)])

        return self._get_obs()

    def _mean_zero(self, x):
        # optionally rescale observations from {0, 1} to {-1, 1}
        if self.mean_zero:
            return (x - 0.5) / 0.5
        else:
            return x

    def _get_obs(self):
        return {
            'state': self._mean_zero(self.state),
            'desired_goal': self._mean_zero(self.goal),
            'achieved_goal': self._mean_zero(self.state)
        }

    def render(self, mode='human', close=False):
        # draw each bit as a 20x20 patch in an RGB image
        observation = np.zeros((20, 20 * self.bit_length, 3))
        for bit_idx, (state_bit, goal_bit) in enumerate(zip(self.state, self.goal)):
            # green if the bit matches
            observation[:, bit_idx * 20:(bit_idx + 1) * 20, 1] = (state_bit == goal_bit) * 255
            # red if the bit doesn't match
            observation[:, bit_idx * 20:(bit_idx + 1) * 20, 0] = (state_bit != goal_bit) * 255
        return observation
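For context, a minimal usage sketch of the new environment (illustrative only, not part of this commit; it assumes gym and numpy are installed and that the import path matches the file location above):

from rl_coach.environments.toy_problems.bit_flip import BitFlip

env = BitFlip(bit_length=8)
obs = env.reset()  # dict with 'state', 'desired_goal' and 'achieved_goal' arrays
done = False
while not done:
    action = env.action_space.sample()          # pick one of the 8 bits to flip
    obs, reward, done, info = env.step(action)  # reward is -1 until state == goal, then 0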
126	rl_coach/environments/toy_problems/exploration_chain.py	Normal file
@@ -0,0 +1,126 @@
import numpy as np
import gym
from gym import spaces
from enum import Enum


class ExplorationChain(gym.Env):
    """A chain MDP where the agent must keep moving right to reach the highly rewarding far state."""
    metadata = {
        'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30
    }

    class ObservationType(Enum):
        OneHot = 0
        Therm = 1

    def __init__(self, chain_length=16, start_state=1, max_steps=None, observation_type=ObservationType.Therm,
                 left_state_reward=1/1000, right_state_reward=1, simple_render=True):
        super().__init__()
        if chain_length <= 3:
            raise ValueError('Chain length must be > 3, found {}'.format(chain_length))
        if not 0 <= start_state < chain_length:
            raise ValueError('The start state should be within the chain bounds, found {}'.format(start_state))
        self.chain_length = chain_length
        self.start_state = start_state
        self.max_steps = max_steps
        self.observation_type = observation_type
        self.left_state_reward = left_state_reward
        self.right_state_reward = right_state_reward
        self.simple_render = simple_render

        # spaces documentation: https://gym.openai.com/docs/
        self.action_space = spaces.Discrete(2)  # 0 -> go left, 1 -> go right
        self.observation_space = spaces.Box(0, 1, shape=(chain_length,))  # alternatively: spaces.MultiBinary(chain_length)

        self.reset()

    def _terminate(self):
        # the episode ends only when the step budget (if any) is exhausted
        return self.max_steps is not None and self.steps >= self.max_steps

    def _reward(self):
        # small reward at the leftmost state, large reward at the rightmost state, 0 elsewhere
        if self.state == 0:
            return self.left_state_reward
        elif self.state == self.chain_length - 1:
            return self.right_state_reward
        else:
            return 0

    def step(self, action):
        # action is 0 (left) or 1 (right); moves past the chain ends leave the state unchanged
        if action == 0:
            if 0 < self.state:
                self.state -= 1
        elif action == 1:
            if self.state < self.chain_length - 1:
                self.state += 1
        else:
            raise ValueError("An invalid action was given. The available actions are 0 or 1, found {}".format(action))

        self.steps += 1

        return self._get_obs(), self._reward(), self._terminate(), {}

    def reset(self):
        self.steps = 0

        self.state = self.start_state

        return self._get_obs()

    def _get_obs(self):
        # OneHot: a single 1 at the current state; Therm: 1s up to and including the current state
        self.observation = np.zeros((self.chain_length,))
        if self.observation_type == self.ObservationType.OneHot:
            self.observation[self.state] = 1
        elif self.observation_type == self.ObservationType.Therm:
            self.observation[:(self.state + 1)] = 1

        return self.observation

    def render(self, mode='human', close=False):
        if self.simple_render:
            # a 20-pixel-high strip with the current state drawn as a white square
            observation = np.zeros((20, 20 * self.chain_length))
            observation[:, self.state * 20:(self.state + 1) * 20] = 255
            return observation
        else:
            # lazy loading of networkx and matplotlib to allow using the environment without installing them if
            # necessary
            import networkx as nx
            from networkx.drawing.nx_agraph import graphviz_layout
            import matplotlib.pyplot as plt

            if not hasattr(self, 'G'):
                # build the chain graph once: edges to the right, to the left, and self-loops
                self.states = list(range(self.chain_length))
                self.G = nx.DiGraph(directed=True)
                for i, origin_state in enumerate(self.states):
                    if i < self.chain_length - 1:
                        self.G.add_edge(origin_state, origin_state + 1, weight=0.5)
                    if i > 0:
                        self.G.add_edge(origin_state, origin_state - 1, weight=0.5)
                    if i == 0 or i < self.chain_length - 1:
                        self.G.add_edge(origin_state, origin_state, weight=0.5)

            fig = plt.gcf()
            if np.all(fig.get_size_inches() != [10, 2]):
                fig.set_size_inches(5, 1)
            # highlight the current state in red, all other states in yellow
            color = ['y'] * len(self.G)
            color[self.state] = 'r'
            options = {
                'node_color': color,
                'node_size': 50,
                'width': 1,
                'arrowstyle': '-|>',
                'arrowsize': 5,
                'font_size': 6
            }
            pos = graphviz_layout(self.G, prog='dot', args='-Grankdir=LR')
            nx.draw_networkx(self.G, pos, arrows=True, **options)
            fig.canvas.draw()
            # convert the rendered figure to an RGB numpy array
            data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
            data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
            return data
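For context, a minimal usage sketch of the chain environment (illustrative only, not part of this commit; max_steps is passed explicitly here since episodes only end when the step budget runs out):

from rl_coach.environments.toy_problems.exploration_chain import ExplorationChain

env = ExplorationChain(chain_length=16, max_steps=100,
                       observation_type=ExplorationChain.ObservationType.OneHot)
obs = env.reset()                      # one-hot vector marking the start state
obs, reward, done, info = env.step(1)  # 1 -> go right; reward is 1 only at the rightmost state
frame = env.render()                   # with simple_render=True, a 20 x (20 * chain_length) array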