mirror of
https://github.com/gryf/coach.git
synced 2026-02-26 12:15:50 +01:00
coach v0.8.0
This commit is contained in:
28
exploration_policies/__init__.py
Normal file
28
exploration_policies/__init__.py
Normal file
@@ -0,0 +1,28 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.additive_noise import *
|
||||
from exploration_policies.approximated_thompson_sampling_using_dropout import *
|
||||
from exploration_policies.bayesian import *
|
||||
from exploration_policies.boltzmann import *
|
||||
from exploration_policies.bootstrapped import *
|
||||
from exploration_policies.categorical import *
|
||||
from exploration_policies.continuous_entropy import *
|
||||
from exploration_policies.e_greedy import *
|
||||
from exploration_policies.exploration_policy import *
|
||||
from exploration_policies.greedy import *
|
||||
from exploration_policies.ou_process import *
|
||||
from exploration_policies.thompson_sampling import *
|
||||
46
exploration_policies/additive_noise.py
Normal file
46
exploration_policies/additive_noise.py
Normal file
@@ -0,0 +1,46 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import numpy as np
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class AdditiveNoise(ExplorationPolicy):
    """Exploration for continuous action spaces: perturbs the agent's action
    with Gaussian noise whose variance is annealed during training."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        # the variance is expressed as a percentage of the action range
        self.variance = tuning_parameters.exploration.initial_noise_variance_percentage
        self.final_variance = tuning_parameters.exploration.final_noise_variance_percentage
        self.decay_steps = tuning_parameters.exploration.noise_variance_decay_steps
        self.variance_decay_delta = (self.variance - self.final_variance) / float(self.decay_steps)

    def decay_exploration(self):
        """Linearly anneal the noise variance towards its final value,
        clamping any overshoot past the target."""
        if self.variance > self.final_variance:
            self.variance -= self.variance_decay_delta
        elif self.variance < self.final_variance:
            self.variance = self.final_variance

    def get_action(self, action_values):
        """Sample an action from a Gaussian centered on the given action values.

        :param action_values: the action values predicted by the agent
        :return: the noise-perturbed action
        """
        if self.phase == RunPhase.TRAIN:
            self.decay_exploration()
        # std scales with both the current variance and the absolute action range;
        # the result is intentionally NOT clipped to the action range here
        return np.random.normal(action_values, 2 * self.variance * self.action_abs_range)

    def get_control_param(self):
        return self.variance
|
||||
@@ -0,0 +1,40 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class ApproximatedThompsonSamplingUsingDropout(ExplorationPolicy):
    """Approximates Thompson sampling by configuring the network's dropout
    discard probability, so stochastic forward passes act as posterior samples."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        self.dropout_discard_probability = tuning_parameters.exploration.dropout_discard_probability
        self.network = tuning_parameters.network
        # push the configured discard probability into the network's dropout variable once
        self.assign_op = self.network.dropout_discard_probability.assign(self.dropout_discard_probability)
        self.network.sess.run(self.assign_op)

    def decay_dropout(self):
        # the discard probability is currently kept constant over training
        pass

    def get_action(self, action_values):
        # act greedily on the network outputs; any exploration comes from the
        # stochasticity of the dropout-enabled network, not from this choice
        return np.argmax(action_values)

    def get_control_param(self):
        return self.dropout_discard_probability
|
||||
56
exploration_policies/bayesian.py
Normal file
56
exploration_policies/bayesian.py
Normal file
@@ -0,0 +1,56 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.exploration_policy import *
|
||||
import tensorflow as tf
|
||||
|
||||
|
||||
class Bayesian(ExplorationPolicy):
    """Bayesian-style exploration that anneals the network's dropout keep
    probability over the course of training."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        self.keep_probability = tuning_parameters.exploration.initial_keep_probability
        self.final_keep_probability = tuning_parameters.exploration.final_keep_probability
        # the delta may be negative when the keep probability should grow over time
        self.keep_probability_decay_delta = (
            tuning_parameters.exploration.initial_keep_probability - tuning_parameters.exploration.final_keep_probability) \
            / float(tuning_parameters.exploration.keep_probability_decay_steps)
        self.action_space_size = tuning_parameters.env.action_space_size
        self.network = tuning_parameters.network
        self.epsilon = 0

    def decay_keep_probability(self):
        """Move the keep probability one annealing step towards its final
        value, whichever direction the schedule goes, without overshooting."""
        if (self.keep_probability > self.final_keep_probability and self.keep_probability_decay_delta > 0) \
                or (self.keep_probability < self.final_keep_probability and self.keep_probability_decay_delta < 0):
            self.keep_probability -= self.keep_probability_decay_delta

    def get_action(self, action_values):
        """Greedy action selection; the exploration itself is realized by the
        dropout applied inside the network."""
        if self.phase == RunPhase.TRAIN:
            self.decay_keep_probability()
        # propagate the current keep probability to the network on every call
        self.network.curr_keep_prob = self.keep_probability
        return np.argmax(action_values)

    def get_control_param(self):
        return self.keep_probability
|
||||
48
exploration_policies/boltzmann.py
Normal file
48
exploration_policies/boltzmann.py
Normal file
@@ -0,0 +1,48 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class Boltzmann(ExplorationPolicy):
    """Boltzmann (softmax) exploration: actions are sampled from a softmax
    over the action values, with a temperature annealed during training."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        self.temperature = tuning_parameters.exploration.initial_temperature
        self.final_temperature = tuning_parameters.exploration.final_temperature
        self.temperature_decay_delta = (
            tuning_parameters.exploration.initial_temperature - tuning_parameters.exploration.final_temperature) \
            / float(tuning_parameters.exploration.temperature_decay_steps)

    def decay_temperature(self):
        """Linearly anneal the temperature towards its final value, clamping
        any overshoot (consistent with the other annealed policies)."""
        if self.temperature > self.final_temperature:
            self.temperature -= self.temperature_decay_delta
        elif self.temperature < self.final_temperature:
            self.temperature = self.final_temperature

    def get_action(self, action_values):
        """Sample an action from the temperature-scaled softmax of the values.

        :param action_values: a list of values corresponding to each action
        :return: the sampled action index
        """
        if self.phase == RunPhase.TRAIN:
            self.decay_temperature()
        # softmax calculation; subtracting the max before exponentiating avoids
        # overflow for large action values and does not change the distribution
        exp_probabilities = np.exp((action_values - np.max(action_values)) / self.temperature)
        probabilities = exp_probabilities / np.sum(exp_probabilities)
        probabilities[-1] = 1 - np.sum(probabilities[:-1])  # make sure probs sum to 1
        # choose actions according to the probabilities
        return np.random.choice(range(self.action_space_size), p=probabilities)

    def get_control_param(self):
        return self.temperature
|
||||
37
exploration_policies/bootstrapped.py
Normal file
37
exploration_policies/bootstrapped.py
Normal file
@@ -0,0 +1,37 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.e_greedy import *
|
||||
|
||||
|
||||
class Bootstrapped(EGreedy):
    """E-greedy exploration over a bootstrapped (multi-head) Q network: one
    head is sampled and its value predictions drive the e-greedy choice."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        EGreedy.__init__(self, tuning_parameters)
        self.num_heads = tuning_parameters.exploration.architecture_num_q_heads
        self.selected_head = 0

    def select_head(self):
        """Uniformly re-sample which Q head is used for acting."""
        self.selected_head = np.random.randint(self.num_heads)

    def get_action(self, action_values):
        # act e-greedily with respect to the currently selected head only
        head_values = action_values[self.selected_head]
        return EGreedy.get_action(self, head_values)

    def get_control_param(self):
        return self.selected_head
|
||||
33
exploration_policies/categorical.py
Normal file
33
exploration_policies/categorical.py
Normal file
@@ -0,0 +1,33 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class Categorical(ExplorationPolicy):
    """Interprets the given action values as a probability distribution over
    the actions and samples the action directly from it."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)

    def get_action(self, action_values):
        # sample an action index according to the given probabilities
        candidate_actions = range(self.action_space_size)
        return np.random.choice(candidate_actions, p=action_values)

    def get_control_param(self):
        return 0
|
||||
22
exploration_policies/continuous_entropy.py
Normal file
22
exploration_policies/continuous_entropy.py
Normal file
@@ -0,0 +1,22 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import numpy as np
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class ContinuousEntropy(ExplorationPolicy):
    # NOTE(review): empty placeholder — presumably exploration for these agents
    # is driven by an entropy term inside the agent's loss rather than by this
    # policy (judging by the name); confirm against the agents that select it.
    pass
|
||||
70
exploration_policies/e_greedy.py
Normal file
70
exploration_policies/e_greedy.py
Normal file
@@ -0,0 +1,70 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class EGreedy(ExplorationPolicy):
    """Epsilon-greedy exploration. For discrete actions: with probability
    epsilon a uniformly random action is taken, otherwise the greedy one.
    For continuous actions: Gaussian noise is added to the action with
    probability epsilon (continuous e-greedy, see
    http://www.cs.ubc.ca/~van/papers/2017-TOG-deepLoco/2017-TOG-deepLoco.pdf).
    """

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        exploration = tuning_parameters.exploration
        self.epsilon = exploration.initial_epsilon
        self.final_epsilon = exploration.final_epsilon
        self.epsilon_decay_delta = \
            (exploration.initial_epsilon - exploration.final_epsilon) / float(exploration.epsilon_decay_steps)
        self.evaluation_epsilon = exploration.evaluation_epsilon

        # noise schedule for the continuous e-greedy variant
        self.variance = exploration.initial_noise_variance_percentage
        self.final_variance = exploration.final_noise_variance_percentage
        self.decay_steps = exploration.noise_variance_decay_steps
        self.variance_decay_delta = (self.variance - self.final_variance) / float(self.decay_steps)

    def decay_exploration(self):
        """Linearly anneal epsilon (and, for continuous control, the noise
        variance) towards the final values, clamping any overshoot."""
        if self.epsilon > self.final_epsilon:
            self.epsilon -= self.epsilon_decay_delta
        elif self.epsilon < self.final_epsilon:
            self.epsilon = self.final_epsilon

        if self.discrete_controls:
            return
        if self.variance > self.final_variance:
            self.variance -= self.variance_decay_delta
        elif self.variance < self.final_variance:
            self.variance = self.final_variance

    def get_action(self, action_values):
        if self.phase == RunPhase.TRAIN:
            self.decay_exploration()
        # during evaluation a fixed (typically small) epsilon is used instead
        epsilon = self.evaluation_epsilon if self.phase == RunPhase.TEST else self.epsilon

        if self.discrete_controls:
            if np.random.rand() < epsilon:
                return np.random.randint(self.action_space_size)
            return np.argmax(action_values)

        # continuous case: perturb the action with scaled Gaussian noise w.p. epsilon
        noise = np.random.randn(1, self.action_space_size) * self.variance * self.action_abs_range
        return np.squeeze(action_values + (np.random.rand() < epsilon) * noise)

    def get_control_param(self):
        return self.epsilon
|
||||
58
exploration_policies/exploration_policy.py
Normal file
58
exploration_policies/exploration_policy.py
Normal file
@@ -0,0 +1,58 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import numpy as np
|
||||
from utils import *
|
||||
from configurations import *
|
||||
|
||||
|
||||
class ExplorationPolicy:
    """Base class for all exploration policies. An exploration policy maps the
    action values predicted by the agent to the action actually taken."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        # policies start in the heatup phase and are switched via change_phase()
        self.phase = RunPhase.HEATUP
        self.action_space_size = tuning_parameters.env.action_space_size
        self.action_abs_range = tuning_parameters.env_instance.action_space_abs_range
        self.discrete_controls = tuning_parameters.env_instance.discrete_controls

    def reset(self):
        """Reset the exploration policy parameters when needed
        (e.g. at the start of an episode).

        :return: None
        """
        pass

    def get_action(self, action_values):
        """Choose one action according to the exploration policy, given a list
        of values corresponding to each action.

        :param action_values: a list of action values
        :return: the chosen action
        """
        pass

    def change_phase(self, phase):
        """Switch between the running phases of the algorithm.

        :param phase: a RunPhase value (e.g. Heatup or Train)
        :return: None
        """
        self.phase = phase

    def get_control_param(self):
        """Return the parameter currently controlling exploration (for logging)."""
        return 0
|
||||
32
exploration_policies/greedy.py
Normal file
32
exploration_policies/greedy.py
Normal file
@@ -0,0 +1,32 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class Greedy(ExplorationPolicy):
    """Pure exploitation: always takes the highest-valued action."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)

    def get_action(self, action_values):
        # no exploration at all - simply pick the action with the maximal value
        return np.argmax(action_values)

    def get_control_param(self):
        return 0
|
||||
52
exploration_policies/ou_process.py
Normal file
52
exploration_policies/ou_process.py
Normal file
@@ -0,0 +1,52 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import numpy as np
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
# Based on on the description in:
|
||||
# https://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
|
||||
|
||||
# Ornstein-Uhlenbeck process
|
||||
class OUProcess(ExplorationPolicy):
    """Exploration through temporally-correlated noise generated by an
    Ornstein-Uhlenbeck process, added to the agent's continuous action."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        exploration = tuning_parameters.exploration
        self.action_space_size = tuning_parameters.env.action_space_size
        self.mu = float(exploration.mu) * np.ones(self.action_space_size)
        self.theta = exploration.theta
        self.sigma = float(exploration.sigma) * np.ones(self.action_space_size)
        self.dt = exploration.dt
        self.state = np.zeros(self.action_space_size)

    def reset(self):
        # restart the process from the origin
        self.state = np.zeros(self.action_space_size)

    def noise(self):
        """Advance the OU process one step and return its current value."""
        # Euler-Maruyama discretization: dx = theta*(mu - x)*dt + sigma*sqrt(dt)*dW
        drift = self.theta * (self.mu - self.state) * self.dt
        diffusion = self.sigma * np.random.randn(len(self.state)) * np.sqrt(self.dt)
        self.state = self.state + drift + diffusion
        # NOTE(review): only the first component of the state is returned, so the
        # same scalar noise is applied to every action dimension in get_action —
        # confirm this is intended for action spaces with more than one dimension
        return self.state[0]

    def get_action(self, action_values):
        return action_values.squeeze() + self.noise()

    def get_control_param(self):
        return self.state[0]
|
||||
35
exploration_policies/thompson_sampling.py
Normal file
35
exploration_policies/thompson_sampling.py
Normal file
@@ -0,0 +1,35 @@
|
||||
#
|
||||
# Copyright (c) 2017 Intel Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from exploration_policies.exploration_policy import *
|
||||
|
||||
|
||||
class ThompsonSampling(ExplorationPolicy):
    """Thompson sampling over the action values: one value per action is drawn
    from a Gaussian centered on the Q estimate, and the argmax is taken."""

    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        self.action_space_size = tuning_parameters.env.action_space_size

    def get_action(self, action_values):
        # action_values is expected to be a (q_values, uncertainty) pair
        q_values, values_uncertainty = action_values
        # draw one sample per action and act greedily with respect to the draws
        sampled_q_values = np.random.normal(q_values, abs(values_uncertainty))
        return np.argmax(sampled_q_values)

    def get_control_param(self):
        return 0
|
||||
Reference in New Issue
Block a user