mirror of https://github.com/gryf/coach.git synced 2025-12-17 11:10:20 +01:00

bug fix - preventing crashes when the probability of one of the actions is 0 in the policy head

This commit is contained in:
Itai Caspi
2017-10-31 10:49:50 +02:00
parent 1918f16079
commit 913ab75e8a
4 changed files with 5 additions and 3 deletions


@@ -121,7 +121,7 @@ class ActorCriticAgent(PolicyOptimizationAgent):
             else:
                 action = np.argmax(action_probabilities)
             action_info = {"action_probability": action_probabilities[action], "state_value": state_value}
-            self.entropy.add_sample(-np.sum(action_probabilities * np.log(action_probabilities)))
+            self.entropy.add_sample(-np.sum(action_probabilities * np.log(action_probabilities + eps)))
         else:
             # CONTINUOUS
             state_value, action_values_mean, action_values_std = self.main_network.online_network.predict(observation)
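
For context (this sketch is not part of the commit): the entropy signal tracked here multiplies each action probability by its log, so a policy that assigns probability 0 to any action produces log(0) = -inf and 0 * -inf = nan, which is exactly what the added eps guards against. A minimal numpy illustration:

    import numpy as np

    eps = np.finfo(np.float32).eps

    # A (nearly) deterministic discrete policy: one action gets all the mass.
    probs = np.array([1.0, 0.0, 0.0], dtype=np.float32)

    # Without eps: np.log(0) is -inf and 0 * -inf is nan, so the whole sum becomes nan.
    entropy_raw = -np.sum(probs * np.log(probs))            # nan (plus a RuntimeWarning)

    # With eps: log(eps) is large but finite, and multiplying it by 0 gives 0.
    entropy_safe = -np.sum(probs * np.log(probs + eps))     # ~0.0

    print(entropy_raw, entropy_safe)

The same pattern is applied to the PolicyGradientsAgent hunk below.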


@@ -73,7 +73,7 @@ class PolicyGradientsAgent(PolicyOptimizationAgent):
             else:
                 action = np.argmax(action_values)
             action_value = {"action_probability": action_values[action]}
-            self.entropy.add_sample(-np.sum(action_values * np.log(action_values)))
+            self.entropy.add_sample(-np.sum(action_values * np.log(action_values + eps)))
         else:
             # CONTINUOUS
             result = self.main_network.online_network.predict(observation)


@@ -177,7 +177,8 @@ class PolicyHead(Head):
             self.policy_mean = tf.nn.softmax(policy_values, name="policy")
             # define the distributions for the policy and the old policy
-            self.policy_distribution = tf.contrib.distributions.Categorical(probs=self.policy_mean)
+            # (the + eps is to prevent probability 0 which will cause the log later on to be -inf)
+            self.policy_distribution = tf.contrib.distributions.Categorical(probs=(self.policy_mean + eps))
             self.output = self.policy_mean
         else:
             # mean
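
Again for context rather than as part of the commit: the Categorical distribution built from policy_mean is later queried for log-probabilities in the policy loss, and the inline comment in the diff notes that a probability of exactly 0 would make that log -inf. A rough numpy sketch of what the + eps changes (variable names are illustrative only):

    import numpy as np

    eps = np.finfo(np.float32).eps

    # A softmax output that has saturated to an exact 0 in float32.
    policy_mean = np.array([1.0, 0.0], dtype=np.float32)
    action = 1

    log_prob_raw = np.log(policy_mean[action])          # -inf -> nan/inf in the loss downstream
    log_prob_safe = np.log(policy_mean[action] + eps)   # about -15.9, large but finite

    print(log_prob_raw, log_prob_safe)

Adding eps leaves the probabilities very slightly unnormalized, which is the usual trade-off for this kind of numerical guard.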


@@ -23,6 +23,7 @@ from subprocess import call, Popen
 killed_processes = []
+eps = np.finfo(np.float32).eps
 
 class Enum(object):
     def __init__(self):