Mirror of https://github.com/gryf/coach.git (synced 2025-12-17 11:10:20 +01:00)
Bug fix: prevent crashes when the probability of one of the actions in the policy head is 0
@@ -121,7 +121,7 @@ class ActorCriticAgent(PolicyOptimizationAgent):
             else:
                 action = np.argmax(action_probabilities)
             action_info = {"action_probability": action_probabilities[action], "state_value": state_value}
-            self.entropy.add_sample(-np.sum(action_probabilities * np.log(action_probabilities)))
+            self.entropy.add_sample(-np.sum(action_probabilities * np.log(action_probabilities + eps)))
         else:
             # CONTINUOUS
             state_value, action_values_mean, action_values_std = self.main_network.online_network.predict(observation)
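For illustration, a minimal NumPy sketch of why the unmodified entropy term breaks when one action probability is exactly 0: np.log(0) is -inf, and 0 * -inf evaluates to nan, so the whole entropy sample becomes nan. The eps value below is a stand-in; the actual eps constant is defined elsewhere in the repository.

import numpy as np

eps = 1e-10  # stand-in value; the repo's real eps constant is assumed, not shown here

# softmax output where one action ended up with probability exactly 0
action_probabilities = np.array([0.7, 0.3, 0.0])

# original entropy term: 0 * log(0) -> 0 * -inf -> nan
broken = -np.sum(action_probabilities * np.log(action_probabilities))
print(broken)  # nan (plus a divide-by-zero RuntimeWarning)

# patched entropy term: log(p + eps) stays finite, so the sum does too
fixed = -np.sum(action_probabilities * np.log(action_probabilities + eps))
print(fixed)  # ~0.611, a finite entropy estimate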
@@ -73,7 +73,7 @@ class PolicyGradientsAgent(PolicyOptimizationAgent):
             else:
                 action = np.argmax(action_values)
             action_value = {"action_probability": action_values[action]}
-            self.entropy.add_sample(-np.sum(action_values * np.log(action_values)))
+            self.entropy.add_sample(-np.sum(action_values * np.log(action_values + eps)))
         else:
             # CONTINUOUS
             result = self.main_network.online_network.predict(observation)
@@ -177,7 +177,8 @@ class PolicyHead(Head):
             self.policy_mean = tf.nn.softmax(policy_values, name="policy")

             # define the distributions for the policy and the old policy
-            self.policy_distribution = tf.contrib.distributions.Categorical(probs=self.policy_mean)
+            # (the + eps is to prevent probability 0 which will cause the log later on to be -inf)
+            self.policy_distribution = tf.contrib.distributions.Categorical(probs=(self.policy_mean + eps))
             self.output = self.policy_mean
         else:
             # mean
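A rough sketch of the same failure mode in the policy head, using NumPy to stand in for the log-probability that tf.contrib.distributions.Categorical later computes from the softmax output (the TF1 API itself is not reproduced here); the eps value is again an assumed stand-in:

import numpy as np

eps = 1e-10  # stand-in for the repo's eps constant

# softmax can underflow to an exact 0 for a strongly disfavoured action
policy_mean = np.array([0.999999, 0.000001, 0.0])

# log-prob of the zero-probability action, as the categorical distribution would compute it
print(np.log(policy_mean[2]))        # -inf, which propagates through the loss as nan
print(np.log(policy_mean[2] + eps))  # ~ -23.0, large but finite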