1
0
mirror of https://github.com/gryf/coach.git synced 2025-12-17 19:20:19 +01:00

Bug fix for clipped PPO for discrete controls

This commit is contained in:
itaicaspi-intel
2018-09-18 10:37:42 +03:00
parent abaa58b559
commit 73cc6e39d0
2 changed files with 9 additions and 2 deletions

View File

@@ -194,7 +194,9 @@ class ClippedPPOAgent(ActorCriticAgent):
for input_index, input in enumerate(old_policy_distribution):
inputs['output_1_{}'.format(input_index + 1)] = input
inputs['output_1_3'] = self.ap.algorithm.clipping_decay_schedule.current_value
# update the clipping decay schedule value
inputs['output_1_{}'.format(len(old_policy_distribution)+1)] = \
self.ap.algorithm.clipping_decay_schedule.current_value
total_loss, losses, unclipped_grads, fetch_result = \
self.networks['main'].train_and_sync_networks(

View File

@@ -52,7 +52,12 @@ class ExplorationPolicy(object):
:param action_values: A list of action values
:return: The chosen action
"""
pass
if self.__class__ == ExplorationPolicy:
raise ValueError("The ExplorationPolicy class is an abstract class and should not be used directly. "
"Please set the exploration parameters to point to an inheriting class like EGreedy or "
"AdditiveNoise")
else:
raise ValueError("The get_action function should be overridden in the inheriting exploration class")
def change_phase(self, phase):
"""