bug fix for clipped ppo for discrete controls
@@ -194,7 +194,9 @@ class ClippedPPOAgent(ActorCriticAgent):
         for input_index, input in enumerate(old_policy_distribution):
             inputs['output_1_{}'.format(input_index + 1)] = input
 
-        inputs['output_1_3'] = self.ap.algorithm.clipping_decay_schedule.current_value
+        # update the clipping decay schedule value
+        inputs['output_1_{}'.format(len(old_policy_distribution)+1)] = \
+            self.ap.algorithm.clipping_decay_schedule.current_value
 
         total_loss, losses, unclipped_grads, fetch_result = \
             self.networks['main'].train_and_sync_networks(
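Note (illustrative, not part of the diff): the hardcoded key 'output_1_3' only matches the continuous-control case, where old_policy_distribution holds two tensors (mean and std). For discrete controls it holds a single probabilities tensor, so the clipping decay value has to go into the next free slot after the distribution inputs. A minimal sketch of the indexing, using a hypothetical build_inputs helper rather than Coach's actual code:

# build_inputs is a hypothetical helper for illustration only.
def build_inputs(old_policy_distribution, clipping_decay_value):
    inputs = {}
    # one placeholder per tensor describing the old policy
    for input_index, dist_tensor in enumerate(old_policy_distribution):
        inputs['output_1_{}'.format(input_index + 1)] = dist_tensor
    # the clipping decay value always takes the next free slot
    inputs['output_1_{}'.format(len(old_policy_distribution) + 1)] = clipping_decay_value
    return inputs

# Continuous control: (mean, std) -> the decay value lands at output_1_3.
print(sorted(build_inputs(['mean', 'std'], 0.9)))
# Discrete control: a single probabilities tensor -> it lands at output_1_2,
# which is why the hardcoded 'output_1_3' key was wrong here.
print(sorted(build_inputs(['probs'], 0.9)))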
@@ -52,7 +52,12 @@ class ExplorationPolicy(object):
         :param action_values: A list of action values
         :return: The chosen action
         """
-        pass
+        if self.__class__ == ExplorationPolicy:
+            raise ValueError("The ExplorationPolicy class is an abstract class and should not be used directly. "
+                             "Please set the exploration parameters to point to an inheriting class like EGreedy or "
+                             "AdditiveNoise")
+        else:
+            raise ValueError("The get_action function should be overridden in the inheriting exploration class")
 
     def change_phase(self, phase):
         """
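Note (illustrative, not part of the diff): with this change, calling get_action on the base class or on a subclass that forgot to override it fails loudly instead of silently returning None from pass. A minimal, simplified sketch of the behaviour; the Greedy subclass below is hypothetical, while Coach itself ships policies such as EGreedy and AdditiveNoise:

# Simplified stand-in for the changed base class, for illustration only.
class ExplorationPolicy(object):
    def get_action(self, action_values):
        if self.__class__ == ExplorationPolicy:
            raise ValueError("The ExplorationPolicy class is an abstract class and should not be used directly.")
        else:
            raise ValueError("The get_action function should be overridden in the inheriting exploration class")

# Hypothetical subclass that overrides get_action and therefore keeps working.
class Greedy(ExplorationPolicy):
    def get_action(self, action_values):
        # pick the index of the highest-valued action
        return max(range(len(action_values)), key=lambda i: action_values[i])

print(Greedy().get_action([0.1, 0.7, 0.2]))        # -> 1
# ExplorationPolicy().get_action([0.1, 0.7, 0.2])  # would now raise ValueError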