RL in Large Discrete Action Spaces - Wolpertinger Agent (#394)
* Currently this is specific to the case of discretizing a continuous action space. It can easily be adapted to other cases by feeding the kNN differently and removing the discretizing output action filter.
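For context, the Wolpertinger architecture (Dulac-Arnold et al., 2015, the paper this agent implements) maps the actor's continuous proto-action to its k nearest discrete actions and lets the critic pick the best of those candidates. Below is a minimal sketch of that selection step, assuming a NumPy array of discretized actions and a callable critic `q_fn`; these are illustrative stand-ins, not Coach's API:

```python
import numpy as np

def wolpertinger_action(proto_action, discrete_actions, q_fn, k=5):
    """Pick a discrete action for a continuous proto-action, Wolpertinger-style.

    A sketch, not Coach's implementation: `q_fn` stands in for the critic,
    `discrete_actions` for the discretized action set (shape: (n, action_dim)).
    """
    # 1) kNN lookup: find the k discrete actions closest to the proto-action
    #    (brute force here; a real agent would use a dedicated kNN index).
    dists = np.linalg.norm(discrete_actions - proto_action, axis=1)
    candidates = discrete_actions[np.argsort(dists)[:k]]
    # 2) refinement: let the critic choose the highest-valued candidate.
    q_values = np.array([q_fn(a) for a in candidates])
    return candidates[int(np.argmax(q_values))]

# Example: a 1-D continuous range [-1, 1] discretized into 9 actions.
actions = np.linspace(-1.0, 1.0, 9).reshape(-1, 1)
best = wolpertinger_action(np.array([0.3]), actions,
                           q_fn=lambda a: -abs(a[0] - 0.25))
```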
```diff
@@ -48,7 +48,8 @@ class PartialDiscreteActionSpaceMap(ActionFilter):
 
     def get_unfiltered_action_space(self, output_action_space: ActionSpace) -> DiscreteActionSpace:
         self.output_action_space = output_action_space
-        self.input_action_space = DiscreteActionSpace(len(self.target_actions), self.descriptions)
+        self.input_action_space = DiscreteActionSpace(len(self.target_actions), self.descriptions,
+                                                      filtered_action_space=output_action_space)
         return self.input_action_space
 
     def filter(self, action: ActionType) -> ActionType:
```
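The new `filtered_action_space` argument gives the discrete input space a backlink to the continuous space it was discretized from, so a consumer that only ever sees the discrete space (such as the Wolpertinger agent's kNN) can still reach the underlying continuous actions. A hypothetical usage sketch: only `get_unfiltered_action_space` and the `filtered_action_space` keyword come from the diff; the variable names are illustrative, and the attribute access assumes the constructor stores the keyword under the same name:

```python
# `action_filter` is a PartialDiscreteActionSpaceMap, `continuous_space` the
# environment's ActionSpace (illustrative names, not from the commit).
discrete_space = action_filter.get_unfiltered_action_space(continuous_space)

# New with this commit: the discrete space remembers the space it filters.
assert discrete_space.filtered_action_space is continuous_space
```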