
RL in Large Discrete Action Spaces - Wolpertinger Agent (#394)

* Currently this is specific to the case of discretizing a continuous action space. It can easily be adapted to other cases by feeding the kNN differently and removing the use of the discretizing output action filter.
Gal Leibovich
2019-09-08 12:53:49 +03:00
committed by GitHub
parent fc50398544
commit 138ced23ba
46 changed files with 1193 additions and 51 deletions
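For readers unfamiliar with the approach: the Wolpertinger agent (Dulac-Arnold et al., 2015, "Deep Reinforcement Learning in Large Discrete Action Spaces") has the actor emit a continuous proto-action, looks up its k nearest neighbors among the discrete actions, and lets the critic pick the highest-Q candidate. Below is a minimal, self-contained sketch of that selection step for the uniformly discretized 1-D case this commit targets; the names (wolpertinger_select, q_values_fn, k) are illustrative, not Coach's API.

import numpy as np

def wolpertinger_select(proto_action: np.ndarray,
                        discrete_actions: np.ndarray,
                        q_values_fn,
                        k: int = 5) -> int:
    """Return the index of the best discrete action among the k nearest
    neighbors of the actor's continuous proto-action."""
    # kNN over the discretized action set (Euclidean distance)
    dists = np.linalg.norm(discrete_actions - proto_action, axis=1)
    neighbor_idx = np.argsort(dists)[:k]
    # Refine with the critic: evaluate Q only on the k candidates
    q_values = np.array([q_values_fn(discrete_actions[i]) for i in neighbor_idx])
    return int(neighbor_idx[np.argmax(q_values)])

# Example: 101 actions discretizing [-1, 1]; a toy critic that prefers 0.3
actions = np.linspace(-1, 1, 101).reshape(-1, 1)
best = wolpertinger_select(np.array([0.25]), actions, lambda a: -abs(a[0] - 0.3))
print(actions[best])  # -> a discrete action near 0.3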

rl_coach/spaces.py

@@ -385,7 +385,8 @@ class DiscreteActionSpace(ActionSpace):
     """
     A discrete action space with action indices as actions
     """
-    def __init__(self, num_actions: int, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None):
+    def __init__(self, num_actions: int, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None,
+                 filtered_action_space=None):
         super().__init__(1, low=0, high=num_actions-1, descriptions=descriptions)
         # the number of actions is mapped to high
@@ -395,6 +396,9 @@ class DiscreteActionSpace(ActionSpace):
         else:
             self.default_action = default_action
+        if filtered_action_space is not None:
+            self.filtered_action_space = filtered_action_space
+
     @property
     def actions(self) -> List[ActionType]:
         return list(range(0, int(self.high[0]) + 1))
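
Based on this diff, the new argument is simply stored as an attribute when one is given; presumably it keeps a handle to the original (pre-discretization) space so consumers such as the agent's kNN can recover real-valued actions. A usage sketch, assuming rl_coach is installed; BoxActionSpace is Coach's continuous action space, and any role of filtered_action_space beyond being stored is an inference from this diff, not documented behavior.

import numpy as np
from rl_coach.spaces import BoxActionSpace, DiscreteActionSpace

# The underlying continuous space that the discrete indices are filtered from
continuous_space = BoxActionSpace(shape=1, low=-1.0, high=1.0)

# 11 discrete bins over the continuous space; keep a handle to the original
# (filtered) space so a kNN consumer can map indices back to real actions
discrete_space = DiscreteActionSpace(num_actions=11,
                                     filtered_action_space=continuous_space)

assert discrete_space.filtered_action_space is continuous_space
print(discrete_space.actions)  # [0, 1, ..., 10]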