mirror of
https://github.com/gryf/coach.git
synced 2026-02-25 11:45:46 +01:00
RL in Large Discrete Action Spaces - Wolpertinger Agent (#394)
* Currently this is specific to the case of discretizing a continuous action space. It can easily be adapted to other cases by feeding the kNN differently, and by removing the usage of a discretizing output action filter
This commit is contained in:
@@ -57,7 +57,7 @@ class AnnoyDictionary(object):
|
||||
|
||||
self.built_capacity = 0
|
||||
|
||||
def add(self, keys, values, additional_data=None):
|
||||
def add(self, keys, values, additional_data=None, force_rebuild_tree=False):
|
||||
if not additional_data:
|
||||
additional_data = [None] * len(keys)
|
||||
|
||||
@@ -96,7 +96,7 @@ class AnnoyDictionary(object):
|
||||
if len(self.buffered_indices) >= self.min_update_size:
|
||||
self.min_update_size = max(self.initial_update_size, int(self.curr_size * 0.02))
|
||||
self._rebuild_index()
|
||||
elif self.rebuild_on_every_update:
|
||||
elif force_rebuild_tree or self.rebuild_on_every_update:
|
||||
self._rebuild_index()
|
||||
|
||||
self.current_timestamp += 1
|
||||
|
||||
Reference in New Issue
Block a user