
Multiple improvements and bug fixes (#66)

* Multiple improvements and bug fixes:

    * Use lazy stacking to save memory when using a replay buffer (see the sketch after this list)
    * Remove step counting for evaluation episodes
    * Reset the game between heatup and training
    * Major bug fixes in NEC (it now reproduces the paper's results for Pong)
    * Image input rescaling to 0-1 is now optional
    * Change the terminal title to the experiment name
    * Observation cropping for Atari is now optional
    * Add a random number of no-op actions for gym to match the DQN paper
    * Fix a bug where evaluation episodes would not start with the maximum possible number of ALE lives
    * Add a script for plotting the results of an experiment over all the Atari games
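The lazy-stacking item above is the kind of change a small sketch makes concrete. The class below is a hypothetical illustration rather than the Coach implementation: the replay buffer keeps references to the individual frames and only copies them into a stacked array when a transition is actually sampled, so overlapping frames of consecutive transitions are stored once instead of being duplicated.

```python
import numpy as np

class LazyStack:
    """Holds references to the stacked frames and concatenates them only on access."""
    def __init__(self, frames, axis=-1):
        self._frames = frames      # list of np.ndarray references, no copy made here
        self._axis = axis

    def to_array(self):
        # the expensive copy happens only when the observation is actually used
        return np.stack(self._frames, axis=self._axis)

# usage: consecutive observations share 3 of their 4 frames instead of duplicating them
frames = [np.zeros((84, 84), dtype=np.uint8) for _ in range(5)]
obs_t      = LazyStack(frames[0:4])
obs_t_next = LazyStack(frames[1:5])   # overlapping frames are stored once
batch = obs_t.to_array()              # (84, 84, 4) array built only when sampled
```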
Itai Caspi, committed by GitHub, 2018-02-26 12:29:07 +02:00
parent 4fe9cba445
commit a7206ed702
20 changed files with 465 additions and 158 deletions


@@ -83,6 +83,11 @@ class AnnoyDictionary(object):
     # Returns the stored embeddings and values of the closest embeddings
     def query(self, keys, k):
+        if not self.has_enough_entries(k):
+            # this will only happen when the DND is not yet populated with enough entries, which is only during heatup
+            # these values won't be used and therefore they are meaningless
+            return [0.0], [0.0], [0]
+
         _, indices = self._get_k_nearest_neighbors_indices(keys, k)
         embeddings = []
@@ -94,7 +99,7 @@ class AnnoyDictionary(object):
         self.current_timestamp += 1
-        return embeddings, values
+        return embeddings, values, indices

     def has_enough_entries(self, k):
         return self.curr_size > k and (self.built_capacity > k)
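For orientation, the neighbor indices that AnnoyDictionary.query now also returns come from the approximate nearest-neighbor index. Assuming the index is an annoy AnnoyIndex (as the class name suggests), the lookup behind _get_k_nearest_neighbors_indices would look roughly like the sketch below; the names, width, and metric are assumptions, not the actual Coach code:

```python
from annoy import AnnoyIndex

key_width = 512                                # assumed embedding width
index = AnnoyIndex(key_width, 'euclidean')     # metric choice is an assumption
# ... index.add_item(i, key) for every stored embedding, then index.build(n_trees)

def get_k_nearest_neighbors_indices(keys, k):
    # for each query key, return the item ids of its k closest stored embeddings
    return [index.get_nns_by_vector(key, k) for key in keys]
```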
@@ -133,9 +138,11 @@ class AnnoyDictionary(object):
 class QDND:
-    def __init__(self, dict_size, key_width, num_actions, new_value_shift_coefficient=0.1, key_error_threshold=0.01):
+    def __init__(self, dict_size, key_width, num_actions, new_value_shift_coefficient=0.1, key_error_threshold=0.01,
+                 learning_rate=0.01):
         self.num_actions = num_actions
         self.dicts = []
+        self.learning_rate = learning_rate

         # create a dict for each action
         for a in range(num_actions):
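The new learning_rate argument is not used in the hunks shown here. In the NEC paper, values already stored in the DND are moved toward the freshly computed N-step return with a tabular-style step size, so learning_rate presumably serves an update of this form (with alpha standing for learning_rate and Q_i for a stored value):

```latex
Q_i \leftarrow Q_i + \alpha \left( Q^{(N)}(s, a) - Q_i \right)
```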
@@ -155,16 +162,18 @@ class QDND:
             self.dicts[a].add(curr_action_embeddings, curr_action_values)
         return True

-    def query(self, embeddings, actions, k):
+    def query(self, embeddings, action, k):
         # query for nearest neighbors to the given embeddings
         dnd_embeddings = []
         dnd_values = []
-        for i, action in enumerate(actions):
-            embedding, value = self.dicts[action].query([embeddings[i]], k)
+        dnd_indices = []
+        for i in range(len(embeddings)):
+            embedding, value, indices = self.dicts[action].query([embeddings[i]], k)
             dnd_embeddings.append(embedding[0])
             dnd_values.append(value[0])
+            dnd_indices.append(indices[0])

-        return dnd_embeddings, dnd_values
+        return dnd_embeddings, dnd_values, dnd_indices

     def has_enough_entries(self, k):
         # check if each of the action dictionaries has at least k entries
@@ -193,4 +202,5 @@ def load_dnd(model_dir):
             DND.dicts[a].index.add_item(idx, key)
         DND.dicts[a].index.build(50)
     return DND
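Taken together, the QDND changes mean a caller now queries one action at a time and also receives the DND indices of the returned neighbors, which makes it possible to update the stored values afterwards. A minimal usage sketch, assuming only the signatures visible in this diff (the sizes below are made up):

```python
import numpy as np
# QDND as defined in the diffed file above

# hypothetical sizes; key_width must match the width of the network's embedding
dnd = QDND(dict_size=100000, key_width=512, num_actions=4,
           new_value_shift_coefficient=0.1, key_error_threshold=0.01,
           learning_rate=0.01)

embeddings = [np.random.randn(512) for _ in range(32)]   # one embedding per sample
action = 2                                                # a single action per call now
k = 50

if dnd.has_enough_entries(k):
    dnd_embeddings, dnd_values, dnd_indices = dnd.query(embeddings, action, k)
    # dnd_indices[i] holds the ids of the k neighbors used for sample i, so the
    # corresponding stored values can later be nudged toward updated targets
```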