
Enabling-more-agents-for-Batch-RL-and-cleanup (#258)

Allows the last training batch drawn to be smaller than batch_size, adds support for more agents in Batch RL by adding softmax with temperature to the corresponding heads, adds a CartPole_QR_DQN preset with a golden test, and includes general cleanups.
Gal Leibovich
2019-03-21 16:10:29 +02:00
committed by GitHub
parent abec59f367
commit 6e08c55ad5
24 changed files with 152 additions and 69 deletions
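
Note on the "softmax with temperature" mentioned in the description: this excerpt does not show the head changes themselves, but the operation they add can be sketched in plain NumPy as follows (a minimal illustration only; softmax_with_temperature is a hypothetical standalone helper, while the actual heads build the equivalent op inside the network graph):

import numpy as np

def softmax_with_temperature(q_values, temperature=1.0):
    # Convert a batch of Q-values (shape [batch, num_actions]) into action
    # probabilities. Higher temperature -> closer to uniform; lower -> greedier.
    scaled = q_values / temperature
    # Subtract the per-row max for numerical stability before exponentiating.
    scaled = scaled - scaled.max(axis=1, keepdims=True)
    exp = np.exp(scaled)
    return exp / exp.sum(axis=1, keepdims=True)

# Example: two transitions with two actions each (e.g. a CartPole-like setup).
probs = softmax_with_temperature(np.array([[1.0, 2.0], [0.5, 0.1]]), temperature=0.5)
assert np.allclose(probs.sum(axis=1), 1.0)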


@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import math
 from collections import namedtuple
 import numpy as np
@@ -55,19 +56,18 @@ class OpeManager(object)
         all_reward_model_rewards, all_policy_probs, all_old_policy_probs = [], [], []
         all_v_values_reward_model_based, all_v_values_q_model_based, all_rewards, all_actions = [], [], [], []
-        for i in range(int(len(dataset_as_transitions) / batch_size) + 1):
+        for i in range(math.ceil(len(dataset_as_transitions) / batch_size)):
             batch = dataset_as_transitions[i * batch_size: (i + 1) * batch_size]
             batch_for_inference = Batch(batch)
             all_reward_model_rewards.append(reward_model.predict(
                 batch_for_inference.states(network_keys)))
-            # TODO can we get rid of the 'output_heads[0]', and have some way of a cleaner API?
+            # we always use the first Q head to calculate OPEs. might want to change this in the future.
+            # for instance, this means that for bootstrapped we always use the first QHead to calculate the OPEs.
             q_values, sm_values = q_network.predict(batch_for_inference.states(network_keys),
-                                                    outputs=[q_network.output_heads[0].output,
+                                                    outputs=[q_network.output_heads[0].q_values,
                                                              q_network.output_heads[0].softmax])
-            # TODO why is this needed?
-            q_values = q_values[0]
             all_policy_probs.append(sm_values)
             all_v_values_reward_model_based.append(np.sum(all_policy_probs[-1] * all_reward_model_rewards[-1], axis=1))
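
The loop change in the hunk above replaces int(...) + 1 with math.ceil: both expressions cover a partial trailing batch, but the old one also produced a spurious empty batch whenever the dataset size was an exact multiple of batch_size. A standalone sketch of the same slicing pattern (iter_batches is a hypothetical helper, not part of the Coach codebase):

import math

def iter_batches(items, batch_size):
    # math.ceil yields exactly one extra, smaller batch for the remainder,
    # and no empty batch when len(items) divides evenly by batch_size.
    for i in range(math.ceil(len(items) / batch_size)):
        yield items[i * batch_size: (i + 1) * batch_size]

# 10 items with batch_size 4 -> batch sizes 4, 4, 2; 8 items -> 4, 4.
assert [len(b) for b in iter_batches(list(range(10)), 4)] == [4, 4, 2]
assert [len(b) for b in iter_batches(list(range(8)), 4)] == [4, 4]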