mirror of https://github.com/gryf/coach.git

remove unused parameter scale_external_reward_by_intrinsic_reward_value

zach dwiel
2019-04-04 11:54:18 -04:00
committed by Zach Dwiel
parent 881f78f45a
commit 7d79433c05
3 changed files with 3 additions and 9 deletions
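For context on the behavioral change: the removed flag selected multiplicative scaling of the external reward by the intrinsic reward, while the default (and now only) path simply adds the intrinsic reward. A minimal sketch of the before/after merge logic, written as standalone functions for illustration rather than code taken from the repository:

# Illustrative sketch only: the names mirror the diff below, but these
# standalone functions are not part of the coach codebase.
def merge_rewards_before(reward, intrinsic_reward, scale_by_intrinsic=False):
    # Old behavior: the removed flag switched between multiplicative scaling
    # and plain addition of the intrinsic reward.
    if scale_by_intrinsic:
        return reward * (1 + intrinsic_reward)
    return reward + intrinsic_reward

def merge_rewards_after(reward, intrinsic_reward):
    # New behavior: the intrinsic reward is always added.
    return reward + intrinsic_reward

Because the flag defaulted to False (see the base_parameters change below) and the commit message describes it as unused, the additive behavior that agents got by default is unchanged by this commit.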

.gitignore

@@ -15,6 +15,7 @@ roboschool
 *.orig
 docs/site
 coach_env
+venv
 build
 rl_coach.egg*
 rl_coach_slim.egg*
@@ -32,4 +33,3 @@ trace_test*
.cache/
*.pyc
coachenv


@@ -895,10 +895,7 @@ class Agent(AgentInterface):
         transition = self.update_transition_before_adding_to_replay_buffer(transition)
 
         # merge the intrinsic reward in
-        if self.ap.algorithm.scale_external_reward_by_intrinsic_reward_value:
-            transition.reward = transition.reward * (1 + self.last_action_info.action_intrinsic_reward)
-        else:
-            transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward
+        transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward
 
         # sum up the total shaped reward
         self.total_shaped_reward_in_current_episode += transition.reward
@@ -1026,7 +1023,7 @@ class Agent(AgentInterface):
self.total_reward_in_current_episode += transition.reward
self.shaped_reward.add_sample(transition.reward)
self.reward.add_sample(transition.reward)
# create and store the transition
if self.phase in [RunPhase.TRAIN, RunPhase.HEATUP]:
# for episodic memories we keep the transitions in a local buffer until the episode is ended.


@@ -200,9 +200,6 @@ class AlgorithmParameters(Parameters):
         # distributed agents params
         self.share_statistics_between_workers = True
 
-        # intrinsic reward
-        self.scale_external_reward_by_intrinsic_reward_value = False
-
         # n-step returns
         self.n_step = -1  # calculate the total return (no bootstrap, by default)
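One practical follow-up, assuming the usual pattern of configuring agents through agent_params.algorithm in presets: any preset or user code that still assigns the removed attribute should drop that line, since Agent no longer reads it. A hypothetical example of the kind of assignment that becomes dead after this commit:

# Hypothetical preset fragment (agent_params stands for any AgentParameters
# instance); the flag below is no longer defined on AlgorithmParameters or
# read by Agent, so this line should simply be deleted.
agent_params.algorithm.scale_external_reward_by_intrinsic_reward_value = True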