mirror of https://github.com/gryf/coach.git
synced 2025-12-17 11:10:20 +01:00
remove unused parameter scale_external_reward_by_intrinsic_reward_value

Drop the scale_external_reward_by_intrinsic_reward_value flag from
AlgorithmParameters, together with the branch in Agent that consumed it,
so the intrinsic reward is always merged into the transition reward
additively.
.gitignore | 2 +-
@@ -15,6 +15,7 @@ roboschool
 *.orig
 docs/site
+coach_env
 venv
 build
 rl_coach.egg*
 rl_coach_slim.egg*
@@ -32,4 +33,3 @@ trace_test*
 .cache/
 *.pyc
-coachenv
 
@@ -895,10 +895,7 @@ class Agent(AgentInterface):
         transition = self.update_transition_before_adding_to_replay_buffer(transition)
 
         # merge the intrinsic reward in
-        if self.ap.algorithm.scale_external_reward_by_intrinsic_reward_value:
-            transition.reward = transition.reward * (1 + self.last_action_info.action_intrinsic_reward)
-        else:
-            transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward
+        transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward
 
         # sum up the total shaped reward
         self.total_shaped_reward_in_current_episode += transition.reward
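
For context, a minimal standalone sketch of the behavior change, using
hypothetical reward values (plain Python, not the rl_coach API; `intrinsic`
stands in for `self.last_action_info.action_intrinsic_reward`):

    # Hypothetical values for illustration only.
    external = 2.0   # reward returned by the environment
    intrinsic = 0.5  # intrinsic reward attached by the exploration policy

    # Removed branch (flag set to True): the intrinsic reward scaled the
    # external reward multiplicatively.
    scaled = external * (1 + intrinsic)  # 3.0

    # Old default (flag False), and the only behavior after this commit:
    # the intrinsic reward is merged additively.
    merged = external + intrinsic        # 2.5

    print(scaled, merged)

Since the flag defaulted to False and nothing in the tree set it, the
multiplicative path above was dead code, which is what this commit removes.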
@@ -200,9 +200,6 @@ class AlgorithmParameters(Parameters):
         # distributed agents params
         self.share_statistics_between_workers = True
 
-        # intrinsic reward
-        self.scale_external_reward_by_intrinsic_reward_value = False
-
         # n-step returns
         self.n_step = -1  # calculate the total return (no bootstrap, by default)
 
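A hedged note on downstream presets: AlgorithmParameters is configured by
plain attribute assignment, so a preset that still sets the removed flag
would likely just create an attribute nobody reads rather than fail loudly.
A small sketch, assuming the usual import path:

    from rl_coach.base_parameters import AlgorithmParameters

    params = AlgorithmParameters()
    # params.scale_external_reward_by_intrinsic_reward_value = True
    # ^ removed by this commit; assigning it no longer affects the agent.
    params.n_step = 5  # other knobs, like the n-step return above, are unchanged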