Mirror of https://github.com/gryf/coach.git (synced 2025-12-17 11:10:20 +01:00)
remove unused parameter scale_external_reward_by_intrinsic_reward_value
.gitignore (vendored, 2 changes)
@@ -15,6 +15,7 @@ roboschool
 *.orig
 docs/site
 coach_env
+venv
 build
 rl_coach.egg*
 rl_coach_slim.egg*
@@ -32,4 +33,3 @@ trace_test*
 .cache/
 *.pyc
 coachenv
-
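
To confirm the new venv entry is picked up locally, git's own check-ignore can be used (standard git; the line number in the output depends on the final .gitignore):

    git check-ignore -v venv
    # prints the matching source line and pattern, in the form
    # .gitignore:<line>:venv	venv
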
@@ -895,9 +895,6 @@ class Agent(AgentInterface):
|
|||||||
transition = self.update_transition_before_adding_to_replay_buffer(transition)
|
transition = self.update_transition_before_adding_to_replay_buffer(transition)
|
||||||
|
|
||||||
# merge the intrinsic reward in
|
# merge the intrinsic reward in
|
||||||
if self.ap.algorithm.scale_external_reward_by_intrinsic_reward_value:
|
|
||||||
transition.reward = transition.reward * (1 + self.last_action_info.action_intrinsic_reward)
|
|
||||||
else:
|
|
||||||
transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward
|
transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward
|
||||||
|
|
||||||
# sum up the total shaped reward
|
# sum up the total shaped reward
|
||||||
|
|||||||
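
For context, a minimal sketch of the two merge rules this hunk collapses (plain Python with made-up reward values, not the actual Agent code):

    # Hypothetical values, for illustration only.
    external_reward = 2.0    # reward returned by the environment
    intrinsic_reward = 0.25  # e.g. an exploration bonus

    # Removed branch: scale the external reward by the intrinsic one.
    scaled = external_reward * (1 + intrinsic_reward)  # -> 2.5

    # Surviving behavior: always add the intrinsic reward.
    merged = external_reward + intrinsic_reward        # -> 2.25

Since the commit message calls the parameter unused, only the additive branch was ever exercised in practice, so the behavior does not change.
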
@@ -200,9 +200,6 @@ class AlgorithmParameters(Parameters):
|
|||||||
# distributed agents params
|
# distributed agents params
|
||||||
self.share_statistics_between_workers = True
|
self.share_statistics_between_workers = True
|
||||||
|
|
||||||
# intrinsic reward
|
|
||||||
self.scale_external_reward_by_intrinsic_reward_value = False
|
|
||||||
|
|
||||||
# n-step returns
|
# n-step returns
|
||||||
self.n_step = -1 # calculate the total return (no bootstrap, by default)
|
self.n_step = -1 # calculate the total return (no bootstrap, by default)
|
||||||
|
|
||||||
|
|||||||
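
One hedged note on the fallout: any downstream code that still reads the removed flag will now fail attribute lookup. A minimal stand-in, not the real rl_coach class:

    class AlgorithmParameters:
        # Abbreviated stand-in for the class patched above.
        def __init__(self):
            self.share_statistics_between_workers = True
            self.n_step = -1  # the removed flag is no longer initialized here

    params = AlgorithmParameters()
    hasattr(params, 'scale_external_reward_by_intrinsic_reward_value')  # -> False
    # Reading params.scale_external_reward_by_intrinsic_reward_value
    # would raise AttributeError.
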