This repository has been archived by the owner on Dec 11, 2022. It is now read-only.

Commit 7d79433

remove unused parameter scale_external_reward_by_intrinsic_reward_value

zach-nervana committed Apr 9, 2019
1 parent 881f78f · commit 7d79433
Showing 3 changed files with 3 additions and 9 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -15,6 +15,7 @@ roboschool
 *.orig
 docs/site
 coach_env
+venv
 build
 rl_coach.egg*
 rl_coach_slim.egg*
@@ -32,4 +33,3 @@ trace_test*
 .cache/
 *.pyc
 coachenv
-
7 changes: 2 additions & 5 deletions rl_coach/agents/agent.py
@@ -895,10 +895,7 @@ def observe(self, env_response: EnvResponse) -> bool:
         transition = self.update_transition_before_adding_to_replay_buffer(transition)

         # merge the intrinsic reward in
-        if self.ap.algorithm.scale_external_reward_by_intrinsic_reward_value:
-            transition.reward = transition.reward * (1 + self.last_action_info.action_intrinsic_reward)
-        else:
-            transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward
+        transition.reward = transition.reward + self.last_action_info.action_intrinsic_reward

         # sum up the total shaped reward
         self.total_shaped_reward_in_current_episode += transition.reward
@@ -1026,7 +1023,7 @@ def emulate_observe_on_trainer(self, transition: Transition) -> bool:
         self.total_reward_in_current_episode += transition.reward
         self.shaped_reward.add_sample(transition.reward)
         self.reward.add_sample(transition.reward)

         # create and store the transition
         if self.phase in [RunPhase.TRAIN, RunPhase.HEATUP]:
             # for episodic memories we keep the transitions in a local buffer until the episode is ended.
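For context, the surviving logic in observe is a plain additive merge of the intrinsic reward into the external reward; the configurable multiplicative scaling branch is gone. Below is a minimal standalone sketch of that behavior (illustrative only: merge_intrinsic_reward and the stripped-down ActionInfo/Transition classes are stand-ins, not rl_coach APIs).

# Illustrative sketch only -- ActionInfo and Transition here are simplified
# stand-ins for rl_coach's classes, which carry many more fields.

class ActionInfo:
    def __init__(self, action_intrinsic_reward: float = 0.0):
        self.action_intrinsic_reward = action_intrinsic_reward


class Transition:
    def __init__(self, reward: float):
        self.reward = reward


def merge_intrinsic_reward(transition: Transition, last_action_info: ActionInfo) -> Transition:
    # Removed path (previously gated by scale_external_reward_by_intrinsic_reward_value):
    #   transition.reward = transition.reward * (1 + last_action_info.action_intrinsic_reward)
    # Behavior kept by this commit: a plain additive merge.
    transition.reward = transition.reward + last_action_info.action_intrinsic_reward
    return transition


# Example: an external reward of 1.0 plus an intrinsic reward of 0.25 gives 1.25.
merged = merge_intrinsic_reward(Transition(reward=1.0), ActionInfo(action_intrinsic_reward=0.25))
print(merged.reward)  # 1.25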
3 changes: 0 additions & 3 deletions rl_coach/base_parameters.py
@@ -200,9 +200,6 @@ def __init__(self):
         # distributed agents params
         self.share_statistics_between_workers = True

-        # intrinsic reward
-        self.scale_external_reward_by_intrinsic_reward_value = False
-
         # n-step returns
         self.n_step = -1  # calculate the total return (no bootstrap, by default)

