From f2fead57e5a6f27b325d5628ed031a3e4333d3b8 Mon Sep 17 00:00:00 2001
From: zach dwiel
Date: Fri, 5 Apr 2019 10:53:03 -0400
Subject: [PATCH] change method interface: AgentInterface.emulate_act_on_trainer(transition: Transition) -> emulate_act_on_trainer(action: ActionType)

---
 rl_coach/agents/agent.py           | 6 ++++--
 rl_coach/agents/agent_interface.py | 9 +++------
 rl_coach/level_manager.py          | 2 +-
 3 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/rl_coach/agents/agent.py b/rl_coach/agents/agent.py
index cc2665c..99c1080 100644
--- a/rl_coach/agents/agent.py
+++ b/rl_coach/agents/agent.py
@@ -813,6 +813,7 @@ class Agent(AgentInterface):
             curr_state = self.curr_state
 
         self.last_action_info = self.choose_action(curr_state)
+        # is it intentional that self.last_action_info is not filtered?
         filtered_action_info = self.output_filter.filter(self.last_action_info)
 
         return filtered_action_info
@@ -1037,7 +1038,7 @@ class Agent(AgentInterface):
 
     # TODO-remove - this is a temporary flow, used by the trainer worker, duplicated from observe() - need to create
     # an external trainer flow reusing the existing flow and methods [e.g. observe(), step(), act()]
-    def emulate_act_on_trainer(self, transition: Transition) -> ActionInfo:
+    def emulate_act_on_trainer(self, action: ActionType) -> ActionInfo:
         """
         This emulates the act using the transition obtained from the rollout worker on the training worker
         in case of distributed training.
@@ -1053,7 +1054,8 @@ class Agent(AgentInterface):
 
         self.total_steps_counter += 1
         self.current_episode_steps_counter += 1
-        self.last_action_info = transition.action
+        # these types don't match: ActionInfo = ActionType
+        self.last_action_info = action
 
         return self.last_action_info
 
diff --git a/rl_coach/agents/agent_interface.py b/rl_coach/agents/agent_interface.py
index 16c32a5..199daf6 100644
--- a/rl_coach/agents/agent_interface.py
+++ b/rl_coach/agents/agent_interface.py
@@ -144,14 +144,11 @@ class AgentInterface(object):
 
     # TODO-remove - this is a temporary flow, used by the trainer worker, duplicated from observe() - need to create
     # an external trainer flow reusing the existing flow and methods [e.g. observe(), step(), act()]
-    def emulate_act_on_trainer(self, transition: Transition) -> ActionInfo:
+    def emulate_act_on_trainer(self, action: ActionType) -> ActionInfo:
         """
         This emulates the act using the transition obtained from the rollout worker on the training worker
         in case of distributed training.
-        Get a decision of the next action to take.
-        The action is dependent on the current state which the agent holds from resetting the environment or from
-        the observe function.
-        :return: A tuple containing the actual action and additional info on the action
+        :return: A tuple containing the actual action
         """
         raise NotImplementedError("")
 
@@ -173,7 +170,7 @@ class AgentInterface(object):
         :return: None
         """
         raise NotImplementedError("")
-    
+
     def run_off_policy_evaluation(self) -> None:
         """
         Run off-policy evaluation estimators to evaluate the trained policy performance against a dataset.
diff --git a/rl_coach/level_manager.py b/rl_coach/level_manager.py
index a7c2742..a58ed0b 100644
--- a/rl_coach/level_manager.py
+++ b/rl_coach/level_manager.py
@@ -313,7 +313,7 @@ class LevelManager(EnvironmentInterface):
         # for i in range(self.steps_limit.num_steps):
             # let the agent observe the result and decide if it wants to terminate the episode
             done = acting_agent.emulate_observe_on_trainer(transition)
-            acting_agent.emulate_act_on_trainer(transition)
+            acting_agent.emulate_act_on_trainer(transition.action)
 
             if done:
                 self.handle_episode_ended()
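
Note (not part of the patch): the inline comment "# these types don't match: ActionInfo = ActionType" flags that emulate_act_on_trainer() is still annotated to return ActionInfo but now stores and returns a bare ActionType. Below is a minimal sketch of one way the body could reconcile the two, assuming ActionInfo from rl_coach.core_types can be constructed from a raw action; this is a hypothetical resolution for discussion, not something this patch does.

    # Sketch only: wrap the raw action in an ActionInfo so that the declared
    # return type of emulate_act_on_trainer() still holds. Assumes ActionInfo
    # accepts the action as its first constructor argument.
    from rl_coach.core_types import ActionInfo, ActionType

    def emulate_act_on_trainer(self, action: ActionType) -> ActionInfo:
        self.total_steps_counter += 1
        self.current_episode_steps_counter += 1
        # wrap the incoming action instead of assigning it directly
        self.last_action_info = ActionInfo(action)
        return self.last_action_info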