
Load and save functions for non-episodic replay buffers + CARLA improvements + network bug fixes

Author: itaicaspi-intel
Date:   2018-09-06 16:46:57 +03:00
Parent: d59a700248
Commit: a9bd1047c4

8 changed files with 50 additions and 18 deletions


@@ -32,6 +32,7 @@ from rl_coach.core_types import ActionInfo
 from rl_coach.exploration_policies.e_greedy import EGreedyParameters
 from rl_coach.logger import screen
 from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
+from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters
 class HumanAlgorithmParameters(AlgorithmParameters):
@@ -57,7 +58,7 @@ class HumanAgentParameters(AgentParameters):
     def __init__(self):
         super().__init__(algorithm=HumanAlgorithmParameters(),
                          exploration=EGreedyParameters(),
-                         memory=EpisodicExperienceReplayParameters(),
+                         memory=ExperienceReplayParameters(),
                          networks={"main": BCNetworkParameters()})
     @property
@@ -103,7 +104,7 @@ class HumanAgent(Agent):
     def save_replay_buffer_and_exit(self):
         replay_buffer_path = os.path.join(self.agent_logger.experiments_path, 'replay_buffer.p')
         self.memory.tp = None
-        to_pickle(self.memory, replay_buffer_path)
+        self.memory.save(replay_buffer_path)
         screen.log_title("Replay buffer was stored in {}".format(replay_buffer_path))
         exit()
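
The diff above only shows the call site; the save (and matching load) methods themselves are added to rl_coach.memories.non_episodic.experience_replay in this commit and are not part of this excerpt. The following is a minimal sketch of what such a pair could look like, assuming the non-episodic buffer keeps its transitions in a flat list and that a plain pickle round-trip is enough; only the save()/load() entry points mirror what the commit message and call site show, the class internals here are assumptions for illustration.

import pickle


class ExperienceReplay(object):
    """Sketch of a flat (non-episodic) replay buffer with save/load support.

    The transitions list and max_size attribute are illustrative assumptions;
    only the save()/load() names come from the commit above.
    """

    def __init__(self, max_size=1000000):
        self.max_size = max_size
        self.transitions = []  # flat list of transitions, no episode boundaries

    def store(self, transition):
        # drop the oldest transition once the buffer is full
        if len(self.transitions) >= self.max_size:
            self.transitions.pop(0)
        self.transitions.append(transition)

    def save(self, file_path):
        # persist only the transition list; non-picklable framework handles
        # (like the self.memory.tp cleared by the agent above) stay out of the dump
        with open(file_path, 'wb') as f:
            pickle.dump(self.transitions, f)

    def load(self, file_path):
        # restore transitions from disk, truncating to the buffer's capacity
        with open(file_path, 'rb') as f:
            transitions = pickle.load(f)
        self.transitions = transitions[:self.max_size]

With a pair like this in place, save_replay_buffer_and_exit() reduces to the single self.memory.save(replay_buffer_path) call shown in the diff, and a recorded human demonstration could later be reloaded via the corresponding load() before training an imitation agent.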