mirror of https://github.com/gryf/coach.git synced 2025-12-18 11:40:18 +01:00

Added missing imports, correct usages

Roman Dobosz
2018-04-24 13:33:10 +02:00
parent 42a9ec132d
commit 5c53f9be02
4 changed files with 13 additions and 10 deletions

File 1 of 4

@@ -14,6 +14,7 @@
 # limitations under the License.
 #
 import collections
+import copy
 import random
 import time
@@ -30,8 +31,8 @@ import scipy
 from architectures.tensorflow_components import shared_variables as sv
 import configurations
-import exploration_policies as ep
-import memories
+import exploration_policies as ep # noqa, used in eval()
+import memories # noqa, used in eval()
 from memories import memory
 import renderer
 import utils

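Note on the "# noqa, used in eval()" comments above: flake8 reports F401
("imported but unused") for modules that are never referenced by name, and
an import that is only resolved through eval() looks unused to static
analysis. A minimal sketch of the pattern, with illustrative module and
function names rather than anything from the coach source:

    import json  # noqa: F401, only referenced via eval() below

    def load(serializer_name, payload):
        # eval() resolves the module name at runtime, so the linter
        # never sees "json" used and would flag the import without noqa.
        serializer = eval(serializer_name)
        return serializer.loads(payload)

    print(load('json', '{"a": 1}'))  # prints {'a': 1}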
File 2 of 4

@@ -31,14 +31,14 @@ class HumanAgent(agent.Agent):
         self.clock = pygame.time.Clock()
         self.max_fps = int(self.tp.visualization.max_fps_for_human_control)
-        utils.screen.log_title("Human Control Mode")
+        logger.screen.log_title("Human Control Mode")
         available_keys = self.env.get_available_keys()
         if available_keys:
-            utils.screen.log("Use keyboard keys to move. Press escape to quit. Available keys:")
-            utils.screen.log("")
+            logger.screen.log("Use keyboard keys to move. Press escape to quit. Available keys:")
+            logger.screen.log("")
             for action, key in self.env.get_available_keys():
-                utils.screen.log("\t- {}: {}".format(action, key))
-            utils.screen.separator()
+                logger.screen.log("\t- {}: {}".format(action, key))
+            logger.screen.separator()

     def train(self):
         return 0
@@ -58,12 +58,12 @@ class HumanAgent(agent.Agent):
         replay_buffer_path = os.path.join(logger.logger.experiments_path, 'replay_buffer.p')
         self.memory.tp = None
         pickle.to_pickle(self.memory, replay_buffer_path)
-        utils.screen.log_title("Replay buffer was stored in {}".format(replay_buffer_path))
+        logger.screen.log_title("Replay buffer was stored in {}".format(replay_buffer_path))
         exit()

     def log_to_screen(self, phase):
-        # log to utils.screen
-        utils.screen.log_dict(
+        # log to logger.screen
+        logger.screen.log_dict(
             collections.OrderedDict([
                 ("Episode", self.current_episode),
                 ("total reward", self.total_reward_in_current_episode),

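The utils.screen to logger.screen change suggests the shared screen logger
now lives in the logger module. A minimal sketch of what such a
module-level singleton could look like, with the methods inferred from the
call sites above (an assumption, not the actual coach implementation):

    # logger.py, hypothetical and trimmed to the methods used above
    class _ScreenLogger(object):
        def log(self, text=""):
            print(text)

        def log_title(self, text):
            print("== {} ==".format(text))

        def separator(self):
            print("-" * 78)

        def log_dict(self, data, prefix=""):
            # Render an OrderedDict as "key: value" pairs on one line.
            print(prefix + ", ".join(
                "{}: {}".format(k, v) for k, v in data.items()))

    # Single shared instance, so call sites can write logger.screen.log(...)
    screen = _ScreenLogger()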
File 3 of 4

@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 #
 # Copyright (c) 2017 Intel Corporation
 #

File 4 of 4

@@ -15,6 +15,7 @@
 #
 import argparse
 import os
+import sys
 import time
 import tensorflow as tf
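The hunk does not show where sys is used; presumably sys.exit() or
sys.argv appears later in this argparse-driven file. The following is an
illustrative pattern only, not the actual file contents:

    import argparse
    import sys

    def main():
        parser = argparse.ArgumentParser()
        parser.add_argument('--steps', type=int, default=1000)
        args = parser.parse_args()
        if args.steps <= 0:
            # sys.exit() with a string prints it to stderr and exits 1
            sys.exit("steps must be a positive integer")

    if __name__ == '__main__':
        main()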