
Cleanup imports.

Until now, most modules imported all of another module's objects
(variables, classes, functions, and even that module's own imports)
into their namespace, which could cause, and in places did cause,
unintentional use of indirectly imported classes or methods.

With this patch, all star imports are replaced with an import of the
top-level module that provides the desired class or function.
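
For illustration, a minimal sketch of the hazard (the module and
function names here are hypothetical, not taken from the patch):

    # helpers.py -- imports os for its own internal use
    import os

    def project_root():
        return os.path.dirname(os.path.abspath(__file__))

    # main.py, before a change like this patch:
    from helpers import *   # brings in project_root(), but also 'os' indirectly
    print(os.getcwd())      # works only by accident; breaks if helpers drops os

    # main.py, after:
    import helpers
    print(helpers.project_root())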

In addition, imports were sorted (where possible) the way PEP 8[1]
suggests: standard library imports first, then third-party imports
(numpy, tensorflow, etc.), and finally coach modules, with the three
groups separated by one blank line.

[1] https://www.python.org/dev/peps/pep-0008/#imports
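
For reference, the resulting layout of an import block, as applied in
the diff below (the group comments are added here for illustration
only):

    # standard library
    import argparse
    import glob
    import os

    # third party
    import numpy as np
    import pandas as pd

    # coach modules
    import logger
    import presets
    import utils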
Roman Dobosz
2018-04-12 19:46:32 +02:00
parent cafa152382
commit 1b095aeeca
75 changed files with 1169 additions and 1139 deletions


@@ -13,23 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 # -*- coding: utf-8 -*-
-import presets
-import numpy as np
-import pandas as pd
-from os import path
-import os
+import argparse
 import glob
+import os
 import shutil
+import signal
+import subprocess
 import sys
 import time
-from logger import screen
-from utils import list_all_classes_in_module, threaded_cmd_line_run, killed_processes
-import subprocess
-import signal
-import argparse
+
+import numpy as np
+import pandas as pd
+
+import logger
+import presets
+import utils
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
@@ -61,7 +59,7 @@ if __name__ == '__main__':
     if args.preset is not None:
         presets_lists = [args.preset]
     else:
-        presets_lists = list_all_classes_in_module(presets)
+        presets_lists = utils.list_all_classes_in_module(presets)
     win_size = 10
     fail_count = 0
     test_count = 0
@@ -70,7 +68,7 @@ if __name__ == '__main__':
     # create a clean experiment directory
     test_name = '__test'
     test_path = os.path.join('./experiments', test_name)
-    if path.exists(test_path):
+    if os.path.exists(test_path):
         shutil.rmtree(test_path)
     if args.ignore_presets is not None:
         presets_to_ignore = args.ignore_presets.split(',')
@@ -100,7 +98,7 @@ if __name__ == '__main__':
             test_count += 1
 
             # run the experiment in a separate thread
-            screen.log_title("Running test {} - {}".format(preset_name, framework))
+            logger.screen.log_title("Running test {} - {}".format(preset_name, framework))
             log_file_name = 'test_log_{preset_name}_{framework}.txt'.format(
                 preset_name=preset_name,
                 framework=framework,
@@ -139,7 +137,7 @@ if __name__ == '__main__':
             tries_counter = 0
             while not csv_paths:
-                csv_paths = glob.glob(path.join(test_path, '*', filename_pattern))
+                csv_paths = glob.glob(os.path.join(test_path, '*', filename_pattern))
                 if tries_counter > read_csv_tries:
                     break
                 tries_counter += 1
@@ -195,26 +193,26 @@ if __name__ == '__main__':
             # kill test and print result
             os.killpg(os.getpgid(p.pid), signal.SIGTERM)
             if test_passed:
-                screen.success("Passed successfully")
+                logger.screen.success("Passed successfully")
             else:
                 if csv_paths:
-                    screen.error("Failed due to insufficient reward", crash=False)
-                    screen.error("preset.test_max_step_threshold: {}".format(preset.test_max_step_threshold), crash=False)
-                    screen.error("preset.test_min_return_threshold: {}".format(preset.test_min_return_threshold), crash=False)
-                    screen.error("averaged_rewards: {}".format(averaged_rewards), crash=False)
-                    screen.error("episode number: {}".format(csv['Episode #'].values[-1]), crash=False)
+                    logger.screen.error("Failed due to insufficient reward", crash=False)
+                    logger.screen.error("preset.test_max_step_threshold: {}".format(preset.test_max_step_threshold), crash=False)
+                    logger.screen.error("preset.test_min_return_threshold: {}".format(preset.test_min_return_threshold), crash=False)
+                    logger.screen.error("averaged_rewards: {}".format(averaged_rewards), crash=False)
+                    logger.screen.error("episode number: {}".format(csv['Episode #'].values[-1]), crash=False)
                 else:
-                    screen.error("csv file never found", crash=False)
+                    logger.screen.error("csv file never found", crash=False)
                 if args.verbose:
-                    screen.error("command exitcode: {}".format(p.returncode), crash=False)
-                    screen.error(open(log_file_name).read(), crash=False)
+                    logger.screen.error("command exitcode: {}".format(p.returncode), crash=False)
+                    logger.screen.error(open(log_file_name).read(), crash=False)
                 fail_count += 1
             shutil.rmtree(test_path)
-            screen.separator()
+            logger.screen.separator()
 
     if fail_count == 0:
-        screen.success(" Summary: " + str(test_count) + "/" + str(test_count) + " tests passed successfully")
+        logger.screen.success(" Summary: " + str(test_count) + "/" + str(test_count) + " tests passed successfully")
     else:
-        screen.error(" Summary: " + str(test_count - fail_count) + "/" + str(test_count) + " tests passed successfully")
+        logger.screen.error(" Summary: " + str(test_count - fail_count) + "/" + str(test_count) + " tests passed successfully")