1
0
mirror of https://github.com/gryf/coach.git synced 2026-03-06 01:05:47 +01:00

Integrate coach.py params with distributed Coach. (#42)

* Integrate coach.py params with distributed Coach.
* Minor improvements
- Use enums instead of constants.
- Reduce code duplication.
- Ask experiment name with timeout.
This commit is contained in:
Balaji Subramaniam
2018-11-05 09:33:30 -08:00
committed by GitHub
parent 95b4fc6888
commit 7e7006305a
13 changed files with 263 additions and 285 deletions

View File

@@ -2,6 +2,7 @@ import os
import uuid
import json
import time
from enum import Enum
from typing import List
from configparser import ConfigParser, Error
from rl_coach.orchestrators.deploy import Deploy, DeployParameters
@@ -12,10 +13,19 @@ from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.data_store_impl import get_data_store
class RunType(Enum):
ORCHESTRATOR = "orchestrator"
TRAINER = "trainer"
ROLLOUT_WORKER = "rollout-worker"
def __str__(self):
return self.value
class RunTypeParameters():
def __init__(self, image: str, command: list(), arguments: list() = None,
run_type: str = "trainer", checkpoint_dir: str = "/checkpoint",
run_type: str = str(RunType.TRAINER), checkpoint_dir: str = "/checkpoint",
num_replicas: int = 1, orchestration_params: dict=None):
self.image = image
self.command = command
@@ -97,12 +107,12 @@ class Kubernetes(Deploy):
def deploy_trainer(self) -> bool:
trainer_params = self.params.run_type_params.get('trainer', None)
trainer_params = self.params.run_type_params.get(str(RunType.TRAINER), None)
if not trainer_params:
return False
trainer_params.command += ['--memory-backend-params', json.dumps(self.params.memory_backend_parameters.__dict__)]
trainer_params.command += ['--data-store-params', json.dumps(self.params.data_store_params.__dict__)]
trainer_params.command += ['--memory_backend_params', json.dumps(self.params.memory_backend_parameters.__dict__)]
trainer_params.command += ['--data_store_params', json.dumps(self.params.data_store_params.__dict__)]
name = "{}-{}".format(trainer_params.run_type, uuid.uuid4())
@@ -175,13 +185,13 @@ class Kubernetes(Deploy):
def deploy_worker(self):
worker_params = self.params.run_type_params.get('worker', None)
worker_params = self.params.run_type_params.get(str(RunType.ROLLOUT_WORKER), None)
if not worker_params:
return False
worker_params.command += ['--memory-backend-params', json.dumps(self.params.memory_backend_parameters.__dict__)]
worker_params.command += ['--data-store-params', json.dumps(self.params.data_store_params.__dict__)]
worker_params.command += ['--num-workers', '{}'.format(worker_params.num_replicas)]
worker_params.command += ['--memory_backend_params', json.dumps(self.params.memory_backend_parameters.__dict__)]
worker_params.command += ['--data_store_params', json.dumps(self.params.data_store_params.__dict__)]
worker_params.command += ['--num_workers', '{}'.format(worker_params.num_replicas)]
name = "{}-{}".format(worker_params.run_type, uuid.uuid4())
@@ -255,7 +265,7 @@ class Kubernetes(Deploy):
pass
def trainer_logs(self):
trainer_params = self.params.run_type_params.get('trainer', None)
trainer_params = self.params.run_type_params.get(str(RunType.TRAINER), None)
if not trainer_params:
return
@@ -313,7 +323,7 @@ class Kubernetes(Deploy):
return
def undeploy(self):
trainer_params = self.params.run_type_params.get('trainer', None)
trainer_params = self.params.run_type_params.get(str(RunType.TRAINER), None)
api_client = k8sclient.AppsV1Api()
delete_options = k8sclient.V1DeleteOptions()
if trainer_params:
@@ -321,7 +331,7 @@ class Kubernetes(Deploy):
api_client.delete_namespaced_deployment(trainer_params.orchestration_params['deployment_name'], self.params.namespace, delete_options)
except k8sclient.rest.ApiException as e:
print("Got exception: %s\n while deleting trainer", e)
worker_params = self.params.run_type_params.get('worker', None)
worker_params = self.params.run_type_params.get(str(RunType.ROLLOUT_WORKER), None)
if worker_params:
try:
api_client.delete_namespaced_deployment(worker_params.orchestration_params['deployment_name'], self.params.namespace, delete_options)

View File

@@ -1,120 +0,0 @@
import argparse
from rl_coach.orchestrators.kubernetes_orchestrator import KubernetesParameters, Kubernetes, RunTypeParameters
from rl_coach.memories.backend.redis import RedisPubSubMemoryBackendParameters
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.s3_data_store import S3DataStoreParameters
from rl_coach.data_stores.nfs_data_store import NFSDataStoreParameters
def main(preset: str, image: str='ajaysudh/testing:coach', num_workers: int=1, nfs_server: str=None, nfs_path: str=None,
         memory_backend: str=None, data_store: str=None, s3_end_point: str=None, s3_bucket_name: str=None,
         s3_creds_file: str=None, policy_type: str="OFF"):
    """Launch a distributed Coach experiment on a Kubernetes cluster.

    Builds the rollout-worker and trainer container commands for the given
    preset, wires up the requested memory backend and data store, deploys the
    trainer and then the rollout workers, streams trainer logs until
    interrupted, and finally tears the deployment down.
    """
    # Commands executed inside the containers for each run type.
    worker_cmd = ['python3', 'rl_coach/rollout_worker.py', '-p', preset, '--policy-type', policy_type]
    trainer_cmd = ['python3', 'rl_coach/training_worker.py', '-p', preset, '--policy-type', policy_type]

    # Only the Redis pub/sub memory backend is recognized here.
    backend_params = RedisPubSubMemoryBackendParameters() if memory_backend == "redispubsub" else None

    # Build the data-store parameters for whichever store was requested (s3 or nfs).
    store_params = None
    if data_store == "s3":
        base_params = DataStoreParameters("s3", "", "")
        store_params = S3DataStoreParameters(ds_params=base_params, end_point=s3_end_point, bucket_name=s3_bucket_name,
                                             creds_file=s3_creds_file, checkpoint_dir="/checkpoint")
    elif data_store == "nfs":
        base_params = DataStoreParameters("nfs", "kubernetes", {"namespace": "default"})
        store_params = NFSDataStoreParameters(base_params)

    worker_run_params = RunTypeParameters(image, worker_cmd, run_type="worker", num_replicas=num_workers)
    trainer_run_params = RunTypeParameters(image, trainer_cmd, run_type="trainer")
    k8s_params = KubernetesParameters([worker_run_params, trainer_run_params],
                                      kubeconfig='~/.kube/config', nfs_server=nfs_server, nfs_path=nfs_path,
                                      memory_backend_parameters=backend_params,
                                      data_store_params=store_params)
    orchestrator = Kubernetes(k8s_params)

    if not orchestrator.setup():
        print("Could not setup")
        return

    # Trainer must come up first; bail out on either failed deployment.
    if orchestrator.deploy_trainer():
        print("Successfully deployed")
    else:
        print("Could not deploy")
        return

    if orchestrator.deploy_worker():
        print("Successfully deployed")
    else:
        print("Could not deploy")
        return

    # Stream trainer logs until the user interrupts, then tear everything down.
    try:
        orchestrator.trainer_logs()
    except KeyboardInterrupt:
        pass

    orchestrator.undeploy()
if __name__ == '__main__':
    # Command-line entry point: parse deployment options and hand off to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--image',
                        help="(string) Name of a docker image.",
                        type=str,
                        required=True)
    parser.add_argument('-p', '--preset',
                        help="(string) Name of a preset to run (class name from the 'presets' directory).",
                        type=str,
                        required=True)
    parser.add_argument('--memory-backend',
                        help="(string) Memory backend to use.",
                        type=str,
                        choices=['redispubsub'],
                        default="redispubsub")
    parser.add_argument('--data-store',
                        help="(string) Data store to use.",
                        type=str,
                        choices=['s3', 'nfs'],
                        default="s3")
    parser.add_argument('--nfs-server',
                        # Fixed typo: "Addresss" -> "Address".
                        help="(string) Address of the nfs server.",
                        type=str,
                        required=False)
    parser.add_argument('--nfs-path',
                        help="(string) Exported path for the nfs server.",
                        type=str,
                        required=False)
    parser.add_argument('--s3-end-point',
                        help="(string) S3 endpoint to use when S3 data store is used.",
                        type=str,
                        required=False)
    parser.add_argument('--s3-bucket-name',
                        help="(string) S3 bucket name to use when S3 data store is used.",
                        type=str,
                        required=False)
    parser.add_argument('--s3-creds-file',
                        help="(string) S3 credentials file to use when S3 data store is used.",
                        type=str,
                        required=False)
    parser.add_argument('--num-workers',
                        # Help label corrected: this argument is parsed as an int, not a string.
                        help="(int) Number of rollout workers.",
                        type=int,
                        required=False,
                        default=1)
    parser.add_argument('--policy-type',
                        help="(string) The type of policy: OFF/ON.",
                        type=str,
                        choices=['ON', 'OFF'],
                        default='OFF')

    args = parser.parse_args()
    main(preset=args.preset, image=args.image, nfs_server=args.nfs_server, nfs_path=args.nfs_path,
         memory_backend=args.memory_backend, data_store=args.data_store, s3_end_point=args.s3_end_point,
         s3_bucket_name=args.s3_bucket_name, s3_creds_file=args.s3_creds_file, num_workers=args.num_workers,
         policy_type=args.policy_type)

View File

@@ -1,18 +0,0 @@
from rl_coach.orchestrators.kubernetes_orchestrator import KubernetesParameters, Kubernetes
# Smoke test: deploy a single CartPole rollout worker to a Kubernetes cluster.
image = 'ajaysudh/testing:coach'
command = ['python3', 'rl_coach/rollout_worker.py', '-p', 'CartPole_DQN_distributed']

params = KubernetesParameters(image, command, kubeconfig='~/.kube/config', redis_ip='redis-service.ajay.svc', redis_port=6379, num_workers=1)
obj = Kubernetes(params)

# NOTE(review): a failed setup() is only reported; the deploy attempt proceeds anyway.
if not obj.setup():
    print("Could not setup")

print("Successfully deployed" if obj.deploy() else "Could not deploy")