
updated gifs in README + fix for multiworker crashes + improved Atari DQN and Dueling DDQN presets

Itai Caspi
2018-08-16 18:23:32 +03:00
parent 8f99409387
commit 1de04d6fee
11 changed files with 19 additions and 5 deletions


@@ -17,7 +17,12 @@ Training an agent to solve an environment is as easy as running:
coach -p CartPole_DQN -r
```
<img src="img/doom_deathmatch.gif" alt="Doom Deathmatch" width="267" height="200"/> <img src="img/carla.gif" alt="CARLA" width="284" height="200"/> <img src="img/montezuma.gif" alt="MontezumaRevenge" width="152" height="200"/>
<img src="img/fetch_slide.gif" alt="Fetch Slide"/> <img src="img/pendulum.gif" alt="Pendulum"/> <img src="img/starcraft.gif" alt="Starcraft"/>
<br>
<img src="img/doom_deathmatch.gif" alt="Doom Deathmatch"/> <img src="img/carla.gif" alt="CARLA"/> <img src="img/montezuma.gif" alt="MontezumaRevenge"/>
<br>
<img src="img/doom_health.gif" alt="Doom Health Gathering"/><img src="img/minitaur.gif" alt="PyBullet Minitaur"/> <img src="img/ant.gif" alt="Gym Extensions Ant"/>
<br><br>
Blog posts from the Intel® AI website:
* [Release 0.8.0](https://ai.intel.com/reinforcement-learning-coach-intel/) (initial release)


@@ -25,10 +25,11 @@ The environments that were used for testing include:
![#FF4040](https://placehold.it/15/FF4040/000000?text=+) *Not training*
| |**Status** |**Environments**|**Comments**|
| ----------------------- |:--------------------------------------------------------:|:--------------:|:--------:|
|**[DQN](dqn)** | ![#ceffad](https://placehold.it/15/ceffad/000000?text=+) |Atari | Pong is not training |
|**[Dueling DDQN](dueling_ddqn)**| ![#ceffad](https://placehold.it/15/ceffad/000000?text=+) |Atari | Pong is not training |
|**[DQN](dqn)** | ![#2E8B57](https://placehold.it/15/2E8B57/000000?text=+) |Atari | |
|**[Dueling DDQN](dueling_ddqn)**| ![#2E8B57](https://placehold.it/15/2E8B57/000000?text=+) |Atari | |
|**[Dueling DDQN with PER](dueling_ddqn_with_per)**| ![#2E8B57](https://placehold.it/15/2E8B57/000000?text=+) |Atari | |
|**[Bootstrapped DQN](bootstrapped_dqn)**| ![#2E8B57](https://placehold.it/15/2E8B57/000000?text=+) |Atari | |
|**[QR-DQN](qr_dqn)** | ![#2E8B57](https://placehold.it/15/2E8B57/000000?text=+) |Atari | |

BIN  img/ant.gif  (new file, 1.5 MiB)

BIN  img/doom_health.gif  (new file, 3.8 MiB)

BIN  img/fetch_slide.gif  (new file, 712 KiB)

BIN  img/minitaur.gif  (new file, 846 KiB)

BIN  img/pendulum.gif  (new file, 754 KiB)

BIN  img/starcraft.gif  (new file, 3.4 MiB)


@@ -142,6 +142,9 @@ class GraphManager(object):
config = tf.ConfigProto()
config.allow_soft_placement = True # allow placing ops on cpu if they are not fit for gpu
config.gpu_options.allow_growth = True # allow the gpu memory allocated for the worker to grow if needed
config.gpu_options.per_process_gpu_memory_fraction = 0.2
config.intra_op_parallelism_threads = 1
config.inter_op_parallelism_threads = 1
from rl_coach.architectures.tensorflow_components.distributed_tf_utils import create_and_start_parameters_server, \
create_cluster_spec, create_worker_server_and_device
@@ -169,6 +172,8 @@ class GraphManager(object):
config.allow_soft_placement = True # allow placing ops on cpu if they are not fit for gpu
config.gpu_options.allow_growth = True # allow the gpu memory allocated for the worker to grow if needed
# config.gpu_options.per_process_gpu_memory_fraction = 0.2
config.intra_op_parallelism_threads = 1
config.inter_op_parallelism_threads = 1
if isinstance(task_parameters, DistributedTaskParameters):
# the distributed tensorflow setting
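
The session settings introduced above cap each worker's share of GPU memory and serialize TensorFlow's thread pools, so that several worker processes can share one GPU without crashing or oversubscribing CPU threads. A minimal sketch of such a per-worker session config, assuming TensorFlow 1.x (`tf.ConfigProto`) and a hypothetical helper name rather than the repo's exact code:

```python
import tensorflow as tf  # TensorFlow 1.x assumed, as in the diff above

def make_worker_session_config(gpu_memory_fraction=0.2):
    """Hypothetical helper: build a session config for one of several workers sharing a GPU."""
    config = tf.ConfigProto()
    config.allow_soft_placement = True       # place ops on CPU when no GPU kernel is available
    config.gpu_options.allow_growth = True   # start small and grow GPU memory on demand
    # cap this process to a fraction of GPU memory so multiple workers can coexist
    config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
    # one thread per pool avoids thread oversubscription when many worker processes run
    config.intra_op_parallelism_threads = 1
    config.inter_op_parallelism_threads = 1
    return config

# e.g. sess = tf.Session(config=make_worker_session_config(0.2))
```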


@@ -20,7 +20,8 @@ schedule_params.heatup_steps = EnvironmentSteps(50000)
# Agent #
#########
agent_params = DQNAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
# since we are using Adam instead of RMSProp, we adjust the learning rate as well
agent_params.network_wrappers['main'].learning_rate = 0.0001
###############
# Environment #
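
For context, the classic DQN setup used RMSProp with a learning rate of 0.00025; Coach's presets drive the network with Adam, which generally wants a smaller step size, hence the drop to 0.0001 above. A minimal sketch of how this override reads in isolation (import path assumed from the rl_coach package layout of this era):

```python
# Sketch only; mirrors the pattern of the preset change above.
from rl_coach.agents.dqn_agent import DQNAgentParameters

agent_params = DQNAgentParameters()
# Smaller step size for Adam than the classic RMSProp value of 0.00025.
agent_params.network_wrappers['main'].learning_rate = 0.0001
```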


@@ -23,7 +23,9 @@ schedule_params.heatup_steps = EnvironmentSteps(50000)
# Agent #
#########
agent_params = DDQNAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
# since we are using Adam instead of RMSProp, we adjust the learning rate as well
agent_params.network_wrappers['main'].learning_rate = 0.0001
agent_params.network_wrappers['main'].middleware_parameters.scheme = MiddlewareScheme.Empty
agent_params.network_wrappers['main'].heads_parameters = [DuelingQHeadParameters()]
agent_params.network_wrappers['main'].rescale_gradient_from_head_by_factor = [1/math.sqrt(2)]
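
The dueling Q-head and the 1/√2 gradient rescale shown in this preset follow Wang et al. (2016): both the value and advantage streams backpropagate into the same shared trunk, so the gradient entering it is scaled down to compensate. A rough sketch of the aggregation a dueling head computes (illustrative NumPy, not the repo's TensorFlow code):

```python
import numpy as np

def dueling_q_values(value, advantages):
    # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), the practical combination from the dueling paper
    return value + advantages - advantages.mean(axis=-1, keepdims=True)
```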