<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <link rel="shortcut icon" href="../../../img/favicon.ico">
  <title>Deep Deterministic Policy Gradients - Reinforcement Learning Coach</title>
  <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
  <link rel="stylesheet" href="../../../css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../../css/theme_extra.css" type="text/css" />
  <link rel="stylesheet" href="../../../css/highlight.css">
  <link href="../../../extra.css" rel="stylesheet">
  <script>
    // Current page data
    var mkdocs_page_name = "Deep Deterministic Policy Gradients";
    var mkdocs_page_input_path = "algorithms/policy_optimization/ddpg.md";
    var mkdocs_page_url = "/algorithms/policy_optimization/ddpg/";
  </script>
  <script src="../../../js/jquery-2.1.1.min.js"></script>
  <script src="../../../js/modernizr-2.8.3.min.js"></script>
  <script type="text/javascript" src="../../../js/highlight.pack.js"></script>
</head>
<body class="wy-body-for-nav" role="document">
<div class="wy-grid-for-nav">

  <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
    <div class="wy-side-nav-search">
      <a href="../../.." class="icon icon-home"> Reinforcement Learning Coach</a>
      <div role="search">
        <form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
          <input type="text" name="q" placeholder="Search docs" />
        </form>
      </div>
    </div>

    <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
      <ul class="current">
        <li class="toctree-l1">
          <a class="" href="../../..">Home</a>
        </li>
        <li class="toctree-l1">
          <a class="" href="../../../usage/">Usage</a>
        </li>
        <li class="toctree-l1">
          <span class="caption-text">Design</span>
          <ul class="subnav">
            <li class="">
              <a class="" href="../../../design/features/">Features</a>
            </li>
            <li class="">
              <a class="" href="../../../design/control_flow/">Control Flow</a>
            </li>
            <li class="">
              <a class="" href="../../../design/network/">Network</a>
            </li>
            <li class="">
              <a class="" href="../../../design/filters/">Filters</a>
            </li>
          </ul>
        </li>
        <li class="toctree-l1">
          <span class="caption-text">Algorithms</span>
          <ul class="subnav">
            <li class="">
              <a class="" href="../../value_optimization/dqn/">DQN</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/double_dqn/">Double DQN</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/dueling_dqn/">Dueling DQN</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/categorical_dqn/">Categorical DQN</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/mmc/">Mixed Monte Carlo</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/pal/">Persistent Advantage Learning</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/nec/">Neural Episodic Control</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/bs_dqn/">Bootstrapped DQN</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/n_step/">N-Step Q Learning</a>
            </li>
            <li class="">
              <a class="" href="../../value_optimization/naf/">Normalized Advantage Functions</a>
            </li>
            <li class="">
              <a class="" href="../pg/">Policy Gradient</a>
            </li>
            <li class="">
              <a class="" href="../ac/">Actor-Critic</a>
            </li>
            <li class="current">
              <a class="current" href="./">Deep Deterministic Policy Gradients</a>
              <ul class="subnav">
                <li class="toctree-l3"><a href="#deep-deterministic-policy-gradient">Deep Deterministic Policy Gradient</a></li>
                <ul>
                  <li><a class="toctree-l4" href="#network-structure">Network Structure</a></li>
                  <li><a class="toctree-l4" href="#algorithm-description">Algorithm Description</a></li>
                </ul>
              </ul>
            </li>
            <li class="">
              <a class="" href="../ppo/">Proximal Policy Optimization</a>
            </li>
            <li class="">
              <a class="" href="../cppo/">Clipped Proximal Policy Optimization</a>
            </li>
            <li class="">
              <a class="" href="../../other/dfp/">Direct Future Prediction</a>
            </li>
            <li class="">
              <a class="" href="../../imitation/bc/">Behavioral Cloning</a>
            </li>
          </ul>
        </li>
        <li class="toctree-l1">
          <a class="" href="../../../dashboard/">Coach Dashboard</a>
        </li>
        <li class="toctree-l1">
          <span class="caption-text">Contributing</span>
          <ul class="subnav">
            <li class="">
              <a class="" href="../../../contributing/add_agent/">Adding a New Agent</a>
            </li>
            <li class="">
              <a class="" href="../../../contributing/add_env/">Adding a New Environment</a>
            </li>
          </ul>
        </li>
      </ul>
    </div>
  </nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
    <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
      <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
      <a href="../../..">Reinforcement Learning Coach</a>
    </nav>

    <div class="wy-nav-content">
      <div class="rst-content">
        <div role="navigation" aria-label="breadcrumbs navigation">
          <ul class="wy-breadcrumbs">
            <li><a href="../../..">Docs</a> »</li>
            <li>Algorithms »</li>
            <li>Deep Deterministic Policy Gradients</li>
            <li class="wy-breadcrumbs-aside">
            </li>
          </ul>
          <hr/>
        </div>
        <div role="main">
          <div class="section">
            <h1 id="deep-deterministic-policy-gradient">Deep Deterministic Policy Gradient</h1>
            <p><strong>Action space:</strong> Continuous</p>
            <p><strong>References:</strong> <a href="https://arxiv.org/abs/1509.02971">Continuous control with deep reinforcement learning</a></p>
            <h2 id="network-structure">Network Structure</h2>
            <p style="text-align: center;">
              <img src="../../design_imgs/ddpg.png" alt="DDPG network structure">
            </p>
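            <p>As a rough sketch of the structure shown in the figure (illustrative PyTorch code with hypothetical layer sizes, not Coach's actual implementation): the actor maps a state to a bounded action mean, the critic maps a state-action pair to a scalar value, and DDPG keeps an online and a target copy of each network.</p>
<pre><code class="python">import torch
import torch.nn as nn

class Actor(nn.Module):
    # Maps a state to a bounded action mean vector mu(s).
    def __init__(self, state_dim, action_dim, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, action_dim), nn.Tanh(),
        )

    def forward(self, state):
        return self.net(state)

class Critic(nn.Module):
    # Maps a (state, action) pair to a scalar value Q(s, a).
    def __init__(self, state_dim, action_dim, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim + action_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )

    def forward(self, state, action):
        return self.net(torch.cat([state, action], dim=-1))
</code></pre>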
            <h2 id="algorithm-description">Algorithm Description</h2>
            <h3 id="choosing-an-action">Choosing an action</h3>
            <p>Pass the current states through the actor network to get an action mean vector <script type="math/tex"> \mu </script>. During training, use a continuous exploration policy, such as the Ornstein-Uhlenbeck process, to add exploration noise to the action. When testing, use the mean vector <script type="math/tex">\mu</script> as-is.</p>
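            <p>As a minimal sketch of such an exploration policy (illustrative NumPy code with hypothetical parameter values, not Coach's implementation), an Ornstein-Uhlenbeck process keeps a temporally correlated noise state that is added to the action mean during training:</p>
<pre><code class="python">import numpy as np

class OrnsteinUhlenbeckNoise:
    # Temporally correlated noise; theta pulls the state back towards mu,
    # sigma scales the random perturbation (parameter values are illustrative).
    def __init__(self, action_dim, mu=0.0, theta=0.15, sigma=0.2, dt=1.0):
        self.mu, self.theta, self.sigma, self.dt = mu, theta, sigma, dt
        self.state = np.full(action_dim, mu, dtype=np.float64)

    def reset(self):
        self.state[:] = self.mu

    def sample(self):
        dx = (self.theta * (self.mu - self.state) * self.dt
              + self.sigma * np.sqrt(self.dt) * np.random.randn(*self.state.shape))
        self.state = self.state + dx
        return self.state.copy()

# During training: action = actor_mean + noise.sample()
# During evaluation: action = actor_mean
</code></pre>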
            <h3 id="training-the-network">Training the network</h3>
            <p>Start by sampling a batch of transitions from the experience replay.</p>
            <ul>
              <li>To train the <strong>critic network</strong>, use the following targets:</li>
            </ul>
            <p>
              <script type="math/tex; mode=display"> y_t = r(s_t,a_t) + \gamma \cdot Q(s_{t+1}, \mu(s_{t+1})) </script>
              First, run the actor target network, using the next states as the inputs, to get <script type="math/tex"> \mu (s_{t+1}) </script>. Next, run the critic target network using the next states and <script type="math/tex"> \mu (s_{t+1}) </script>, and use the output to calculate <script type="math/tex"> y_t </script> according to the equation above. Then train the critic's online network, using the current states and actions as the inputs and <script type="math/tex">y_t</script> as the targets.</p>
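            <p>A minimal sketch of this critic update, assuming PyTorch-style networks and an optimizer passed in as hypothetical arguments (terminal-state handling omitted for brevity; this is an illustration, not Coach's implementation):</p>
<pre><code class="python">import torch
import torch.nn.functional as F

def train_critic(batch, critic, critic_target, actor_target, critic_optimizer, gamma=0.99):
    # batch: (states, actions, rewards, next_states) tensors with the batch
    # dimension first; rewards shaped (batch_size, 1) to match the critic output.
    states, actions, rewards, next_states = batch

    # Targets from the target networks: y_t = r + gamma * Q(s_{t+1}, mu(s_{t+1}))
    with torch.no_grad():
        next_actions = actor_target(next_states)
        y = rewards + gamma * critic_target(next_states, next_actions)

    # Regress the online critic's Q(s_t, a_t) towards y_t
    critic_loss = F.mse_loss(critic(states, actions), y)
    critic_optimizer.zero_grad()
    critic_loss.backward()
    critic_optimizer.step()
    return critic_loss.item()
</code></pre>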
            <ul>
              <li>To train the <strong>actor network</strong>, use the following equation:</li>
            </ul>
            <p>
              <script type="math/tex; mode=display"> \nabla_{\theta^\mu} J \approx E_{s_t \sim \rho^\beta} \left[ \nabla_a Q(s,a)|_{s=s_t, a=\mu(s_t)} \cdot \nabla_{\theta^\mu} \mu(s)|_{s=s_t} \right] </script>
              Use the actor's online network to get the action mean values for the current states. Then, use the critic's online network to get the gradients of the critic output with respect to those action mean values, <script type="math/tex"> \nabla_a Q(s,a)|_{s=s_t, a=\mu(s_t)} </script>. Using the chain rule, backpropagate these gradients through the actor to get the gradients of the objective with respect to the actor's weights. Finally, apply those gradients to the actor's online network.</p>
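            <p>With automatic differentiation, this chain rule is applied implicitly by ascending the gradient of <script type="math/tex"> Q(s_t, \mu(s_t)) </script> with respect to the actor's weights. A minimal sketch, again with hypothetical arguments rather than Coach's actual objects:</p>
<pre><code class="python">import torch

def train_actor(states, actor, critic, actor_optimizer):
    # Maximize Q(s_t, mu(s_t)) with respect to the actor's weights; autograd applies
    # the chain rule above: grad_a Q(s, a) at a = mu(s_t), backpropagated through mu.
    actor_loss = -critic(states, actor(states)).mean()
    actor_optimizer.zero_grad()
    actor_loss.backward()
    actor_optimizer.step()
    return actor_loss.item()
</code></pre>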
            <p>After every training step, do a soft update of the critic and actor target networks' weights from the online networks.</p>
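            <p>A minimal sketch of such a soft (Polyak-averaged) target update, where <code>tau</code> is a small mixing coefficient (the value shown is illustrative, not Coach's default):</p>
<pre><code class="python">import torch

def soft_update(target_net, online_net, tau=0.001):
    # theta_target = tau * theta_online + (1 - tau) * theta_target
    with torch.no_grad():
        for target_param, online_param in zip(target_net.parameters(),
                                              online_net.parameters()):
            target_param.copy_(tau * online_param + (1.0 - tau) * target_param)

# After each training step:
# soft_update(critic_target, critic, tau)
# soft_update(actor_target, actor, tau)
</code></pre>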
          </div>
        </div>
        <footer>
          <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
            <a href="../ppo/" class="btn btn-neutral float-right" title="Proximal Policy Optimization">Next <span class="icon icon-circle-arrow-right"></span></a>
            <a href="../ac/" class="btn btn-neutral" title="Actor-Critic"><span class="icon icon-circle-arrow-left"></span> Previous</a>
          </div>
          <hr/>
          <div role="contentinfo">
            <!-- Copyright etc -->
          </div>
          Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
        </footer>
      </div>
    </div>
  </section>
</div>

<div class="rst-versions" role="note" style="cursor: pointer">
  <span class="rst-current-version" data-toggle="rst-current-version">
    <span><a href="../ac/" style="color: #fcfcfc;">« Previous</a></span>
    <span style="margin-left: 15px"><a href="../ppo/" style="color: #fcfcfc">Next »</a></span>
  </span>
</div>
<script>var base_url = '../../..';</script>
<script src="../../../js/theme.js"></script>
<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML"></script>
<script src="../../../search/require.js"></script>
<script src="../../../search/search.js"></script>

</body>
</html>