<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="shortcut icon" href="../../../img/favicon.ico">
<title>Actor-Critic - Reinforcement Learning Coach</title>
<link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href="../../../css/theme.css" type="text/css" />
<link rel="stylesheet" href="../../../css/theme_extra.css" type="text/css" />
<link rel="stylesheet" href="../../../css/highlight.css">
<link href="../../../extra.css" rel="stylesheet">
<script>
// Current page data
var mkdocs_page_name = "Actor-Critic";
var mkdocs_page_input_path = "algorithms/policy_optimization/ac.md";
var mkdocs_page_url = "/algorithms/policy_optimization/ac/";
</script>
<script src="../../../js/jquery-2.1.1.min.js"></script>
<script src="../../../js/modernizr-2.8.3.min.js"></script>
<script type="text/javascript" src="../../../js/highlight.pack.js"></script>
</head>
<body class="wy-body-for-nav" role="document">
|
|
|
|
<div class="wy-grid-for-nav">
|
|
|
|
|
|
<nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
|
|
<div class="wy-side-nav-search">
|
|
<a href="../../.." class="icon icon-home"> Reinforcement Learning Coach</a>
|
|
<div role="search">
|
|
<form id ="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
|
|
<input type="text" name="q" placeholder="Search docs" />
|
|
</form>
|
|
</div>
|
|
</div>
|
|
|
|
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
|
|
<ul class="current">
|
|
|
|
|
|
<li class="toctree-l1">
|
|
|
|
<a class="" href="../../..">Home</a>
|
|
</li>
|
|
|
|
<li class="toctree-l1">
|
|
|
|
<a class="" href="../../../usage/">Usage</a>
|
|
</li>
|
|
|
|
<li class="toctree-l1">
|
|
|
|
<span class="caption-text">Design</span>
|
|
<ul class="subnav">
|
|
<li class="">
|
|
|
|
<a class="" href="../../../design/features/">Features</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../../design/control_flow/">Control Flow</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../../design/network/">Network</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../../design/filters/">Filters</a>
|
|
</li>
|
|
</ul>
|
|
</li>
|
|
|
|
<li class="toctree-l1">
|
|
|
|
<span class="caption-text">Algorithms</span>
|
|
<ul class="subnav">
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/dqn/">DQN</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/double_dqn/">Double DQN</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/dueling_dqn/">Dueling DQN</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/categorical_dqn/">Categorical DQN</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/mmc/">Mixed Monte Carlo</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/pal/">Persistent Advantage Learning</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/nec/">Neural Episodic Control</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/bs_dqn/">Bootstrapped DQN</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/n_step/">N-Step Q Learning</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../../value_optimization/naf/">Normalized Advantage Functions</a>
|
|
</li>
|
|
<li class="">
|
|
|
|
<a class="" href="../pg/">Policy Gradient</a>
|
|
</li>
|
|
<li class=" current">
|
|
|
|
<a class="current" href="./">Actor-Critic</a>
|
|
<ul class="subnav">
|
|
|
|
<li class="toctree-l3"><a href="#actor-critic">Actor-Critic</a></li>
|
|
|
|
<ul>
|
|
|
|
<li><a class="toctree-l4" href="#network-structure">Network Structure</a></li>
|
|
|
|
<li><a class="toctree-l4" href="#algorithm-description">Algorithm Description</a></li>
|
|
|
|
</ul>
|
|
|
|
|
|
</ul>
|
|
</li>
|
|
<li class="">
<a class="" href="../ddpg/">Deep Deterministic Policy Gradients</a>
</li>
<li class="">
<a class="" href="../ppo/">Proximal Policy Optimization</a>
</li>
<li class="">
<a class="" href="../cppo/">Clipped Proximal Policy Optimization</a>
</li>
<li class="">
<a class="" href="../../other/dfp/">Direct Future Prediction</a>
</li>
<li class="">
<a class="" href="../../imitation/bc/">Behavioral Cloning</a>
</li>
</ul>
</li>
<li class="toctree-l1">
<a class="" href="../../../dashboard/">Coach Dashboard</a>
</li>
<li class="toctree-l1">
<span class="caption-text">Contributing</span>
<ul class="subnav">
<li class="">
<a class="" href="../../../contributing/add_agent/">Adding a New Agent</a>
</li>
<li class="">
<a class="" href="../../../contributing/add_env/">Adding a New Environment</a>
</li>
</ul>
</li>
</ul>
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
<nav class="wy-nav-top" role="navigation" aria-label="top navigation">
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="../../..">Reinforcement Learning Coach</a>
</nav>
<div class="wy-nav-content">
<div class="rst-content">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="wy-breadcrumbs">
<li><a href="../../..">Docs</a> »</li>
<li>Algorithms »</li>
<li>Actor-Critic</li>
<li class="wy-breadcrumbs-aside">
</li>
</ul>
<hr/>
</div>
<div role="main">
<div class="section">
<h1 id="actor-critic">Actor-Critic</h1>
|
|
<p><strong>Actions space:</strong> Discrete|Continuous</p>
|
|
<p><strong>References:</strong> <a href="https://arxiv.org/abs/1602.01783">Asynchronous Methods for Deep Reinforcement Learning</a></p>
|
|
<h2 id="network-structure">Network Structure</h2>
|
|
<p><p style="text-align: center;">
|
|
<img src="..\..\design_imgs\ac.png" width=500>
|
|
</p></p>
|
|
<h2 id="algorithm-description">Algorithm Description</h2>
|
|
<h3 id="choosing-an-action-discrete-actions">Choosing an action - Discrete actions</h3>
|
|
<p>The policy network is used in order to predict action probabilites. While training, a sample is taken from a categorical distribution assigned with these probabilities. When testing, the action with the highest probability is used.</p>
|
|
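<p>For illustration only, a minimal sketch of this action-selection rule (the <code>probs</code> array is a hypothetical placeholder for the policy network's softmax output; this is not Coach's actual implementation):</p>
<pre><code class="python">import numpy as np

def choose_action(probs, training=True):
    """Select an action from the policy's output probabilities.

    probs: 1-D array of action probabilities from the policy network head.
    """
    if training:
        # Training: sample from the categorical distribution for exploration.
        return int(np.random.choice(len(probs), p=probs))
    # Evaluation: act greedily on the most probable action.
    return int(np.argmax(probs))
</code></pre>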
<h3 id="training-the-network">Training the network</h3>
|
|
<p>A batch of <script type="math/tex"> T_{max} </script> transitions is used, and the advantages are calculated upon it.</p>
|
|
<p>Advantages can be calculated by either of the following methods (configured by the selected preset) -</p>
|
|
<ol>
<li><strong>A_VALUE</strong> - Estimating the advantage directly:<script type="math/tex; mode=display"> A(s_t, a_t) = \underbrace{\sum_{i=t}^{i=t + k - 1} \gamma^{i-t}r_i +\gamma^{k} V(s_{t+k})}_{Q(s_t, a_t)} - V(s_t) </script>where <script type="math/tex">k</script> is <script type="math/tex">T_{max} - State\_Index</script> for each state in the batch.</li>
<li><strong>GAE</strong> - By following the <a href="https://arxiv.org/abs/1506.02438">Generalized Advantage Estimation</a> paper.</li>
</ol>
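<p>As a sketch only (not Coach's internal code), both estimators can be written in a few lines of NumPy, assuming per-batch arrays of rewards and predicted state values plus a value bootstrap for the state that follows the last transition:</p>
<pre><code class="python">import numpy as np

def a_value_advantages(rewards, values, bootstrap, gamma=0.99):
    """A_VALUE: k-step discounted return minus the state value,
    where k shrinks from T_max down to 1 along the batch."""
    T = len(rewards)
    returns = np.zeros(T)
    ret = bootstrap                     # V(s_{t+k}) bootstrap after the last transition
    for t in reversed(range(T)):
        ret = rewards[t] + gamma * ret  # discounted k-step return
        returns[t] = ret
    return returns - values             # A(s_t, a_t) = Q(s_t, a_t) - V(s_t)

def gae_advantages(rewards, values, bootstrap, gamma=0.99, lam=0.95):
    """GAE: exponentially weighted sum of one-step TD errors."""
    T = len(rewards)
    values_ext = np.append(values, bootstrap)
    advantages = np.zeros(T)
    gae = 0.0
    for t in reversed(range(T)):
        delta = rewards[t] + gamma * values_ext[t + 1] - values_ext[t]
        gae = delta + gamma * lam * gae
        advantages[t] = gae
    return advantages
</code></pre>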
<p>The advantages are then used to accumulate gradients according to
<script type="math/tex; mode=display"> L = -\mathop{\mathbb{E}} [\log (\pi) \cdot A] </script>
</p>
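<p>In other words, each transition contributes <code>-log(pi(a_t|s_t)) * A(s_t, a_t)</code> to the policy loss, with the advantage treated as a constant so that no gradient flows through it. A minimal, framework-agnostic sketch of this quantity (the <code>log_probs</code> argument is a hypothetical placeholder for the log-probabilities of the actions actually taken):</p>
<pre><code class="python">import numpy as np

def policy_loss(log_probs, advantages):
    """Policy-gradient part of the actor-critic loss: L = -E[log(pi) * A].

    log_probs:  log pi(a_t | s_t) for the actions taken in the batch.
    advantages: advantage estimates (A_VALUE or GAE), treated as constants.
    """
    return -np.mean(log_probs * advantages)
</code></pre>
<p>In an actual agent this scalar would be minimized with respect to the policy network's parameters by the chosen deep-learning framework, alongside the critic's value-regression loss.</p>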
</div>
</div>
<footer>
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
<a href="../ddpg/" class="btn btn-neutral float-right" title="Deep Deterministic Policy Gradients">Next <span class="icon icon-circle-arrow-right"></span></a>
<a href="../pg/" class="btn btn-neutral" title="Policy Gradient"><span class="icon icon-circle-arrow-left"></span> Previous</a>
</div>
<hr/>
<div role="contentinfo">
<!-- Copyright etc -->
</div>
Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
</div>
</div>
</section>
</div>
<div class="rst-versions" role="note" style="cursor: pointer">
<span class="rst-current-version" data-toggle="rst-current-version">
<span><a href="../pg/" style="color: #fcfcfc;">« Previous</a></span>
<span style="margin-left: 15px"><a href="../ddpg/" style="color: #fcfcfc">Next »</a></span>
</span>
</div>
<script>var base_url = '../../..';</script>
<script src="../../../js/theme.js"></script>
<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML"></script>
<script src="../../../search/require.js"></script>
<script src="../../../search/search.js"></script>
</body>
</html>