mirror of
https://github.com/gryf/coach.git
synced 2025-12-17 19:20:19 +01:00
* updating the documentation website * adding the built docs * update of api docstrings across coach and tutorials 0-2 * added some missing api documentation * New Sphinx based documentation
289 lines
12 KiB
HTML
289 lines
12 KiB
HTML
|
|
|
|
<!DOCTYPE html>
|
|
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
|
|
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
|
|
<head>
|
|
<meta charset="utf-8">
|
|
|
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
|
|
<title>Dueling DQN — Reinforcement Learning Coach 0.11.0 documentation</title>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
<link rel="stylesheet" href="../../../_static/css/theme.css" type="text/css" />
|
|
<link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" />
|
|
<link rel="stylesheet" href="../../../_static/css/custom.css" type="text/css" />
|
|
<link rel="index" title="Index" href="../../../genindex.html" />
|
|
<link rel="search" title="Search" href="../../../search.html" />
|
|
<link rel="next" title="Mixed Monte Carlo" href="mmc.html" />
|
|
<link rel="prev" title="Deep Q Networks" href="dqn.html" />
|
|
<link href="../../../_static/css/custom.css" rel="stylesheet" type="text/css">
|
|
|
|
|
|
|
|
<script src="../../../_static/js/modernizr.min.js"></script>
|
|
|
|
</head>
|
|
|
|
<body class="wy-body-for-nav">
|
|
|
|
|
|
<div class="wy-grid-for-nav">
|
|
|
|
|
|
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
|
|
<div class="wy-side-scroll">
|
|
<div class="wy-side-nav-search">
|
|
|
|
|
|
|
|
<a href="../../../index.html" class="icon icon-home"> Reinforcement Learning Coach
|
|
|
|
|
|
|
|
|
|
<img src="../../../_static/dark_logo.png" class="logo" alt="Logo"/>
|
|
|
|
</a>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
<div role="search">
|
|
<form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
|
|
<input type="text" name="q" placeholder="Search docs" />
|
|
<input type="hidden" name="check_keywords" value="yes" />
|
|
<input type="hidden" name="area" value="default" />
|
|
</form>
|
|
</div>
|
|
|
|
|
|
</div>
|
|
|
|
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
|
|
|
|
|
|
|
|
|
|
|
|
|
|
<p class="caption"><span class="caption-text">Intro</span></p>
|
|
<ul>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../usage.html">Usage</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../features/index.html">Features</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../selecting_an_algorithm.html">Selecting an Algorithm</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../dashboard.html">Coach Dashboard</a></li>
|
|
</ul>
|
|
<p class="caption"><span class="caption-text">Design</span></p>
|
|
<ul>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../design/control_flow.html">Control Flow</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../design/network.html">Network Design</a></li>
|
|
</ul>
|
|
<p class="caption"><span class="caption-text">Contributing</span></p>
|
|
<ul>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../contributing/add_agent.html">Adding a New Agent</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../../contributing/add_env.html">Adding a New Environment</a></li>
|
|
</ul>
|
|
<p class="caption"><span class="caption-text">Components</span></p>
|
|
<ul class="current">
|
|
<li class="toctree-l1 current"><a class="reference internal" href="../index.html">Agents</a><ul class="current">
|
|
<li class="toctree-l2"><a class="reference internal" href="../policy_optimization/ac.html">Actor-Critic</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="../imitation/bc.html">Behavioral Cloning</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="bs_dqn.html">Bootstrapped DQN</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="categorical_dqn.html">Categorical DQN</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="../imitation/cil.html">Conditional Imitation Learning</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="../policy_optimization/cppo.html">Clipped Proximal Policy Optimization</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="../policy_optimization/ddpg.html">Deep Deterministic Policy Gradient</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="../other/dfp.html">Direct Future Prediction</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="double_dqn.html">Double DQN</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="dqn.html">Deep Q Networks</a></li>
|
|
<li class="toctree-l2 current"><a class="current reference internal" href="#">Dueling DQN</a><ul>
|
|
<li class="toctree-l3"><a class="reference internal" href="#network-structure">Network Structure</a></li>
|
|
<li class="toctree-l3"><a class="reference internal" href="#general-description">General Description</a></li>
|
|
</ul>
|
|
</li>
|
|
<li class="toctree-l2"><a class="reference internal" href="mmc.html">Mixed Monte Carlo</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="n_step.html">N-Step Q Learning</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="naf.html">Normalized Advantage Functions</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="nec.html">Neural Episodic Control</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="pal.html">Persistent Advantage Learning</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="../policy_optimization/pg.html">Policy Gradient</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="../policy_optimization/ppo.html">Proximal Policy Optimization</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="rainbow.html">Rainbow</a></li>
|
|
<li class="toctree-l2"><a class="reference internal" href="qr_dqn.html">Quantile Regression DQN</a></li>
|
|
</ul>
|
|
</li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../architectures/index.html">Architectures</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../environments/index.html">Environments</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../exploration_policies/index.html">Exploration Policies</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../filters/index.html">Filters</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../memories/index.html">Memories</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../core_types.html">Core Types</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../spaces.html">Spaces</a></li>
|
|
<li class="toctree-l1"><a class="reference internal" href="../../additional_parameters.html">Additional Parameters</a></li>
|
|
</ul>
|
|
|
|
|
|
|
|
</div>
|
|
</div>
|
|
</nav>
|
|
|
|
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
|
|
|
|
|
|
<nav class="wy-nav-top" aria-label="top navigation">
|
|
|
|
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
|
|
<a href="../../../index.html">Reinforcement Learning Coach</a>
|
|
|
|
</nav>
|
|
|
|
|
|
<div class="wy-nav-content">
|
|
|
|
<div class="rst-content">
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
<div role="navigation" aria-label="breadcrumbs navigation">
|
|
|
|
<ul class="wy-breadcrumbs">
|
|
|
|
<li><a href="../../../index.html">Docs</a> »</li>
|
|
|
|
<li><a href="../index.html">Agents</a> »</li>
|
|
|
|
<li>Dueling DQN</li>
|
|
|
|
|
|
<li class="wy-breadcrumbs-aside">
|
|
|
|
|
|
<a href="../../../_sources/components/agents/value_optimization/dueling_dqn.rst.txt" rel="nofollow"> View page source</a>
|
|
|
|
|
|
</li>
|
|
|
|
</ul>
|
|
|
|
|
|
<hr/>
|
|
</div>
|
|
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
|
<div itemprop="articleBody">
|
|
|
|
<div class="section" id="dueling-dqn">
|
|
<h1>Dueling DQN<a class="headerlink" href="#dueling-dqn" title="Permalink to this headline">¶</a></h1>
|
|
<p><strong>Actions space:</strong> Discrete</p>
|
|
<p><strong>References:</strong> <a class="reference external" href="https://arxiv.org/abs/1511.06581">Dueling Network Architectures for Deep Reinforcement Learning</a></p>
|
|
<div class="section" id="network-structure">
|
|
<h2>Network Structure<a class="headerlink" href="#network-structure" title="Permalink to this headline">¶</a></h2>
|
|
<img alt="../../../_images/dueling_dqn.png" class="align-center" src="../../../_images/dueling_dqn.png" />
|
|
</div>
|
|
<div class="section" id="general-description">
|
|
<h2>General Description<a class="headerlink" href="#general-description" title="Permalink to this headline">¶</a></h2>
|
|
<p>Dueling DQN presents a change in the network structure compared to DQN.</p>
|
|
<p>Dueling DQN uses a specialized <em>Dueling Q Head</em> in order to separate <span class="math notranslate nohighlight">\(Q\)</span> to an <span class="math notranslate nohighlight">\(A\)</span> (advantage)
|
|
stream and a <span class="math notranslate nohighlight">\(V\)</span> stream. Adding this type of structure to the network head allows the network to better differentiate
|
|
actions from one another, and significantly improves learning.</p>
|
|
<p>In many states, the values of the different actions are very similar, and it is less important which action to take.
|
|
This is especially important in environments where there are many actions to choose from. In DQN, on each training
|
|
iteration, for each of the states in the batch, we update the <span class="math notranslate nohighlight">\(Q\)</span> values only for the specific actions taken in
|
|
those states. This results in slower learning as we do not learn the <span class="math notranslate nohighlight">\(Q\)</span> values for actions that were not taken yet.
|
|
In the dueling architecture, on the other hand, learning is faster, as we start learning the state-value even if only a
|
|
single action has been taken at this state.</p>
|
|
</div>
|
|
</div>
|
|
|
|
|
|
</div>
|
|
|
|
</div>
|
|
<footer>
|
|
|
|
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
|
|
|
|
<a href="mmc.html" class="btn btn-neutral float-right" title="Mixed Monte Carlo" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
|
|
|
|
|
|
<a href="dqn.html" class="btn btn-neutral" title="Deep Q Networks" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
|
|
|
|
</div>
|
|
|
|
|
|
<hr/>
|
|
|
|
<div role="contentinfo">
|
|
<p>
|
|
© Copyright 2018, Intel AI Lab
|
|
|
|
</p>
|
|
</div>
|
|
Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
|
|
|
|
</footer>
|
|
|
|
</div>
|
|
</div>
|
|
|
|
</section>
|
|
|
|
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
<script type="text/javascript" id="documentation_options" data-url_root="../../../" src="../../../_static/documentation_options.js"></script>
|
|
<script type="text/javascript" src="../../../_static/jquery.js"></script>
|
|
<script type="text/javascript" src="../../../_static/underscore.js"></script>
|
|
<script type="text/javascript" src="../../../_static/doctools.js"></script>
|
|
<script async="async" type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
|
|
|
|
|
|
|
|
|
|
<script type="text/javascript" src="../../../_static/js/theme.js"></script>
|
|
|
|
<script type="text/javascript">
|
|
// Enable the Read the Docs theme's sidebar navigation behavior
// (sticky scroll / collapsible menu) once the DOM is ready.
jQuery(document).ready(function () {
    SphinxRtdTheme.Navigation.enable(true);
});
|
|
</script>
|
|
|
|
</body>
|
|
</html> |