<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Design Doc: Parameter Server &mdash; PaddlePaddle  文档</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  

  

  
        <link rel="index" title="索引"
              href="../../genindex.html"/>
        <link rel="search" title="搜索" href="../../search.html"/>
    <link rel="top" title="PaddlePaddle  文档" href="../../index.html"/> 
<script>
var _hmt = _hmt || [];
(function() {
  var hm = document.createElement("script");
  hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba";
  var s = document.getElementsByTagName("script")[0]; 
  s.parentNode.insertBefore(hm, s);
})();
</script>


  
  <script src="../../_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  <div class="wy-grid-for-nav">

    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          

          
            <a href="../../index_cn.html" class="icon icon-home"> PaddlePaddle
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
<nav class="doc-menu-vertical" role="navigation">

<ul>
<li class="toctree-l1"><a class="reference internal" href="../../getstarted/index_cn.html">新手入门</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/quickstart_cn.html">快速开始</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/concepts/use_concepts_cn.html">基本使用概念</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../build_and_install/index_cn.html">安装与编译</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../build_and_install/pip_install_cn.html">使用pip安装</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../build_and_install/docker_install_cn.html">使用Docker安装运行</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../build_and_install/build_from_source_cn.html">从源码编译</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../howto/index_cn.html">进阶使用</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../howto/cmd_parameter/index_cn.html">命令行参数设置</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../howto/cmd_parameter/use_case_cn.html">使用案例</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/cmd_parameter/arguments_cn.html">参数概述</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/cmd_parameter/detail_introduction_cn.html">细节描述</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../howto/cluster/index_cn.html">分布式训练</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../howto/cluster/preparations_cn.html">环境准备</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/cluster/cmd_argument_cn.html">启动参数说明</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/cluster/multi_cluster/index_cn.html">在不同集群中运行</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../howto/cluster/multi_cluster/k8s_cn.html">Kubernetes单机训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../howto/cluster/multi_cluster/k8s_distributed_cn.html">Kubernetes分布式训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../howto/cluster/multi_cluster/openmpi_cn.html">在OpenMPI集群中启动训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../howto/cluster/multi_cluster/fabric_cn.html">使用fabric启动集群训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../howto/cluster/multi_cluster/k8s_aws_cn.html">Kubernetes on AWS</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../howto/capi/index_cn.html">C-API预测库</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../howto/capi/compile_paddle_lib_cn.html">安装与编译C-API预测库</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/capi/organization_of_the_inputs_cn.html">输入/输出数据组织</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/capi/workflow_of_capi_cn.html">C-API使用流程</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../howto/rnn/index_cn.html">RNN模型</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../howto/rnn/rnn_config_cn.html">RNN配置</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/rnn/recurrent_group_cn.html">Recurrent Group教程</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/rnn/hierarchical_layer_cn.html">支持双层序列作为输入的Layer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../howto/rnn/hrnn_rnn_api_compare_cn.html">单双层RNN API对比介绍</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../howto/optimization/gpu_profiling_cn.html">GPU性能调优</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../dev/index_cn.html">开发标准</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../dev/contribute_to_paddle_cn.html">如何贡献代码</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../dev/write_docs_cn.html">如何贡献文档</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../faq/index_cn.html">FAQ</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../faq/build_and_install/index_cn.html">编译安装与单元测试</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/model/index_cn.html">模型配置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/parameter/index_cn.html">参数设置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/local/index_cn.html">本地训练与预测</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/cluster/index_cn.html">集群训练与预测</a></li>
</ul>
</li>
</ul>

</nav>

        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
        <a href="../../index_cn.html">PaddlePaddle</a>
      </nav>


      
      <div class="wy-nav-content">
        <div class="rst-content">
          

 



<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="../../index_cn.html">Docs</a> &raquo;</li>
      
    <li>Design Doc: Parameter Server</li>
      <li class="wy-breadcrumbs-aside">
        
          
            <a href="../../_sources/design/dist_refactor/parameter_server.md.txt" rel="nofollow"> View page source</a>
          
        
      </li>
  </ul>
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="design-doc-parameter-server">
<span id="design-doc-parameter-server"></span><h1>Design Doc: Parameter Server<a class="headerlink" href="#design-doc-parameter-server" title="永久链接至标题"></a></h1>
<div class="section" id="abstract">
<span id="abstract"></span><h2>Abstract<a class="headerlink" href="#abstract" title="永久链接至标题"></a></h2>
<p>We propose an approach to implementing the parameter server. In this
approach, there is no fundamental difference between the trainer and
the parameter server: both run subgraphs, but the subgraphs serve
different purposes.</p>
</div>
<div class="section" id="background">
<span id="background"></span><h2>Background<a class="headerlink" href="#background" title="永久链接至标题"></a></h2>
<p>The previous implementations of the parameter server do not run a
fluid sub-program. Parameter initialization, optimizer computation, network
communication and checkpointing are implemented twice: once on the
trainer and once on the parameter server.</p>
<p>It would be great if we could write the code once and use it on both the
trainer and the parameter server, since this reduces code duplication and
improves extensibility. Given that, after the current refactoring, everything
is represented as a computation graph on the trainer, representing everything
as a computation graph on the parameter server becomes a natural extension.</p>
</div>
<div class="section" id="design">
<span id="design"></span><h2>Design<a class="headerlink" href="#design" title="永久链接至标题"></a></h2>
<div class="section" id="distributed-transpiler">
<span id="distributed-transpiler"></span><h3>Distributed Transpiler<a class="headerlink" href="#distributed-transpiler" title="永久链接至标题"></a></h3>
<p>The <em>Distributed Transpiler</em> converts the user-defined fluid program
into sub-programs to be scheduled on different nodes with the following
steps (a sketch of the corresponding front-end call follows the list):</p>
<ol class="simple">
<li>OP placement: the OPs will be placed on different nodes according
to a heuristic that minimizes the estimated total computation
time. Currently we use a simple heuristic that puts parameter
variables on parameter server workers and everything else on trainer
workers.</li>
<li>Add communication OPs to enable the communication between nodes.</li>
</ol>
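<p>For illustration, the front-end call that triggers this conversion might
look like the minimal sketch below. The class and method names
(<code class="docutils literal"><span class="pre">DistributeTranspiler</span></code>,
<code class="docutils literal"><span class="pre">transpile</span></code>,
<code class="docutils literal"><span class="pre">get_pserver_program</span></code>,
<code class="docutils literal"><span class="pre">get_trainer_program</span></code>) are assumptions
for this sketch, not a finalized API.</p>
<div class="highlight-python"><div class="highlight"><pre>
import paddle.fluid as fluid

# Build the single-node program as usual.
x = fluid.layers.data(name="x", shape=[13], dtype="float32")
y = fluid.layers.data(name="y", shape=[1], dtype="float32")
y_pred = fluid.layers.fc(input=x, size=1)
cost = fluid.layers.mean(fluid.layers.square_error_cost(input=y_pred, label=y))
fluid.optimizer.SGD(learning_rate=0.01).minimize(cost)

# Hypothetical transpiler call: split the program into one sub-program
# per trainer and one per parameter server.
t = fluid.DistributeTranspiler()
t.transpile(trainer_id=0,
            pservers="192.168.0.1:6174,192.168.0.2:6174",
            trainers=2)

pserver_prog = t.get_pserver_program("192.168.0.1:6174")  # run on a pserver
trainer_prog = t.get_trainer_program()                     # run on a trainer
</pre></div></div>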
<p>We will need these OPs: <em>Send</em>, <em>Recv</em>, <em>Enqueue</em>, <em>Dequeue</em>.</p>
<p>Below is an example of converting the user-defined graph to the
subgraphs for the trainer and the parameter server:</p>
<p><img src="src/local-graph.png" width="300"/></p>
<p>After converting:</p>
<p><img src="src/dist-graph.png" width="700"/></p>
<ol class="simple">
<li>The parameter variable W and its optimizer program are placed on the parameter server.</li>
<li>Operators are added to the program.<ul>
<li><em>Send</em> sends data to the connected <em>Recv</em> operator. The
scheduler on the receiving node will only schedule the <em>Recv</em> operator
to run once the <em>Send</em> operator has run (the <em>Send</em> OP will mark
the <em>Recv</em> OP runnable automatically).</li>
<li><em>Enqueue</em> enqueues the input variable; it can block until space
becomes available in the queue.</li>
<li><em>Dequeue</em> outputs a configurable number of tensors from the
queue. It will block until the queue has the required number of
tensors.</li>
</ul>
</li>
</ol>
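<p>To make the intended semantics concrete, the plain-Python sketch below
emulates the blocking behavior of <em>Enqueue</em> and <em>Dequeue</em>
described above. It only illustrates the semantics; it is not the actual OP
implementation, and the class name is made up for this example.</p>
<div class="highlight-python"><div class="highlight"><pre>
import queue

class GradientQueue(object):
    """Emulates the blocking semantics of the Enqueue/Dequeue OPs."""

    def __init__(self, capacity):
        self._q = queue.Queue(maxsize=capacity)

    def enqueue(self, tensor):
        # Blocks until space becomes available in the queue.
        self._q.put(tensor, block=True)

    def dequeue(self, min_count):
        # Blocks until min_count tensors have been received,
        # then returns them as a list.
        return [self._q.get(block=True) for _ in range(min_count)]
</pre></div></div>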
</div>
<div class="section" id="benefits">
<span id="benefits"></span><h3>Benefits<a class="headerlink" href="#benefits" title="永久链接至标题"></a></h3>
<ul class="simple">
<li>Model parallelism becomes easier to implement: it is an extension of
the trainer/parameter-server approach. We can have several &#8220;Transpilers&#8221;
to achieve different goals.</li>
<li>User-defined optimizers are easier to add: the user can now express one as
a sub-program (see the sketch after this list).</li>
<li>No more duplicated logic between the trainer and the parameter
server, as mentioned in the Background section.</li>
</ul>
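<p>As an example of the second point, a plain SGD update could be expressed by
the user as a small sub-program that runs on the parameter server. The sketch
below is illustrative only; the helper function and the exact op types
(<code class="docutils literal"><span class="pre">scale</span></code>,
<code class="docutils literal"><span class="pre">elementwise_sub</span></code>) are assumptions.</p>
<div class="highlight-python"><div class="highlight"><pre>
# Hypothetical user-defined optimizer expressed as a sub-program.
# `w` and `w_grad` are the parameter and gradient variables on the pserver.
def append_sgd_sub_program(block, w, w_grad, learning_rate=0.01):
    # scaled = learning_rate * w_grad
    scaled = block.create_var(name=w_grad.name + ".scaled")
    block.append_op(type="scale",
                    inputs={"X": [w_grad]},
                    outputs={"Out": [scaled]},
                    attrs={"scale": learning_rate})
    # w = w - scaled  (in-place update of the parameter)
    block.append_op(type="elementwise_sub",
                    inputs={"X": [w], "Y": [scaled]},
                    outputs={"Out": [w]})
</pre></div></div>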
</div>
<div class="section" id="challenges">
<span id="challenges"></span><h3>Challenges<a class="headerlink" href="#challenges" title="永久链接至标题"></a></h3>
<ul class="simple">
<li>It is important to balance the parameter shards across multiple
parameter servers. If a single parameter is very big (for example, the
parameters of a word-embedding, fully connected, or softmax layer), we need to
automatically partition that single parameter onto different
parameter servers when possible (i.e., when only an element-wise optimizer
depends on the parameter variable), as sketched below.</li>
<li>In the &#8220;Async SGD&#8221; figure, the &#8220;W&#8221; variable on the parameter server
could be read and written concurrently. See
<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/pull/6394">here</a> for more
details about concurrent programs in Fluid.</li>
</ul>
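<p>A minimal sketch of such automatic partitioning is shown below: a large
parameter is split row-wise into roughly equal shards, one per parameter
server. The helper name and the even row-wise split are assumptions for
illustration; they work because an element-wise optimizer can update each
shard independently.</p>
<div class="highlight-python"><div class="highlight"><pre>
import numpy as np

def shard_parameter(param, num_pservers):
    """Illustrative row-wise split of one large parameter across pservers."""
    return np.array_split(param, num_pservers, axis=0)

# e.g. a 100k x 512 embedding table split across 4 parameter servers
embedding = np.zeros((100000, 512), dtype="float32")
shards = shard_parameter(embedding, 4)  # 4 shards of 25k rows each
</pre></div></div>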
</div>
<div class="section" id="discussion">
<span id="discussion"></span><h3>Discussion<a class="headerlink" href="#discussion" title="永久链接至标题"></a></h3>
<ul class="simple">
<li>Can the Enqueue OP be implemented under our current tensor design
(putting the input tensor into the queue tensor)?</li>
<li>The <em>Dequeue</em> OP will have a variable number of outputs (depending on the
<code class="docutils literal"><span class="pre">min_count</span></code> attribute); does our current design support this? (A similar
question applies to the <em>Add</em> OP.)</li>
</ul>
</div>
<div class="section" id="references">
<span id="references"></span><h3>References:<a class="headerlink" href="#references" title="永久链接至标题"></a></h3>
<p>[1] <a class="reference external" href="https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf">TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems</a></p>
</div>
</div>
</div>


           </div>
          </div>
          <footer>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016, PaddlePaddle developers.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'../../',
            VERSION:'',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true
        };
    </script>
      <script type="text/javascript" src="../../_static/jquery.js"></script>
      <script type="text/javascript" src="../../_static/underscore.js"></script>
      <script type="text/javascript" src="../../_static/doctools.js"></script>
      <script type="text/javascript" src="../../_static/translations.js"></script>
      <script type="text/javascript" src="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js"></script>

  

  
  
    <script type="text/javascript" src="../../_static/js/theme.js"></script>
  

  
  
  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.StickyNav.enable();
      });
  </script>
   

</body>
</html>