<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>启动参数说明 &mdash; PaddlePaddle  文档</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  
        <link rel="index" title="索引"
              href="../../genindex.html"/>
        <link rel="search" title="搜索" href="../../search.html"/>
    <link rel="top" title="PaddlePaddle  文档" href="../../index.html"/>
        <link rel="up" title="分布式训练" href="index_cn.html"/>
        <link rel="next" title="在不同集群中运行" href="multi_cluster/index_cn.html"/>
        <link rel="prev" title="环境准备" href="preparations_cn.html"/> 
<script>
var _hmt = _hmt || [];
(function() {
  var hm = document.createElement("script");
  hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba";
  var s = document.getElementsByTagName("script")[0]; 
  s.parentNode.insertBefore(hm, s);
})();
</script>

  <script src="../../_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  <div class="wy-grid-for-nav">

    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          

          
            <a href="../../index_cn.html" class="icon icon-home"> PaddlePaddle
          

          
          </a>
          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
<nav class="doc-menu-vertical" role="navigation">

<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../getstarted/index_cn.html">新手入门</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/quickstart_cn.html">快速开始</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/concepts/use_concepts_cn.html">基本使用概念</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../build_and_install/index_cn.html">安装与编译</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../build_and_install/pip_install_cn.html">使用pip安装</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../build_and_install/docker_install_cn.html">使用Docker安装运行</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../build_and_install/build_from_source_cn.html">从源码编译</a></li>
</ul>
</li>
<li class="toctree-l1 current"><a class="reference internal" href="../index_cn.html">进阶使用</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../cmd_parameter/index_cn.html">命令行参数设置</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../cmd_parameter/use_case_cn.html">使用案例</a></li>
<li class="toctree-l3"><a class="reference internal" href="../cmd_parameter/arguments_cn.html">参数概述</a></li>
<li class="toctree-l3"><a class="reference internal" href="../cmd_parameter/detail_introduction_cn.html">细节描述</a></li>
</ul>
</li>
<li class="toctree-l2 current"><a class="reference internal" href="index_cn.html">分布式训练</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="preparations_cn.html">环境准备</a></li>
<li class="toctree-l3 current"><a class="current reference internal" href="#">启动参数说明</a></li>
<li class="toctree-l3"><a class="reference internal" href="multi_cluster/index_cn.html">在不同集群中运行</a><ul>
<li class="toctree-l4"><a class="reference internal" href="multi_cluster/k8s_cn.html">Kubernetes单机训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="multi_cluster/k8s_distributed_cn.html">Kubernetes分布式训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="multi_cluster/openmpi_cn.html">在OpenMPI集群中启动训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="multi_cluster/fabric_cn.html">使用fabric启动集群训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="multi_cluster/k8s_aws_cn.html">Kubernetes on AWS</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../capi/index_cn.html">C-API预测库</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../capi/compile_paddle_lib_cn.html">安装与编译C-API预测库</a></li>
<li class="toctree-l3"><a class="reference internal" href="../capi/organization_of_the_inputs_cn.html">输入/输出数据组织</a></li>
<li class="toctree-l3"><a class="reference internal" href="../capi/workflow_of_capi_cn.html">C-API使用流程</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../rnn/index_cn.html">RNN模型</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../rnn/rnn_config_cn.html">RNN配置</a></li>
<li class="toctree-l3"><a class="reference internal" href="../rnn/recurrent_group_cn.html">Recurrent Group教程</a></li>
<li class="toctree-l3"><a class="reference internal" href="../rnn/hierarchical_layer_cn.html">支持双层序列作为输入的Layer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../rnn/hrnn_rnn_api_compare_cn.html">单双层RNN API对比介绍</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../optimization/gpu_profiling_cn.html">GPU性能调优</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../dev/index_cn.html">开发标准</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../dev/contribute_to_paddle_cn.html">如何贡献代码</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../dev/write_docs_cn.html">如何贡献文档</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../faq/index_cn.html">FAQ</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../faq/build_and_install/index_cn.html">编译安装与单元测试</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/model/index_cn.html">模型配置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/parameter/index_cn.html">参数设置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/local/index_cn.html">本地训练与预测</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/cluster/index_cn.html">集群训练与预测</a></li>
</ul>
</li>
</ul>

</nav>
        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
        <a href="../../index_cn.html">PaddlePaddle</a>
      </nav>
      
      <div class="wy-nav-content">
        <div class="rst-content">
          



<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="../../index_cn.html">Docs</a> &raquo;</li>
          <li><a href="../index_cn.html">进阶使用</a> &raquo;</li>
          <li><a href="index_cn.html">分布式训练</a> &raquo;</li>
    <li>启动参数说明</li>
      <li class="wy-breadcrumbs-aside">
        
          
            <a href="../../_sources/howto/cluster/cmd_argument_cn.md.txt" rel="nofollow"> View page source</a>
          
        
      </li>
  </ul>
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="">
<span id="id1"></span><h1>Command-line Arguments<a class="headerlink" href="#" title="Permalink to this headline"></a></h1>
<p>The following uses the code in <code class="docutils literal"><span class="pre">doc/howto/cluster/src/word2vec</span></code> as an example to show how to run distributed training with the PaddlePaddle v2 API.</p>
<div class="section" id="">
<span id="id2"></span><h2>Starting the Parameter Server<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<p>Run the following command to start a parameter server, which will then wait to exchange data with the trainers (compute nodes):</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>$ paddle pserver --port<span class="o">=</span><span class="m">7164</span> --ports_num<span class="o">=</span><span class="m">1</span> --ports_num_for_sparse<span class="o">=</span><span class="m">1</span> --num_gradient_servers<span class="o">=</span><span class="m">1</span>
</pre></div>
</div>
<p>If you want the pserver to run in the background and save its output to a log file, you can run:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>$ stdbuf -oL /usr/bin/nohup paddle pserver --port<span class="o">=</span><span class="m">7164</span> --ports_num<span class="o">=</span><span class="m">1</span> --ports_num_for_sparse<span class="o">=</span><span class="m">1</span> --num_gradient_servers<span class="o">=</span><span class="m">1</span> <span class="p">&amp;</span>&gt; pserver.log
</pre></div>
</div>
<p>Parameter description:</p>
<ul class="simple">
<li>port: <strong>required, default 7164</strong>, the starting port the pserver listens on; together with ports_num it determines the total number of consecutive ports, starting from this port, used for communication (see the worked example after this list)</li>
<li>ports_num: <strong>required, default 1</strong>, the number of ports to listen on</li>
<li>ports_num_for_sparse: <strong>required, default 0</strong>, the number of ports used for communicating sparse-type parameters</li>
<li>num_gradient_servers: <strong>required, default 1</strong>, the total number of trainers (gradient servers) in the current training job</li>
</ul>
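<p>As a worked example (a hedged reading of these options, assuming the sparse ports are allocated right after the dense ones): starting a pserver with <code class="docutils literal"><span class="pre">--port=7164</span> <span class="pre">--ports_num=2</span> <span class="pre">--ports_num_for_sparse=2</span></code> would have it listen on four consecutive ports, 7164 and 7165 for dense parameter traffic and 7166 and 7167 for sparse parameter traffic; <code class="docutils literal"><span class="pre">--num_gradient_servers</span></code> would then be set to the number of trainers that will connect.</p>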
</div>
<div class="section" id="">
<span id="id3"></span><h2>Starting the Trainers<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<p>Run the following command to start a trainer program written in Python (the file name is arbitrary, e.g. train.py):</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>$ python train.py
</pre></div>
</div>
<p>The trainer needs network connectivity to the pservers to complete training. Parameters such as the port and the pserver addresses must be passed to the trainer at startup so that it can connect to the pservers correctly. They can be passed through <a class="reference external" href="https://zh.wikipedia.org/wiki/环境变量">environment variables</a> or through the arguments of <code class="docutils literal"><span class="pre">paddle.init()</span></code> in the program. If both <code class="docutils literal"><span class="pre">paddle.init()</span></code> arguments and environment variables are given, the arguments passed to <code class="docutils literal"><span class="pre">paddle.init()</span></code> take precedence.</p>
<p>Using environment variables:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span><span class="nb">export</span> <span class="nv">PADDLE_INIT_USE_GPU</span><span class="o">=</span>False
<span class="nb">export</span> <span class="nv">PADDLE_INIT_TRAINER_COUNT</span><span class="o">=</span><span class="m">1</span>
<span class="nb">export</span> <span class="nv">PADDLE_INIT_PORT</span><span class="o">=</span><span class="m">7164</span>
<span class="nb">export</span> <span class="nv">PADDLE_INIT_PORTS_NUM</span><span class="o">=</span><span class="m">1</span>
<span class="nb">export</span> <span class="nv">PADDLE_INIT_PORTS_NUM_FOR_SPARSE</span><span class="o">=</span><span class="m">1</span>
<span class="nb">export</span> <span class="nv">PADDLE_INIT_NUM_GRADIENT_SERVERS</span><span class="o">=</span><span class="m">1</span>
<span class="nb">export</span> <span class="nv">PADDLE_INIT_TRAINER_ID</span><span class="o">=</span><span class="m">0</span>
<span class="nb">export</span> <span class="nv">PADDLE_INIT_PSERVERS</span><span class="o">=</span><span class="m">127</span>.0.0.1
</pre></div>
</div>
<p>Using <code class="docutils literal"><span class="pre">paddle.init()</span></code> arguments:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">paddle</span><span class="o">.</span><span class="n">init</span><span class="p">(</span>
        <span class="n">use_gpu</span><span class="o">=</span><span class="bp">False</span><span class="p">,</span>
        <span class="n">trainer_count</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
        <span class="n">port</span><span class="o">=</span><span class="mi">7164</span><span class="p">,</span>
        <span class="n">ports_num</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
        <span class="n">ports_num_for_sparse</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
        <span class="n">num_gradient_servers</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
        <span class="n">trainer_id</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
        <span class="n">pservers</span><span class="o">=</span><span class="s2">&quot;127.0.0.1&quot;</span><span class="p">)</span>
</pre></div>
</div>
<p>Parameter description:</p>
<ul class="simple">
<li>use_gpu: <strong>optional, default False</strong>, whether to train on GPU</li>
<li>trainer_count: <strong>required, default 1</strong>, the number of training threads on the current trainer</li>
<li>port: <strong>required, default 7164</strong>, the port used to connect to the pservers</li>
<li>ports_num: <strong>required, default 1</strong>, the number of ports used to connect to the pservers</li>
<li>ports_num_for_sparse: <strong>required, default 0</strong>, the number of ports used for sparse-type parameter communication with the pservers</li>
<li>num_gradient_servers: <strong>required, default 1</strong>, the total number of trainers in the current training job</li>
<li>trainer_id: <strong>required, default 0</strong>, a unique ID for each trainer, an integer starting from 0</li>
<li>pservers: <strong>required, default 127.0.0.1</strong>, the list of IP addresses of the pservers started for the current training job, with multiple IPs separated by &quot;,&quot;</li>
</ul>
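<p>The two mechanisms describe the same settings. As a minimal sketch (not part of the original sample code), a trainer could read the PADDLE_INIT_* variables listed above and forward them to <code class="docutils literal"><span class="pre">paddle.init()</span></code> explicitly; normally only trainer_id and pservers differ from node to node:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import os
import paddle.v2 as paddle

# Forward the launcher-provided environment settings to paddle.init(),
# falling back to single-node defaults when a variable is missing.
paddle.init(
    use_gpu=os.getenv("PADDLE_INIT_USE_GPU", "False") == "True",
    trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")),
    port=int(os.getenv("PADDLE_INIT_PORT", "7164")),
    ports_num=int(os.getenv("PADDLE_INIT_PORTS_NUM", "1")),
    ports_num_for_sparse=int(os.getenv("PADDLE_INIT_PORTS_NUM_FOR_SPARSE", "1")),
    num_gradient_servers=int(os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS", "1")),
    trainer_id=int(os.getenv("PADDLE_INIT_TRAINER_ID", "0")),
    pservers=os.getenv("PADDLE_INIT_PSERVERS", "127.0.0.1"))
</pre></div>
</div>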
</div>
<div class="section" id="">
<span id="id4"></span><h2>Preparing the Dataset<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<p>Referring to the sample data-preparation script <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/prepare.py">prepare.py</a>, prepare the training and validation datasets. We use the paddle.dataset.imikolov dataset and, at the top of <code class="docutils literal"><span class="pre">prepare.py</span></code>, set <code class="docutils literal"><span class="pre">SPLIT_COUNT</span></code> according to the parallelism of the distributed job (the number of trainer nodes) to split the data into that many shards.</p>
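<p>The idea behind the splitting is simple; the following is an illustrative sketch only (the real <code class="docutils literal"><span class="pre">prepare.py</span></code> works on the paddle.dataset.imikolov data), writing record i to shard i % SPLIT_COUNT so that every trainer receives roughly the same amount of data:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># Illustrative only: split train.txt into SPLIT_COUNT shards named like
# train.txt-00000, train.txt-00001, ... (the naming used by prepare.py).
SPLIT_COUNT = 3
shards = [open("train.txt-%05d" % i, "w") for i in range(SPLIT_COUNT)]
with open("train.txt") as f:
    for i, line in enumerate(f):
        shards[i % SPLIT_COUNT].write(line)
for s in shards:
    s.close()
</pre></div>
</div>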
<p>In production systems, the output of a MapReduce job is usually used as the training data, so there can be many training files and their number is not fixed. In the trainer, the following modulo method can be used to assign training files to each trainer:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="n">train_list</span> <span class="o">=</span> <span class="p">[]</span>
<span class="n">flist</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">listdir</span><span class="p">(</span><span class="s2">&quot;/train_data/&quot;</span><span class="p">)</span>
<span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="n">flist</span><span class="p">:</span>
  <span class="n">suffix</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">f</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s2">&quot;-&quot;</span><span class="p">)[</span><span class="mi">1</span><span class="p">])</span>
  <span class="k">if</span> <span class="n">suffix</span> <span class="o">%</span> <span class="n">TRAINER_COUNT</span> <span class="o">==</span> <span class="n">TRAINER_ID</span><span class="p">:</span>
    <span class="n">train_list</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">f</span><span class="p">)</span>
</pre></div>
</div>
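<p><code class="docutils literal"><span class="pre">TRAINER_COUNT</span></code> and <code class="docutils literal"><span class="pre">TRAINER_ID</span></code> in the snippet above are placeholders. A hedged sketch of one way to obtain them, assuming the PADDLE_INIT_* environment variables from the previous section are set on each node:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import os

# Assumption: the launcher exports the same variables used by paddle.init().
TRAINER_ID = int(os.getenv("PADDLE_INIT_TRAINER_ID", "0"))
TRAINER_COUNT = int(os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS", "1"))
</pre></div>
</div>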
<p>The sample program <code class="docutils literal"><span class="pre">prepare.py</span></code> splits the training set and the test set into multiple files (3 each in this example, with suffixes <code class="docutils literal"><span class="pre">-00000</span></code>, <code class="docutils literal"><span class="pre">-00001</span></code> and <code class="docutils literal"><span class="pre">-00002</span></code>):</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>train.txt
train.txt-00000
train.txt-00001
train.txt-00002
test.txt
test.txt-00000
test.txt-00001
test.txt-00002
</pre></div>
</div>
<p>During distributed training, each trainer process needs to be able to read its own share of the data. Some distributed systems provide a distributed storage service, so that data kept in distributed storage can be read by every node in the cluster. If no distributed storage is used, the training data belonging to each trainer node must be copied to that node manually.</p>
<p>The training-data format and the training program's <code class="docutils literal"><span class="pre">reader()</span></code> differ widely between training tasks, so developers need to split the training data and write the <code class="docutils literal"><span class="pre">reader()</span></code> according to the actual scenario of their own task; an illustrative sketch follows.</p>
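<p>As an illustration only (the real <code class="docutils literal"><span class="pre">reader()</span></code> depends entirely on your data format), a reader over the <code class="docutils literal"><span class="pre">train_list</span></code> built above might look like this:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import os

def cluster_reader(file_list, data_dir="/train_data/"):
    # Returns a reader() that yields one sample per line from the files
    # assigned to this trainer; replace the tokenization with your own parsing.
    def reader():
        for name in file_list:
            with open(os.path.join(data_dir, name)) as f:
                for line in f:
                    yield line.split()
    return reader
</pre></div>
</div>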
</div>
<div class="section" id="">
<span id="id5"></span><h2>Preparing the Training Program<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<p>For every training job, a workspace is created on each node, containing the user's training program, the program's dependencies, and the mounted or downloaded training-data shards.</p>
<p>In the end, the workspace should look like this:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>.
<span class="p">|</span>-- my_lib.py
<span class="p">|</span>-- word_dict.pickle
<span class="p">|</span>-- train.py
<span class="p">|</span>-- train_data_dir/
<span class="p">|</span>   <span class="p">|</span>-- train.txt-00000
<span class="p">|</span>   <span class="p">|</span>-- train.txt-00001
<span class="p">|</span>   <span class="p">|</span>-- train.txt-00002
<span class="sb">`</span>-- test_data_dir/
    <span class="p">|</span>-- test.txt-00000
    <span class="p">|</span>-- test.txt-00001
    <span class="sb">`</span>-- test.txt-00002
</pre></div>
</div>
<ul>
<li><p class="first"><code class="docutils literal"><span class="pre">my_lib.py</span></code>:会被<code class="docutils literal"><span class="pre">train.py</span></code>调用的一些用户定义的库函数,比如PIL库等。</p>
</li>
<li><p class="first"><code class="docutils literal"><span class="pre">word_dict.pickle</span></code>:在<code class="docutils literal"><span class="pre">train.py</span></code>中会使用到的字典数据文件。</p>
</li>
<li><p class="first"><code class="docutils literal"><span class="pre">train.py</span></code>: the training program; see <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py">api_train_v2_cluster.py</a> for reference code. <strong><em>Note:</em></strong> for this sample code, when running on a different distributed computing platform you may need to modify the beginning of <code class="docutils literal"><span class="pre">train.py</span></code> (shown below) to obtain the location of the training data and read the environment-variable configuration (a sketch of mapping the rank to data shards follows after this list):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">cluster_train_file</span> <span class="o">=</span> <span class="s2">&quot;./train_data_dir/train/train.txt&quot;</span>
<span class="n">cluster_test_file</span> <span class="o">=</span> <span class="s2">&quot;./test_data_dir/test/test.txt&quot;</span>
<span class="n">node_id</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s2">&quot;OMPI_COMM_WORLD_RANK&quot;</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">node_id</span><span class="p">:</span>
    <span class="k">raise</span> <span class="ne">EnvironmentError</span><span class="p">(</span><span class="s2">&quot;must provied OMPI_COMM_WORLD_RANK&quot;</span><span class="p">)</span>
</pre></div>
</div>
</li>
<li><p class="first"><code class="docutils literal"><span class="pre">train_data_dir</span></code>:包含训练数据的目录,可以是从分布式存储挂载过来的,也可以是在任务启动前下载到本地的。</p>
</li>
<li><p class="first"><code class="docutils literal"><span class="pre">test_data_dir</span></code>:包含测试数据集的目录。</p>
</li>
</ul>
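<p>A hedged sketch (not taken from <code class="docutils literal"><span class="pre">api_train_v2_cluster.py</span></code>) of how the rank read above could be mapped onto the per-node data shards in the workspace layout shown earlier:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import os

# Assumption: shards follow the prepare.py naming (train.txt-00000, ...), and
# each node reads exactly the shard whose suffix matches its MPI rank.
node_id = int(os.getenv("OMPI_COMM_WORLD_RANK", "0"))
cluster_train_file = "./train_data_dir/train.txt-%05d" % node_id
cluster_test_file = "./test_data_dir/test.txt-%05d" % node_id
</pre></div>
</div>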
</div>
<div class="section" id="sgd">
<span id="sgd"></span><h2>异步 SGD 更新<a class="headerlink" href="#sgd" title="永久链接至标题"></a></h2>
<p>我们可以通过设置 <code class="docutils literal"><span class="pre">optimize</span></code> 的参数使之支持异步SGD更新。
例如,设置 <code class="docutils literal"><span class="pre">AdaGrad</span></code> optimize 的 <code class="docutils literal"><span class="pre">is_async</span></code><code class="docutils literal"><span class="pre">async_lagged_grad_discard_ratio</span></code> 参数:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">adagrad</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">AdaGrad</span><span class="p">(</span>
    <span class="n">is_async</span><span class="o">=</span><span class="bp">True</span><span class="p">,</span>
    <span class="n">async_lagged_grad_discard_ratio</span><span class="o">=</span><span class="mf">1.6</span><span class="p">,</span>
    <span class="n">learning_rate</span><span class="o">=</span><span class="mf">3e-3</span><span class="p">,</span>
    <span class="n">regularization</span><span class="o">=</span><span class="n">paddle</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">L2Regularization</span><span class="p">(</span><span class="mf">8e-4</span><span class="p">))</span>
</pre></div>
</div>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">is_async</span></code>: 是否为异步SGD更新模式。</li>
<li><code class="docutils literal"><span class="pre">async_lagged_grad_discard_ratio</span></code>: 异步SGD更新的步长控制,接收到足够的gradient(
<code class="docutils literal"><span class="pre">async_lagged_grad_discard_ratio</span> <span class="pre">*</span> <span class="pre">num_gradient_servers</span></code>)之后,后面的gradient
将会被抛弃。</li>
</ul>
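<p>As a worked example of this threshold (a hedged reading): with <code class="docutils literal"><span class="pre">async_lagged_grad_discard_ratio=1.6</span></code> and <code class="docutils literal"><span class="pre">num_gradient_servers=4</span></code>, roughly 1.6 * 4 = 6.4, i.e. about 6, gradients are accepted for an update before later, more lagged gradients start being discarded.</p>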
</div>
</div>


           </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="multi_cluster/index_cn.html" class="btn btn-neutral float-right" title="在不同集群中运行" accesskey="n">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="preparations_cn.html" class="btn btn-neutral" title="环境准备" accesskey="p"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016, PaddlePaddle developers.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'../../',
            VERSION:'',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true
        };
    </script>
      <script type="text/javascript" src="../../_static/jquery.js"></script>
      <script type="text/javascript" src="../../_static/underscore.js"></script>
      <script type="text/javascript" src="../../_static/doctools.js"></script>
      <script type="text/javascript" src="../../_static/translations.js"></script>
      <script type="text/javascript" src="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js"></script>
  

  
  
    <script type="text/javascript" src="../../_static/js/theme.js"></script>
  
  
  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.StickyNav.enable();
      });
  </script>
   

</body>
</html>