

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Networks &mdash; PaddlePaddle  documentation</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="../../../_static/css/theme.css" type="text/css" />
  

  
  
        <link rel="index" title="Index"
              href="../../../genindex.html"/>
        <link rel="search" title="Search" href="../../../search.html"/>
    <link rel="top" title="PaddlePaddle  documentation" href="../../../index.html"/>
        <link rel="up" title="Model Configuration" href="../model_configs.html"/>
        <link rel="next" title="Parameter Attribute" href="attr.html"/>
        <link rel="prev" title="Pooling" href="pooling.html"/> 

  <link rel="stylesheet" href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" type="text/css" />
  <link rel="stylesheet" href="../../../_static/css/override.css" type="text/css" />
  <script>
  var _hmt = _hmt || [];
  (function() {
    var hm = document.createElement("script");
    hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba";
    var s = document.getElementsByTagName("script")[0]; 
    s.parentNode.insertBefore(hm, s);
  })();
  </script>

  

  
  <script src="../../../_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  
  <header class="site-header">
    <div class="site-logo">
      <a href="/"><img src="../../../_static/images/PP_w.png"></a>
    </div>
    <div class="site-nav-links">
      <div class="site-menu">
        <a class="fork-on-github" href="https://github.com/PaddlePaddle/Paddle" target="_blank"><i class="fa fa-github"></i>Fork me on Github</a>
        <div class="language-switcher dropdown">
          <a type="button" data-toggle="dropdown">
            <span>English</span>
            <i class="fa fa-angle-up"></i>
            <i class="fa fa-angle-down"></i>
          </a>
          <ul class="dropdown-menu">
            <li><a href="/doc_cn">中文</a></li>
            <li><a href="/doc">English</a></li>
          </ul>
        </div>
        <ul class="site-page-links">
          <li><a href="/">Home</a></li>
        </ul>
      </div>
      <div class="doc-module">
        
        <ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../../getstarted/index_en.html">GET STARTED</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../howto/index_en.html">HOW TO</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../../index_en.html">API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../about/index_en.html">ABOUT</a></li>
</ul>

        
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>        
      </div>
    </div>
  </header>
  
  <div class="main-content-wrap">

    
    <nav class="doc-menu-vertical" role="navigation">
        
          
          <ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../../getstarted/index_en.html">GET STARTED</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../../getstarted/build_and_install/index_en.html">Install and Build</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../getstarted/build_and_install/docker_install_en.html">PaddlePaddle in Docker Containers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../getstarted/build_and_install/ubuntu_install_en.html">Debian Package installation guide</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../getstarted/build_and_install/build_from_source_en.html">Installing from Sources</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../../howto/index_en.html">HOW TO</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/usage/cmd_parameter/index_en.html">Set Command-line Parameters</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cmd_parameter/use_case_en.html">Use Case</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cmd_parameter/arguments_en.html">Argument Outline</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cmd_parameter/detail_introduction_en.html">Detail Description</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/usage/cluster/cluster_train_en.html">Run Distributed Training</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/usage/k8s/k8s_en.html">Paddle On Kubernetes</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/usage/k8s/k8s_aws_en.html">Distributed PaddlePaddle Training on AWS with Kubernetes</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/dev/new_layer_en.html">Write New Layers</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/dev/contribute_to_paddle_en.html">Contribute Code</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/deep_model/rnn/index_en.html">RNN Models</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../howto/deep_model/rnn/rnn_config_en.html">RNN Configuration</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../howto/optimization/gpu_profiling_en.html">Tune GPU Performance</a></li>
</ul>
</li>
<li class="toctree-l1 current"><a class="reference internal" href="../../index_en.html">API</a><ul class="current">
<li class="toctree-l2 current"><a class="reference internal" href="../model_configs.html">Model Configuration</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="activation.html">Activation</a></li>
<li class="toctree-l3"><a class="reference internal" href="layer.html">Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="evaluators.html">Evaluators</a></li>
<li class="toctree-l3"><a class="reference internal" href="optimizer.html">Optimizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="pooling.html">Pooling</a></li>
<li class="toctree-l3 current"><a class="current reference internal" href="#">Networks</a></li>
<li class="toctree-l3"><a class="reference internal" href="attr.html">Parameter Attribute</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../data.html">Data Reader Interface and DataSets</a></li>
<li class="toctree-l2"><a class="reference internal" href="../run_logic.html">Training and Inference</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../../about/index_en.html">ABOUT</a></li>
</ul>

        
    </nav>
    
    <section class="doc-content-wrap">

      

 







<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
      
        <li><a href="../../index_en.html">API</a> > </li>
      
        <li><a href="../model_configs.html">Model Configuration</a> > </li>
      
    <li>Networks</li>
  </ul>
</div>
      
      <div class="wy-nav-content" id="doc-content">
        <div class="rst-content">
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="networks">
<h1>Networks<a class="headerlink" href="#networks" title="Permalink to this headline"></a></h1>
<p>The v2.networks module contains neural network building blocks that combine multiple layers.</p>
<div class="section" id="nlp">
<h2>NLP<a class="headerlink" href="#nlp" title="Permalink to this headline"></a></h2>
<div class="section" id="sequence-conv-pool">
<h3>sequence_conv_pool<a class="headerlink" href="#sequence-conv-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">sequence_conv_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Text convolution and pooling layers helper.</p>
<p>Text input =&gt; Context Projection =&gt; FC Layer =&gt; Pooling =&gt; Output.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of output layer(pooling layer name)</li>
207
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; name of input layer</li>
208 209 210 211 212
<li><strong>context_len</strong> (<em>int</em>) &#8211; context projection length. See
context_projection&#8217;s document.</li>
<li><strong>hidden_size</strong> (<em>int</em>) &#8211; FC Layer size.</li>
<li><strong>context_start</strong> (<em>int</em><em> or </em><em>None</em>) &#8211; context projection length. See
context_projection&#8217;s context_start.</li>
213 214
<li><strong>pool_type</strong> (<em>BasePoolingType.</em>) &#8211; pooling layer type. See pooling_layer&#8217;s document.</li>
<li><strong>context_proj_layer_name</strong> (<em>basestring</em>) &#8211; context projection layer name.
215
None if user don&#8217;t care.</li>
216
<li><strong>context_proj_param_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None.</em>) &#8211; context projection parameter attribute.
217
None if user don&#8217;t care.</li>
218 219 220
<li><strong>fc_layer_name</strong> (<em>basestring</em>) &#8211; fc layer name. None if user don&#8217;t care.</li>
<li><strong>fc_param_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None</em>) &#8211; fc layer parameter attribute. None if user don&#8217;t care.</li>
<li><strong>fc_bias_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None</em>) &#8211; fc bias parameter attribute. False if no bias,
221
None if user don&#8217;t care.</li>
222 223
<li><strong>fc_act</strong> (<em>BaseActivation</em>) &#8211; fc layer activation type. None means tanh</li>
<li><strong>pool_bias_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None.</em>) &#8211; pooling layer bias attr. None if don&#8217;t care.
224
False if no bias.</li>
225 226 227
<li><strong>fc_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; fc layer extra attribute.</li>
<li><strong>context_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; context projection layer extra attribute.</li>
<li><strong>pool_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; pooling layer extra attribute.</li>
228 229 230 231 232 233
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">output layer name.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
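<p>A minimal usage sketch (hedged: the upstream embedding layer <code class="docutils literal"><span class="pre">emb</span></code> and the parameter values are illustrative assumptions, not mandated by this API):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># emb is assumed to be a word-embedding LayerOutput over the input sequence
seq_pool = sequence_conv_pool(input=emb,
                              context_len=3,
                              hidden_size=512)
</pre></div>
</div>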

</div>
<div class="section" id="text-conv-pool">
<span id="api-trainer-config-helpers-network-text-conv-pool"></span><h3>text_conv_pool<a class="headerlink" href="#text-conv-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">text_conv_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Text convolution and pooling layers helper.</p>
<p>Text input =&gt; Context Projection =&gt; FC Layer =&gt; Pooling =&gt; Output.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of output layer(pooling layer name)</li>
255
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; name of input layer</li>
256 257 258 259 260
<li><strong>context_len</strong> (<em>int</em>) &#8211; context projection length. See
context_projection&#8217;s document.</li>
<li><strong>hidden_size</strong> (<em>int</em>) &#8211; FC Layer size.</li>
<li><strong>context_start</strong> (<em>int</em><em> or </em><em>None</em>) &#8211; context projection length. See
context_projection&#8217;s context_start.</li>
261 262
<li><strong>pool_type</strong> (<em>BasePoolingType.</em>) &#8211; pooling layer type. See pooling_layer&#8217;s document.</li>
<li><strong>context_proj_layer_name</strong> (<em>basestring</em>) &#8211; context projection layer name.
263
None if user don&#8217;t care.</li>
264
<li><strong>context_proj_param_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None.</em>) &#8211; context projection parameter attribute.
265
None if user don&#8217;t care.</li>
266 267 268
<li><strong>fc_layer_name</strong> (<em>basestring</em>) &#8211; fc layer name. None if user don&#8217;t care.</li>
<li><strong>fc_param_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None</em>) &#8211; fc layer parameter attribute. None if user don&#8217;t care.</li>
<li><strong>fc_bias_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None</em>) &#8211; fc bias parameter attribute. False if no bias,
269
None if user don&#8217;t care.</li>
270 271
<li><strong>fc_act</strong> (<em>BaseActivation</em>) &#8211; fc layer activation type. None means tanh</li>
<li><strong>pool_bias_attr</strong> (<em>ParameterAttribute</em><em> or </em><em>None.</em>) &#8211; pooling layer bias attr. None if don&#8217;t care.
272
False if no bias.</li>
273 274 275
<li><strong>fc_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; fc layer extra attribute.</li>
<li><strong>context_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; context projection layer extra attribute.</li>
<li><strong>pool_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; pooling layer extra attribute.</li>
276 277 278 279 280 281
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">output layer name.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
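<p>A minimal usage sketch (hedged: <code class="docutils literal"><span class="pre">emb</span></code> and the parameter values are illustrative assumptions):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># same calling pattern as sequence_conv_pool above (assumption)
conv_pool = text_conv_pool(input=emb,
                           context_len=3,
                           hidden_size=512)
</pre></div>
</div>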

</div>
</div>
<div class="section" id="images">
<h2>Images<a class="headerlink" href="#images" title="Permalink to this headline"></a></h2>
<div class="section" id="img-conv-bn-pool">
<h3>img_conv_bn_pool<a class="headerlink" href="#img-conv-bn-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">img_conv_bn_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Convolution, batch normalization, pooling group.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; group name</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; layer&#8217;s input</li>
<li><strong>filter_size</strong> (<em>int</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>num_filters</strong> (<em>int</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>pool_size</strong> (<em>int</em>) &#8211; see img_pool_layer&#8217;s document.</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; see img_pool_layer&#8217;s document.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; see batch_norm_layer&#8217;s document.</li>
<li><strong>groups</strong> (<em>int</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>conv_stride</strong> (<em>int</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>conv_padding</strong> (<em>int</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>conv_bias_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>num_channel</strong> (<em>int</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>conv_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>shared_bias</strong> (<em>bool</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>conv_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see img_conv_layer&#8217;s document.</li>
<li><strong>bn_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; see batch_norm_layer&#8217;s document.</li>
<li><strong>bn_bias_attr</strong> (<em>ParameterAttribute</em>) &#8211; see batch_norm_layer&#8217;s document.</li>
<li><strong>bn_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see batch_norm_layer&#8217;s document.</li>
<li><strong>pool_stride</strong> (<em>int</em>) &#8211; see img_pool_layer&#8217;s document.</li>
<li><strong>pool_padding</strong> (<em>int</em>) &#8211; see img_pool_layer&#8217;s document.</li>
<li><strong>pool_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see img_pool_layer&#8217;s document.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">Layer groups output</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
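<p>A hedged usage sketch (the image input <code class="docutils literal"><span class="pre">img</span></code> and all values are illustrative; see img_conv_layer, batch_norm_layer, and img_pool_layer for the exact semantics):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># conv -&gt; batch norm -&gt; pooling over an image input
conv_bn_pool = img_conv_bn_pool(input=img,
                                filter_size=3,
                                num_filters=64,
                                num_channel=3,
                                pool_size=2,
                                pool_stride=2,
                                act=ReluActivation())
</pre></div>
</div>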

</div>
<div class="section" id="img-conv-group">
<h3>img_conv_group<a class="headerlink" href="#img-conv-group" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">img_conv_group</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Image convolution group, used for the VGG network.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>conv_batchnorm_drop_rate</strong> (<em>list</em>) &#8211; if conv_with_batchnorm[i] is true,
conv_batchnorm_drop_rate[i] represents the dropout rate after the i-th batch normalization.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; layer&#8217;s input.</li>
<li><strong>conv_num_filter</strong> (<em>int</em>) &#8211; number of output channels.</li>
<li><strong>pool_size</strong> (<em>int</em>) &#8211; pooling filter size.</li>
<li><strong>num_channels</strong> (<em>int</em>) &#8211; number of input channels.</li>
<li><strong>conv_padding</strong> (<em>int</em>) &#8211; convolution padding size.</li>
<li><strong>conv_filter_size</strong> (<em>int</em>) &#8211; convolution filter size.</li>
<li><strong>conv_act</strong> (<em>BaseActivation</em>) &#8211; activation function applied after each convolution.</li>
<li><strong>conv_with_batchnorm</strong> (<em>list</em>) &#8211; conv_with_batchnorm[i] represents
whether the i-th convolution is followed by batch normalization.</li>
<li><strong>pool_stride</strong> (<em>int</em>) &#8211; pooling stride size.</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; pooling type.</li>
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; convolution parameter attribute.
None means the default attribute.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">Layer&#8217;s output</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
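<p>A hedged sketch of one VGG-style block (the input layer <code class="docutils literal"><span class="pre">img</span></code> and all values are illustrative assumptions; passing conv_num_filter as a per-convolution list is also an assumption):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># two 3x3 convolutions, each followed by batch norm, then one 2x2 max pooling
block = img_conv_group(input=img,
                       num_channels=3,
                       conv_num_filter=[64, 64],
                       conv_filter_size=3,
                       conv_act=ReluActivation(),
                       conv_with_batchnorm=True,
                       conv_batchnorm_drop_rate=[0.3, 0.0],
                       pool_size=2,
                       pool_stride=2,
                       pool_type=MaxPooling())
</pre></div>
</div>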

</div>
<div class="section" id="simple-img-conv-pool">
<span id="api-trainer-config-helpers-network-simple-img-conv-pool"></span><h3>simple_img_conv_pool<a class="headerlink" href="#simple-img-conv-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_img_conv_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Simple image convolution and pooling group.</p>
<p>Input =&gt; conv =&gt; pooling</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; group name</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
<li><strong>filter_size</strong> (<em>int</em>) &#8211; see img_conv_layer for details</li>
<li><strong>num_filters</strong> (<em>int</em>) &#8211; see img_conv_layer for details</li>
<li><strong>pool_size</strong> (<em>int</em>) &#8211; see img_pool_layer for details</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; see img_pool_layer for details</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; see img_conv_layer for details</li>
<li><strong>groups</strong> (<em>int</em>) &#8211; see img_conv_layer for details</li>
<li><strong>conv_stride</strong> (<em>int</em>) &#8211; see img_conv_layer for details</li>
<li><strong>conv_padding</strong> (<em>int</em>) &#8211; see img_conv_layer for details</li>
<li><strong>bias_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer for details</li>
<li><strong>num_channel</strong> (<em>int</em>) &#8211; see img_conv_layer for details</li>
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer for details</li>
<li><strong>shared_bias</strong> (<em>bool</em>) &#8211; see img_conv_layer for details</li>
<li><strong>conv_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see img_conv_layer for details</li>
<li><strong>pool_stride</strong> (<em>int</em>) &#8211; see img_pool_layer for details</li>
<li><strong>pool_padding</strong> (<em>int</em>) &#8211; see img_pool_layer for details</li>
<li><strong>pool_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see img_pool_layer for details</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">Layer&#8217;s output</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
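<p>A minimal usage sketch (hedged: the image layer <code class="docutils literal"><span class="pre">img</span></code> and the values follow a common LeNet-style configuration and are illustrative):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>conv_pool = simple_img_conv_pool(input=img,
                                 filter_size=5,
                                 num_filters=20,
                                 num_channel=1,
                                 pool_size=2,
                                 pool_stride=2,
                                 act=ReluActivation())
</pre></div>
</div>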

</div>
<div class="section" id="small-vgg">
<h3>small_vgg<a class="headerlink" href="#small-vgg" title="Permalink to this headline"></a></h3>
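<p>A hedged usage sketch (assuming small_vgg follows the calling convention of vgg_16_network below; the input layer <code class="docutils literal"><span class="pre">img</span></code> and all values are illustrative):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># assumption: small_vgg takes the same arguments as vgg_16_network
net = small_vgg(input_image=img,
                num_channels=3,
                num_classes=10)
</pre></div>
</div>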
</div>
<div class="section" id="vgg-16-network">
<h3>vgg_16_network<a class="headerlink" href="#vgg-16-network" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">vgg_16_network</code><span class="sig-paren">(</span><em>input_image</em>, <em>num_channels</em>, <em>num_classes=1000</em><span class="sig-paren">)</span></dt>
<dd><p>The same model as <a class="reference external" href="https://gist.github.com/ksimonyan/211839e770f7b538e2d8">https://gist.github.com/ksimonyan/211839e770f7b538e2d8</a></p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>num_classes</strong> &#8211; </li>
438
<li><strong>input_image</strong> (<em>LayerOutput</em>) &#8211; </li>
439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457
<li><strong>num_channels</strong> (<em>int</em>) &#8211; </li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last"></p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
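<p>A minimal usage sketch (the input layer <code class="docutils literal"><span class="pre">img</span></code> is an illustrative assumption):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>net = vgg_16_network(input_image=img,
                     num_channels=3,
                     num_classes=1000)
</pre></div>
</div>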

</div>
</div>
<div class="section" id="recurrent">
<h2>Recurrent<a class="headerlink" href="#recurrent" title="Permalink to this headline"></a></h2>
<div class="section" id="lstm">
<h3>LSTM<a class="headerlink" href="#lstm" title="Permalink to this headline"></a></h3>
<div class="section" id="lstmemory-unit">
<h4>lstmemory_unit<a class="headerlink" href="#lstmemory-unit" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">lstmemory_unit</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Define the calculations that an LSTM unit performs during a single time step.
This function itself is not a recurrent layer, so it cannot be
used directly to process sequence inputs. It is always used inside a
recurrent_group (see layers.py for more details) to implement an attention
mechanism.</p>
<p>Please refer to <strong>Generating Sequences With Recurrent Neural Networks</strong>
(<a class="reference external" href="https://arxiv.org/abs/1308.0850">https://arxiv.org/abs/1308.0850</a>) for more details about the LSTM.</p>
<div class="math">
470
\[ \begin{align}\begin{aligned}i_t &amp; = \sigma(W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i)\\f_t &amp; = \sigma(W_{x_f}x_{t} + W_{h_f}h_{t-1} + W_{c_f}c_{t-1} + b_f)\\c_t &amp; = f_tc_{t-1} + i_t tanh (W_{x_c}x_t+W_{h_c}h_{t-1} + b_c)\\o_t &amp; = \sigma(W_{x_o}x_{t} + W_{h_o}h_{t-1} + W_{c_o}c_t + b_o)\\h_t &amp; = o_t tanh(c_t)\end{aligned}\end{align} \]</div>
471 472 473
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">lstm_step</span> <span class="o">=</span> <span class="n">lstmemory_unit</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span>
                           <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">,</span>
474 475 476
                           <span class="n">act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">(),</span>
                           <span class="n">gate_act</span><span class="o">=</span><span class="n">SigmoidActivation</span><span class="p">(),</span>
                           <span class="n">state_act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">())</span>
477 478 479 480 481 482 483
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
484
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
485
<li><strong>out_memory</strong> (<em>LayerOutput | None</em>) &#8211; output of previous time step</li>
486 487
<li><strong>name</strong> (<em>basestring</em>) &#8211; lstmemory unit name.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; lstmemory unit size.</li>
488 489 490 491
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; Parameter config, None if use default.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; lstm final activiation type</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; lstm gate activiation type</li>
<li><strong>state_act</strong> (<em>BaseActivation</em>) &#8211; lstm state activiation type.</li>
492
<li><strong>input_proj_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; bias attribute for input-to-hidden projection.
493
False means no bias, None means default bias.</li>
494 495
<li><strong>input_proj_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; extra layer attribute for input to hidden
projection of the LSTM unit, such as dropout, error clipping.</li>
496
<li><strong>lstm_bias_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; bias parameter attribute of lstm layer.
497
False means no bias, None means default bias.</li>
498
<li><strong>lstm_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; lstm layer&#8217;s extra attribute.</li>
499 500 501 502 503 504
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">lstmemory unit name.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="lstmemory-group">
<h4>lstmemory_group<a class="headerlink" href="#lstmemory-group" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">lstmemory_group</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>lstm_group is a recurrent_group version of Long Short Term Memory. It
does exactly the same calculation as the lstmemory layer (see lstmemory in
layers.py for the maths). An advantage is that the LSTM memory
cell states, or hidden states, of every time step are accessible to the
user. This is especially useful in attention models. If you do not need to
access the internal states of the lstm and merely use its outputs,
it is recommended to use lstmemory, which is relatively faster than
lstmemory_group.</p>
<p>NOTE: In PaddlePaddle&#8217;s implementation, the following input-to-hidden
multiplications:
<span class="math">\(W_{x_i}x_{t}\)</span>, <span class="math">\(W_{x_f}x_{t}\)</span>,
<span class="math">\(W_{x_c}x_t\)</span>, and <span class="math">\(W_{x_o}x_{t}\)</span> are not done in lstmemory_unit, in order to
speed up the calculations. Consequently, an additional mixed_layer with
full_matrix_projection must be included before lstmemory_unit is called, as sketched after the example below.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">lstm_step</span> <span class="o">=</span> <span class="n">lstmemory_group</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span>
                            <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">,</span>
                            <span class="n">act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">(),</span>
                            <span class="n">gate_act</span><span class="o">=</span><span class="n">SigmoidActivation</span><span class="p">(),</span>
                            <span class="n">state_act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">())</span>
</pre></div>
</div>
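<p>A hedged sketch of the input projection that the note above requires (the layer <code class="docutils literal"><span class="pre">emb</span></code> and the 4x size multiplier are assumptions drawn from the note, not from this page&#8217;s generated signature):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># compute W x_t outside the unit via a mixed_layer with full_matrix_projection;
# the 4x multiplier assumes the four stacked LSTM gate inputs
lstm_input = mixed_layer(size=256 * 4,
                         input=[full_matrix_projection(input=emb)])
lstm_step = lstmemory_group(input=lstm_input, size=256)
</pre></div>
</div>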
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
545
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
546
<li><strong>size</strong> (<em>int</em>) &#8211; lstmemory group size.</li>
547
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the lstmemory group.</li>
548
<li><strong>out_memory</strong> (<em>LayerOutput | None</em>) &#8211; output of previous time step</li>
549
<li><strong>reverse</strong> (<em>bool</em>) &#8211; is lstm reversed</li>
550 551 552 553 554
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; Parameter config, None if use default.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; lstm final activiation type</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; lstm gate activiation type</li>
<li><strong>state_act</strong> (<em>BaseActivation</em>) &#8211; lstm state activiation type.</li>
<li><strong>lstm_bias_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; bias parameter attribute of lstm layer.
555
False means no bias, None means default bias.</li>
556 557 558 559
<li><strong>input_proj_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; bias attribute for input-to-hidden projection.
False means no bias, None means default bias.</li>
<li><strong>input_proj_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; extra layer attribute for input to hidden
projection of the LSTM unit, such as dropout, error clipping.</li>
560
<li><strong>lstm_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; lstm layer&#8217;s extra attribute.</li>
561 562 563 564 565 566
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the lstmemory group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="simple-lstm">
<h4>simple_lstm<a class="headerlink" href="#simple-lstm" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_lstm</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Simple LSTM cell.</p>
<p>It simply combines a mixed layer with a full_matrix_projection and an lstmemory
layer. The simple lstm cell is implemented by the following equations.</p>
<div class="math">
\[ \begin{align}\begin{aligned}i_t &amp; = \sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)\\f_t &amp; = \sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)\\c_t &amp; = f_t c_{t-1} + i_t \tanh(W_{xc}x_t + W_{hc}h_{t-1} + b_c)\\o_t &amp; = \sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)\\h_t &amp; = o_t \tanh(c_t)\end{aligned}\end{align} \]</div>
<p>Please refer to <strong>Generating Sequences With Recurrent Neural Networks</strong>
(<a class="reference external" href="http://arxiv.org/abs/1308.0850">link</a>) if you want to know more about the lstm.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; lstm layer name.</li>
593
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
594 595
<li><strong>size</strong> (<em>int</em>) &#8211; lstm layer size.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; whether to process the input data in a reverse order</li>
596 597
<li><strong>mat_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; mixed layer&#8217;s matrix projection parameter attribute.</li>
<li><strong>bias_param_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; bias parameter attribute. False means no bias, None
598
means default bias.</li>
599 600 601 602 603 604
<li><strong>inner_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; lstm cell parameter attribute.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; lstm final activiation type</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; lstm gate activiation type</li>
<li><strong>state_act</strong> (<em>BaseActivation</em>) &#8211; lstm state activiation type.</li>
<li><strong>mixed_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; mixed layer&#8217;s extra attribute.</li>
<li><strong>lstm_cell_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; lstm layer&#8217;s extra attribute.</li>
605 606 607 608 609 610
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">lstm layer name.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
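<p>A minimal usage sketch (hedged: the embedding layer <code class="docutils literal"><span class="pre">emb</span></code> and the size are illustrative):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>lstm = simple_lstm(input=emb, size=512)
</pre></div>
</div>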

</div>
<div class="section" id="bidirectional-lstm">
<h4>bidirectional_lstm<a class="headerlink" href="#bidirectional-lstm" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">bidirectional_lstm</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>A bidirectional_lstm is a recurrent unit that iterates over the input
sequence in both forward and backward order, and then concatenates the two
outputs to form a final output. However, concatenation of the two outputs
is not the only way to form the final output; you could also, for example,
just add them together.</p>
<p>Please refer to <strong>Neural Machine Translation by Jointly Learning to Align
and Translate</strong> (<a class="reference external" href="https://arxiv.org/pdf/1409.0473v3.pdf">https://arxiv.org/pdf/1409.0473v3.pdf</a>) for more details
about the bidirectional lstm.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">bi_lstm</span> <span class="o">=</span> <span class="n">bidirectional_lstm</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">input1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">512</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; bidirectional lstm layer name.</li>
643
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
644 645 646 647 648 649 650 651 652
<li><strong>size</strong> (<em>int</em>) &#8211; lstm layer size.</li>
<li><strong>return_seq</strong> (<em>bool</em>) &#8211; If set False, outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences that are
processed in forward and backward directions are
concatenated and returned.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">LayerOutput object according to return_seq.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
</div>
<div class="section" id="gru">
<h3>GRU<a class="headerlink" href="#gru" title="Permalink to this headline"></a></h3>
<div class="section" id="gru-unit">
<h4>gru_unit<a class="headerlink" href="#gru-unit" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">gru_unit</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Define the calculations that a gated recurrent unit performs in a single time
step. This function itself is not a recurrent layer, so it cannot be
used directly to process sequence inputs. It is always used inside a
recurrent_group (see layers.py for more details) to implement an attention
mechanism.</p>
<p>Please see grumemory in layers.py for the details about the maths.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
683
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
684
<li><strong>memory_boot</strong> (<em>LayerOutput | None</em>) &#8211; the initialization state of the LSTM cell.</li>
685 686
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
687 688 689
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; type of the activation</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; type of the gate activation</li>
<li><strong>gru_layer_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; Extra parameter attribute of the gru layer.</li>
690 691 692 693 694 695
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru output layer.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
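<p>A hedged sketch of how gru_unit is typically wrapped in a recurrent_group (the step function, the embedding input <code class="docutils literal"><span class="pre">emb</span></code>, and the size are illustrative assumptions):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>def gru_step(current_input):
    # one GRU step; the surrounding recurrent_group supplies the recurrence
    return gru_unit(input=current_input, size=256)

rnn = recurrent_group(step=gru_step, input=emb, name='gru_rnn')
</pre></div>
</div>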

</div>
<div class="section" id="gru-group">
<h4>gru_group<a class="headerlink" href="#gru-group" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">gru_group</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>gru_group is a recurrent_group version of the Gated Recurrent Unit. It
does exactly the same calculation as the grumemory layer does. An advantage
is that the gru hidden states are accessible to the user. This is
especially useful in attention models. If you do not need to access
any internal state and merely use the outputs of a GRU, it is recommended
to use grumemory, which is relatively faster.</p>
<p>Please see grumemory in layers.py for more detail about the maths.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">gru</span> <span class="o">=</span> <span class="n">gru_group</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span>
                <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">,</span>
                <span class="n">act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">(),</span>
                <span class="n">gate_act</span><span class="o">=</span><span class="n">SigmoidActivation</span><span class="p">())</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
728
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
729
<li><strong>memory_boot</strong> (<em>LayerOutput | None</em>) &#8211; the initialization state of the LSTM cell.</li>
730 731 732
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; whether to process the input data in a reverse order</li>
733 734 735 736
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; type of the activiation</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; type of the gate activiation</li>
<li><strong>gru_bias_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; bias. False means no bias, None means default bias.</li>
<li><strong>gru_layer_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; Extra parameter attribute of the gru layer.</li>
737 738 739 740 741 742
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="simple-gru">
<h4>simple_gru<a class="headerlink" href="#simple-gru" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_gru</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>You may see gru_step_layer and grumemory in layers.py, and gru_unit, gru_group, and
simple_gru in networks.py. The reason why there are so many interfaces is
that we have two ways to implement recurrent neural networks. One way is to
use one complete layer to implement the rnn (including simple rnn, gru and lstm)
over multiple time steps, such as recurrent_layer, lstmemory, and grumemory. But
the multiplication operation <span class="math">\(W x_t\)</span> is not computed in these layers.
See details in their interfaces in layers.py.
The other implementation is to use a recurrent group, which can assemble a
series of layers to compute the rnn step by step. This way is flexible for
attention mechanisms or other complex connections.</p>
<ul class="simple">
<li>gru_step_layer: computes a single rnn step. It needs a memory as input
and can be used in a recurrent group.</li>
<li>gru_unit: a wrapper of gru_step_layer with memory.</li>
<li>gru_group: a GRU cell implemented by a combination of multiple layers in a
recurrent group.
But <span class="math">\(W x_t\)</span> is not done in the group.</li>
<li>grumemory: a GRU cell implemented by one layer, which does the same calculation
as gru_group and is faster than gru_group.</li>
<li>simple_gru: a complete GRU implementation including <span class="math">\(W x_t\)</span> and
gru_group. <span class="math">\(W\)</span> contains <span class="math">\(W_r\)</span>, <span class="math">\(W_z\)</span> and <span class="math">\(W\)</span>; see the
formula in grumemory.</li>
</ul>
<p>As for computational speed, grumemory is faster than
gru_group, and gru_group is faster than simple_gru.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">gru</span> <span class="o">=</span> <span class="n">simple_gru</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
790
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
791 792 793
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; whether to process the input data in a reverse order</li>
794 795 796 797
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; type of the activiation</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; type of the gate activiation</li>
<li><strong>gru_bias_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; bias. False means no bias, None means default bias.</li>
<li><strong>gru_layer_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; Extra parameter attribute of the gru layer.</li>
798 799 800 801 802 803
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="simple-gru2">
<h4>simple_gru2<a class="headerlink" href="#simple-gru2" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_gru2</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>simple_gru2 is the same as simple_gru, but uses grumemory instead.
Please see grumemory in layers.py for more detail about the maths.
simple_gru2 is faster than simple_gru.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">gru</span> <span class="o">=</span> <span class="n">simple_gru2</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
829
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer name.</li>
830 831 832
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; whether to process the input data in a reverse order</li>
833 834 835 836
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; type of the activiation</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; type of the gate activiation</li>
<li><strong>gru_bias_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; bias. False means no bias, None means default bias.</li>
<li><strong>gru_layer_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; Extra parameter attribute of the gru layer.</li>
837 838 839 840 841 842
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="bidirectional-gru">
<h4>bidirectional_gru<a class="headerlink" href="#bidirectional-gru" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">bidirectional_gru</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>A bidirectional_gru is a recurrent unit that iterates over the input
sequence in both forward and backward order, and then concatenates the two
outputs to form a final output. However, concatenation of the two outputs
is not the only way to form the final output; you could also, for example,
just add them together.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">bi_gru</span> <span class="o">=</span> <span class="n">bidirectional_gru</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">input1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">512</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; bidirectional gru layer name.</li>
871
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
872 873 874 875 876 877 878 879 880
<li><strong>size</strong> (<em>int</em>) &#8211; gru layer size.</li>
<li><strong>return_seq</strong> (<em>bool</em>) &#8211; If set False, outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences that are
processed in forward and backward directions are
concatenated and returned.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">LayerOutput object.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
</div>
<div class="section" id="simple-attention">
<h3>simple_attention<a class="headerlink" href="#simple-attention" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_attention</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Calculate and then return a context vector using the attention mechanism.
The size of the context vector equals the size of encoded_sequence.</p>
<div class="math">
\[ \begin{align}\begin{aligned}a(s_{i-1},h_{j}) &amp; = v_{a}f(W_{a}s_{i-1} + U_{a}h_{j})\\e_{i,j} &amp; = a(s_{i-1}, h_{j})\\a_{i,j} &amp; = \frac{\exp(e_{i,j})}{\sum_{k=1}^{T_x}{\exp(e_{i,k})}}\\c_{i} &amp; = \sum_{j=1}^{T_{x}}a_{i,j}h_{j}\end{aligned}\end{align} \]</div>
<p>where <span class="math">\(h_{j}\)</span> is the jth element of encoded_sequence,
<span class="math">\(U_{a}h_{j}\)</span> is the jth element of encoded_proj,
<span class="math">\(s_{i-1}\)</span> is decoder_state, and
<span class="math">\(f\)</span> is weight_act, which is set to tanh by default.</p>
<p>Please refer to <strong>Neural Machine Translation by Jointly Learning to
Align and Translate</strong> for more details. The link is as follows:
<a class="reference external" href="https://arxiv.org/abs/1409.0473">https://arxiv.org/abs/1409.0473</a>.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">context</span> <span class="o">=</span> <span class="n">simple_attention</span><span class="p">(</span><span class="n">encoded_sequence</span><span class="o">=</span><span class="n">enc_seq</span><span class="p">,</span>
                           <span class="n">encoded_proj</span><span class="o">=</span><span class="n">enc_proj</span><span class="p">,</span>
                           <span class="n">decoder_state</span><span class="o">=</span><span class="n">decoder_prev</span><span class="p">,)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the attention model.</li>
921
<li><strong>softmax_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; parameter attribute of sequence softmax
922 923
that is used to produce attention weight</li>
<li><strong>weight_act</strong> (<em>Activation</em>) &#8211; activation of the attention model</li>
924 925
<li><strong>encoded_sequence</strong> (<em>LayerOutput</em>) &#8211; output of the encoder</li>
<li><strong>encoded_proj</strong> (<em>LayerOutput</em>) &#8211; attention weight is computed by a feed forward neural
926 927 928 929 930
network which has two inputs : decoder&#8217;s hidden state
of previous time step and encoder&#8217;s output.
encoded_proj is output of the feed-forward network for
encoder&#8217;s output. Here we pre-compute it outside
simple_attention for speed consideration.</li>
931 932
<li><strong>decoder_state</strong> (<em>LayerOutput</em>) &#8211; hidden state of decoder in previous time step</li>
<li><strong>transform_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; parameter attribute of the feed-forward
933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992
network that takes decoder_state as inputs to
compute attention weight.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">a context vector</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
</div>
</div>


           </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="attr.html" class="btn btn-neutral float-right" title="Parameter Attribute" accesskey="n">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="pooling.html" class="btn btn-neutral" title="Pooling" accesskey="p"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016, PaddlePaddle developers.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'../../../',
            VERSION:'',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true,
            SOURCELINK_SUFFIX: ".txt",
        };
    </script>
      <script type="text/javascript" src="../../../_static/jquery.js"></script>
      <script type="text/javascript" src="../../../_static/underscore.js"></script>
      <script type="text/javascript" src="../../../_static/doctools.js"></script>
      <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
       
  

  
  
    <script type="text/javascript" src="../../../_static/js/theme.js"></script>
  
  
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/js/perfect-scrollbar.jquery.min.js"></script>
  <script src="../../../_static/js/paddle_doc_init.js"></script> 

</body>
</html>