

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Networks &mdash; PaddlePaddle  documentation</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  

  
  
        <link rel="index" title="Index"
              href="../../genindex.html"/>
        <link rel="search" title="Search" href="../../search.html"/>
    <link rel="top" title="PaddlePaddle  documentation" href="../../index.html"/>
        <link rel="up" title="Model Configuration" href="../model_configs.html"/>
        <link rel="next" title="Parameter Attribute" href="attr.html"/>
        <link rel="prev" title="Pooling" href="pooling.html"/> 

  <link rel="stylesheet" href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/css/override.css" type="text/css" />
  <script>
  var _hmt = _hmt || [];
  (function() {
    var hm = document.createElement("script");
    hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba";
    var s = document.getElementsByTagName("script")[0]; 
    s.parentNode.insertBefore(hm, s);
  })();
  </script>

  

  
  <script src="../../_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  
  <header class="site-header">
    <div class="site-logo">
      <a href="/"><img src="../../_static/images/PP_w.png"></a>
    </div>
    <div class="site-nav-links">
      <div class="site-menu">
        <a class="fork-on-github" href="https://github.com/PaddlePaddle/Paddle" target="_blank"><i class="fa fa-github"></i>Fork me on Github</a>
        <div class="language-switcher dropdown">
          <a type="button" data-toggle="dropdown">
            <span>English</span>
            <i class="fa fa-angle-up"></i>
            <i class="fa fa-angle-down"></i>
          </a>
          <ul class="dropdown-menu">
            <li><a href="/doc_cn">中文</a></li>
            <li><a href="/doc">English</a></li>
          </ul>
        </div>
        <ul class="site-page-links">
          <li><a href="/">Home</a></li>
        </ul>
      </div>
      <div class="doc-module">
        
        <ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../overview.html">API Overview</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../model_configs.html">Model Configuration</a></li>
<li class="toctree-l1"><a class="reference internal" href="../data.html">Data Reader Interface and DataSets</a></li>
<li class="toctree-l1"><a class="reference internal" href="../run_logic.html">Training and Inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../fluid.html">Fluid</a></li>
</ul>

        
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>        
      </div>
    </div>
  </header>
  
  <div class="main-content-wrap">

    
    <nav class="doc-menu-vertical" role="navigation">
        
          
          <ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../overview.html">API Overview</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../model_configs.html">Model Configuration</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="activation.html">Activation</a></li>
<li class="toctree-l2"><a class="reference internal" href="layer.html">Layers</a></li>
<li class="toctree-l2"><a class="reference internal" href="evaluators.html">Evaluators</a></li>
<li class="toctree-l2"><a class="reference internal" href="optimizer.html">Optimizer</a></li>
<li class="toctree-l2"><a class="reference internal" href="pooling.html">Pooling</a></li>
<li class="toctree-l2 current"><a class="current reference internal" href="#">Networks</a></li>
<li class="toctree-l2"><a class="reference internal" href="attr.html">Parameter Attribute</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../data.html">Data Reader Interface and DataSets</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../data/data_reader.html">Data Reader Interface</a></li>
<li class="toctree-l2"><a class="reference internal" href="../data/image.html">Image Interface</a></li>
<li class="toctree-l2"><a class="reference internal" href="../data/dataset.html">Dataset</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../run_logic.html">Training and Inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../fluid.html">Fluid</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../fluid/layers.html">layers</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/data_feeder.html">data_feeder</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/executor.html">executor</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/initializer.html">initializer</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/evaluator.html">evaluator</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/nets.html">nets</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/optimizer.html">optimizer</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/param_attr.html">param_attr</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/profiler.html">profiler</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/regularizer.html">regularizer</a></li>
<li class="toctree-l2"><a class="reference internal" href="../fluid/io.html">io</a></li>
</ul>
</li>
</ul>

        
    </nav>
    
    <section class="doc-content-wrap">

      

 







<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
      
        <li><a href="../model_configs.html">Model Configuration</a> > </li>
      
    <li>Networks</li>
  </ul>
</div>
      
      <div class="wy-nav-content" id="doc-content">
        <div class="rst-content">
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="networks">
<h1>Networks<a class="headerlink" href="#networks" title="Permalink to this headline"></a></h1>
<p>The v2.networks module contains reusable pieces of neural networks that combine multiple layers.</p>
<div class="section" id="nlp">
<h2>NLP<a class="headerlink" href="#nlp" title="Permalink to this headline"></a></h2>
<div class="section" id="sequence-conv-pool">
<h3>sequence_conv_pool<a class="headerlink" href="#sequence-conv-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">sequence_conv_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Text convolution pooling group.</p>
<p>Text input =&gt; Context Projection =&gt; FC Layer =&gt; Pooling =&gt; Output.</p>
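<p>A minimal usage sketch, assuming <em>emb</em> is a hypothetical sequence-typed LayerOutput, such as the output of an embedding layer:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># emb is a hypothetical sequence LayerOutput (e.g. a word embedding layer)
text_repr = sequence_conv_pool(input=emb,
                               context_len=3,
                               hidden_size=128)
</pre></div>
</div>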
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; group name.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>context_len</strong> (<em>int</em>) &#8211; context projection length. See
context_projection&#8217;s document.</li>
<li><strong>hidden_size</strong> (<em>int</em>) &#8211; FC Layer size.</li>
<li><strong>context_start</strong> (<em>int|None</em>) &#8211; context start position. See
context_projection&#8217;s context_start.</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; pooling layer type. See pooling_layer&#8217;s document.</li>
<li><strong>context_proj_layer_name</strong> (<em>basestring</em>) &#8211; context projection layer name.
None if the user doesn&#8217;t care.</li>
<li><strong>context_proj_param_attr</strong> (<em>ParameterAttribute|None</em>) &#8211; padding parameter attribute of the context
projection layer. If set to False, the padding is always zero.</li>
<li><strong>fc_layer_name</strong> (<em>basestring</em>) &#8211; fc layer name. None if the user doesn&#8217;t care.</li>
<li><strong>fc_param_attr</strong> (<em>ParameterAttribute|None</em>) &#8211; fc layer parameter attribute. None if the user doesn&#8217;t care.</li>
<li><strong>fc_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; fc bias parameter attribute. False means no bias;
None if the user doesn&#8217;t care.</li>
<li><strong>fc_act</strong> (<em>BaseActivation</em>) &#8211; fc layer activation type. None means tanh.</li>
<li><strong>pool_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; pooling layer bias attribute. False means no bias;
None if the user doesn&#8217;t care.</li>
<li><strong>fc_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; fc layer extra attribute.</li>
<li><strong>context_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; context projection layer extra attribute.</li>
<li><strong>pool_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; pooling layer extra attribute.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">layer&#8217;s output.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="text-conv-pool">
<span id="api-trainer-config-helpers-network-text-conv-pool"></span><h3>text_conv_pool<a class="headerlink" href="#text-conv-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">text_conv_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Text convolution pooling group.</p>
<p>Text input =&gt; Context Projection =&gt; FC Layer =&gt; Pooling =&gt; Output.</p>
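<p>text_conv_pool accepts the same arguments as sequence_conv_pool above; a minimal sketch with the same hypothetical <em>emb</em> input:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># emb is a hypothetical sequence LayerOutput (e.g. a word embedding layer)
text_repr = text_conv_pool(input=emb, context_len=3, hidden_size=128)
</pre></div>
</div>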
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; group name.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>context_len</strong> (<em>int</em>) &#8211; context projection length. See
context_projection&#8217;s document.</li>
<li><strong>hidden_size</strong> (<em>int</em>) &#8211; FC Layer size.</li>
<li><strong>context_start</strong> (<em>int|None</em>) &#8211; context start position. See
context_projection&#8217;s context_start.</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; pooling layer type. See pooling_layer&#8217;s document.</li>
<li><strong>context_proj_layer_name</strong> (<em>basestring</em>) &#8211; context projection layer name.
None if the user doesn&#8217;t care.</li>
<li><strong>context_proj_param_attr</strong> (<em>ParameterAttribute|None</em>) &#8211; padding parameter attribute of the context
projection layer. If set to False, the padding is always zero.</li>
<li><strong>fc_layer_name</strong> (<em>basestring</em>) &#8211; fc layer name. None if the user doesn&#8217;t care.</li>
<li><strong>fc_param_attr</strong> (<em>ParameterAttribute|None</em>) &#8211; fc layer parameter attribute. None if the user doesn&#8217;t care.</li>
<li><strong>fc_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; fc bias parameter attribute. False means no bias;
None if the user doesn&#8217;t care.</li>
<li><strong>fc_act</strong> (<em>BaseActivation</em>) &#8211; fc layer activation type. None means tanh.</li>
<li><strong>pool_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; pooling layer bias attribute. False means no bias;
None if the user doesn&#8217;t care.</li>
<li><strong>fc_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; fc layer extra attribute.</li>
<li><strong>context_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; context projection layer extra attribute.</li>
<li><strong>pool_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; pooling layer extra attribute.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">layer&#8217;s output.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
</div>
<div class="section" id="images">
<h2>Images<a class="headerlink" href="#images" title="Permalink to this headline"></a></h2>
<div class="section" id="img-conv-bn-pool">
<h3>img_conv_bn_pool<a class="headerlink" href="#img-conv-bn-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">img_conv_bn_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Convolution, batch normalization, pooling group.</p>
<p>Img input =&gt; Conv =&gt; BN =&gt; Pooling =&gt; Output.</p>
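<p>A minimal usage sketch, assuming <em>img</em> is a hypothetical image LayerOutput with three channels:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># img is a hypothetical 3-channel image LayerOutput
conv_bn = img_conv_bn_pool(input=img,
                           filter_size=3,
                           num_filters=64,
                           num_channel=3,
                           pool_size=2,
                           pool_stride=2,
                           act=ReluActivation())
</pre></div>
</div>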
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; group name.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>filter_size</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>num_filters</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>pool_size</strong> (<em>int</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; see batch_norm_layer for details.</li>
<li><strong>groups</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_stride</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_padding</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_bias_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>num_channel</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>shared_bias</strong> (<em>bool</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_layer_attr</strong> (<em>ExtraLayerOutput</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>bn_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; see batch_norm_layer for details.</li>
<li><strong>bn_bias_attr</strong> (<em>ParameterAttribute</em>) &#8211; see batch_norm_layer for details.</li>
<li><strong>bn_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see batch_norm_layer for details.</li>
<li><strong>pool_stride</strong> (<em>int</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>pool_padding</strong> (<em>int</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>pool_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see img_pool_layer for details.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">layer&#8217;s output</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="img-conv-group">
<h3>img_conv_group<a class="headerlink" href="#img-conv-group" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">img_conv_group</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Image convolution group, used for VGG-style networks.</p>
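<p>A hedged sketch of one VGG-style block (two 3x3 convolutions followed by 2x2 pooling; <em>img</em> is a hypothetical 3-channel image LayerOutput, and the activation and pooling types follow the conventions of the other examples on this page):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># img is a hypothetical 3-channel image LayerOutput
block = img_conv_group(input=img,
                       num_channels=3,
                       conv_num_filter=[64, 64],
                       conv_filter_size=3,
                       conv_padding=1,
                       conv_act=ReluActivation(),
                       conv_with_batchnorm=[True, True],
                       conv_batchnorm_drop_rate=[0.0, 0.0],
                       pool_size=2,
                       pool_stride=2,
                       pool_type=MaxPooling())
</pre></div>
</div>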
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>conv_batchnorm_drop_rate</strong> (<em>list</em>) &#8211; if conv_with_batchnorm[i] is true,
conv_batchnorm_drop_rate[i] is the dropout rate after the i-th batch normalization layer.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>conv_num_filter</strong> (<em>list|tuple</em>) &#8211; list of output channels num.</li>
<li><strong>pool_size</strong> (<em>int</em>) &#8211; pooling filter size.</li>
<li><strong>num_channels</strong> (<em>int</em>) &#8211; input channels num.</li>
<li><strong>conv_padding</strong> (<em>int</em>) &#8211; convolution padding size.</li>
<li><strong>conv_filter_size</strong> (<em>int</em>) &#8211; convolution filter size.</li>
<li><strong>conv_act</strong> (<em>BaseActivation</em>) &#8211; activation function after each convolution.</li>
<li><strong>conv_with_batchnorm</strong> (<em>list</em>) &#8211; if conv_with_batchnorm[i] is true,
there is a batch normalization operation after each convolution.</li>
<li><strong>pool_stride</strong> (<em>int</em>) &#8211; pooling stride size.</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; pooling type.</li>
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; param attribute of convolution layer,
None means default attribute.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">layer&#8217;s output</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="simple-img-conv-pool">
<span id="api-trainer-config-helpers-network-simple-img-conv-pool"></span><h3>simple_img_conv_pool<a class="headerlink" href="#simple-img-conv-pool" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_img_conv_pool</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Simple image convolution and pooling group.</p>
<p>Img input =&gt; Conv =&gt; Pooling =&gt; Output.</p>
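<p>A minimal usage sketch, assuming <em>img</em> is a hypothetical single-channel image LayerOutput (e.g. a 28x28 grayscale image):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># img is a hypothetical 1-channel image LayerOutput
conv_pool = simple_img_conv_pool(input=img,
                                 filter_size=5,
                                 num_filters=20,
                                 num_channel=1,
                                 pool_size=2,
                                 pool_stride=2,
                                 act=ReluActivation())
</pre></div>
</div>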
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; group name.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>filter_size</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>num_filters</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>pool_size</strong> (<em>int</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>pool_type</strong> (<em>BasePoolingType</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>groups</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_stride</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_padding</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>bias_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>num_channel</strong> (<em>int</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>shared_bias</strong> (<em>bool</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>conv_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see img_conv_layer for details.</li>
<li><strong>pool_stride</strong> (<em>int</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>pool_padding</strong> (<em>int</em>) &#8211; see img_pool_layer for details.</li>
<li><strong>pool_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; see img_pool_layer for details.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">layer&#8217;s output</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="small-vgg">
<h3>small_vgg<a class="headerlink" href="#small-vgg" title="Permalink to this headline"></a></h3>
</div>
<div class="section" id="vgg-16-network">
<h3>vgg_16_network<a class="headerlink" href="#vgg-16-network" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">vgg_16_network</code><span class="sig-paren">(</span><em>input_image</em>, <em>num_channels</em>, <em>num_classes=1000</em><span class="sig-paren">)</span></dt>
<dd><p>The same model as in <a class="reference external" href="https://gist.github.com/ksimonyan/211839e770f7b538e2d8">https://gist.github.com/ksimonyan/211839e770f7b538e2d8</a>.</p>
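<p>A minimal usage sketch, assuming <em>img</em> is a hypothetical 224x224 RGB image LayerOutput:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># img is a hypothetical 3-channel, 224x224 image LayerOutput
predict = vgg_16_network(input_image=img,
                         num_channels=3,
                         num_classes=102)
</pre></div>
</div>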
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>num_classes</strong> (<em>int</em>) &#8211; number of classes.</li>
<li><strong>input_image</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>num_channels</strong> (<em>int</em>) &#8211; input channels num.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">layer&#8217;s output</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
</div>
<div class="section" id="recurrent">
<h2>Recurrent<a class="headerlink" href="#recurrent" title="Permalink to this headline"></a></h2>
<div class="section" id="lstm">
<h3>LSTM<a class="headerlink" href="#lstm" title="Permalink to this headline"></a></h3>
<div class="section" id="lstmemory-unit">
<h4>lstmemory_unit<a class="headerlink" href="#lstmemory-unit" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">lstmemory_unit</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>lstmemory_unit defines the calculation process of an LSTM unit during a
single time step. This function is not a recurrent layer, so it cannot be
used directly to process sequence input. It is always used in
recurrent_group (see layers.py for more details) to implement attention
mechanisms.</p>
<p>Please refer to <strong>Generating Sequences With Recurrent Neural Networks</strong>
for more details about LSTM:
<a class="reference external" href="https://arxiv.org/abs/1308.0850">https://arxiv.org/abs/1308.0850</a></p>
<div class="math">
\[ \begin{align}\begin{aligned}i_t &amp; = \sigma(W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i)\\f_t &amp; = \sigma(W_{x_f}x_{t} + W_{h_f}h_{t-1} + W_{c_f}c_{t-1} + b_f)\\c_t &amp; = f_tc_{t-1} + i_t \tanh(W_{x_c}x_t+W_{h_c}h_{t-1} + b_c)\\o_t &amp; = \sigma(W_{x_o}x_{t} + W_{h_o}h_{t-1} + W_{c_o}c_t + b_o)\\h_t &amp; = o_t \tanh(c_t)\end{aligned}\end{align} \]
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">lstm_step</span> <span class="o">=</span> <span class="n">lstmemory_unit</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span>
                           <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">,</span>
                           <span class="n">act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">(),</span>
                           <span class="n">gate_act</span><span class="o">=</span><span class="n">SigmoidActivation</span><span class="p">(),</span>
                           <span class="n">state_act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">())</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; Input layer.</li>
<li><strong>out_memory</strong> (<em>LayerOutput | None</em>) &#8211; The output of previous time step.</li>
<li><strong>name</strong> (<em>basestring</em>) &#8211; The lstmemory unit name.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; The lstmemory unit size.</li>
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; The parameter attribute for the weights in
input to hidden projection.
None means default attribute.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; The last activation type of the lstm.</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; The gate activation type of the lstm.</li>
<li><strong>state_act</strong> (<em>BaseActivation</em>) &#8211; The state activation type of the lstm.</li>
<li><strong>input_proj_bias_attr</strong> (<em>ParameterAttribute|bool|None</em>) &#8211; The parameter attribute for the bias in
input to hidden projection.
False or None means no bias.
If this parameter is set to True,
the bias is initialized to zero.</li>
<li><strong>input_proj_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; The extra layer attribute for
input to hidden projection of the LSTM unit,
such as dropout, error clipping.</li>
<li><strong>lstm_bias_attr</strong> (<em>ParameterAttribute|True|None</em>) &#8211; The parameter attribute for the bias in lstm layer.
False or None means no bias.
If this parameter is set to True,
the bias is initialized to zero.</li>
<li><strong>lstm_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; The extra attribute of lstm layer.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">The lstmemory unit name.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="lstmemory-group">
<h4>lstmemory_group<a class="headerlink" href="#lstmemory-group" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">lstmemory_group</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>lstm_group is a recurrent_group version of Long Short Term Memory. It
does exactly the same calculation as the lstmemory layer (see lstmemory in
layers.py for the maths). A promising benefit is that the LSTM memory
cell states (or hidden states) at every time step are accessible to the
user. This is especially useful in attention models. If you do not need to
access the internal states of the lstm and merely use its outputs,
it is recommended to use lstmemory, which is relatively faster than
lstmemory_group.</p>
<p>NOTE: In PaddlePaddle&#8217;s implementation, the following input-to-hidden
multiplications:
<span class="math">\(W_{x_i}x_{t}\)</span> , <span class="math">\(W_{x_f}x_{t}\)</span>,
<span class="math">\(W_{x_c}x_t\)</span>, <span class="math">\(W_{x_o}x_{t}\)</span> are not performed in lstmemory_unit in order to
speed up the calculations. Consequently, an additional mixed_layer with
full_matrix_projection must be included before lstmemory_unit is called.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">lstm_step</span> <span class="o">=</span> <span class="n">lstmemory_group</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span>
                            <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">,</span>
                            <span class="n">act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">(),</span>
                            <span class="n">gate_act</span><span class="o">=</span><span class="n">SigmoidActivation</span><span class="p">(),</span>
                            <span class="n">state_act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">())</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; Input layer.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; The lstmemory group size.</li>
<li><strong>name</strong> (<em>basestring</em>) &#8211; The name of lstmemory group.</li>
<li><strong>out_memory</strong> (<em>LayerOutput | None</em>) &#8211; The output of previous time step.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; Process the input in a reverse order or not.</li>
<li><strong>param_attr</strong> (<em>ParameterAttribute</em>) &#8211; The parameter attribute for the weights in
input to hidden projection.
None means default attribute.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; The last activation type of the lstm.</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; The gate activation type of the lstm.</li>
<li><strong>state_act</strong> (<em>BaseActivation</em>) &#8211; The state activation type of the lstm.</li>
<li><strong>input_proj_bias_attr</strong> (<em>ParameterAttribute|bool|None</em>) &#8211; The parameter attribute for the bias in
input to hidden projection.
False or None means no bias.
If this parameter is set to True,
the bias is initialized to zero.</li>
<li><strong>input_proj_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; The extra layer attribute for
input to hidden projection of the LSTM unit,
such as dropout, error clipping.</li>
<li><strong>lstm_bias_attr</strong> (<em>ParameterAttribute|True|None</em>) &#8211; The parameter attribute for the bias in lstm layer.
False or None means no bias.
If this parameter is set to True,
the bias is initialized to zero.</li>
<li><strong>lstm_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; The extra attribute of lstm layer.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the lstmemory group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="simple-lstm">
<h4>simple_lstm<a class="headerlink" href="#simple-lstm" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_lstm</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Simple LSTM Cell.</p>
<p>It simply combines a mixed layer with full_matrix_projection and a lstmemory
layer. The simple LSTM cell is implemented with the following equations.</p>
<div class="math">
\[ \begin{align}\begin{aligned}i_t &amp; = \sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)\\f_t &amp; = \sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)\\c_t &amp; = f_tc_{t-1} + i_t \tanh(W_{xc}x_t+W_{hc}h_{t-1} + b_c)\\o_t &amp; = \sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)\\h_t &amp; = o_t \tanh(c_t)\end{aligned}\end{align} \]
<p>Please refer to <strong>Generating Sequences With Recurrent Neural Networks</strong> for more
details about the lstm: <a class="reference external" href="http://arxiv.org/abs/1308.0850">http://arxiv.org/abs/1308.0850</a>.</p>
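<p>A minimal usage sketch in the style of the other examples on this page (<em>layer1</em> is a hypothetical input LayerOutput):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># layer1 is a hypothetical input LayerOutput
lstm = simple_lstm(input=layer1, size=256)
</pre></div>
</div>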
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; lstm layer name.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; layer&#8217;s input.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; lstm layer size.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; process the input in a reverse order or not.</li>
<li><strong>mat_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; parameter attribute of matrix projection in mixed layer.</li>
<li><strong>bias_param_attr</strong> (<em>ParameterAttribute|False</em>) &#8211; bias parameter attribute. False means no bias, None
means default bias.</li>
<li><strong>inner_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; parameter attribute of lstm cell.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; last activation type of the lstm.</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; gate activation type of the lstm.</li>
<li><strong>state_act</strong> (<em>BaseActivation</em>) &#8211; state activation type of the lstm.</li>
<li><strong>mixed_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; extra attribute of mixed layer.</li>
<li><strong>lstm_cell_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; extra attribute of lstm.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">layer&#8217;s output.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="bidirectional-lstm">
<h4>bidirectional_lstm<a class="headerlink" href="#bidirectional-lstm" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">bidirectional_lstm</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>A bidirectional_lstm is a recurrent unit that iterates over the input
sequence in both forward and backward order, and then concatenates the two
outputs to form the final output. Concatenation is not the only way to
form the final output, however; you could also, for example, simply add the
two outputs together.</p>
<p>Please refer to <strong>Neural Machine Translation by Jointly Learning to Align
and Translate</strong> for more details about the bidirectional lstm:
<a class="reference external" href="https://arxiv.org/pdf/1409.0473v3.pdf">https://arxiv.org/pdf/1409.0473v3.pdf</a></p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">bi_lstm</span> <span class="o">=</span> <span class="n">bidirectional_lstm</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">input1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">512</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; bidirectional lstm layer name.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; lstm layer size.</li>
<li><strong>return_seq</strong> (<em>bool</em>) &#8211; If set to False, only the outputs at the last time step
are concatenated and returned.
If set to True, the entire output sequences in the forward
and backward directions are concatenated and returned.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">LayerOutput object.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
</div>
<div class="section" id="gru">
<h3>GRU<a class="headerlink" href="#gru" title="Permalink to this headline"></a></h3>
<div class="section" id="gru-unit">
<h4>gru_unit<a class="headerlink" href="#gru-unit" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">gru_unit</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>gru_unit defines the calculation process of a gated recurrent unit during a single
time step. This function is not a recurrent layer, so it cannot be
used directly to process sequence input. It is always used in
recurrent_group (see layers.py for more details) to implement attention
mechanisms.</p>
<p>Please see grumemory in layers.py for the details about the maths.</p>
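<p>A hedged sketch in the style of lstmemory_unit above; <em>gru_in</em> is a hypothetical input whose input-to-hidden projection has already been computed (as with grumemory, its size is assumed to be three times the gru size):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span># gru_in is a hypothetical, already-projected input (size = 3 * 256)
gru_step = gru_unit(input=gru_in,
                    size=256,
                    act=TanhActivation(),
                    gate_act=SigmoidActivation())
</pre></div>
</div>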
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>memory_boot</strong> (<em>LayerOutput | None</em>) &#8211; the initial state of the GRU cell.</li>
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; activation type of the gru.</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; gate activation type of the gru.</li>
<li><strong>gru_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; Extra attribute of the gru layer.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru output layer.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="gru-group">
<h4>gru_group<a class="headerlink" href="#gru-group" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">gru_group</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>gru_group is a recurrent_group version of the Gated Recurrent Unit. It
does exactly the same calculation as the grumemory layer. A promising
benefit is that the gru hidden states are accessible to the user. This is
especially useful in attention models. If you do not need to access
any internal state and merely use the outputs of a GRU, it is recommended
to use grumemory, which is relatively faster.</p>
<p>Please see grumemory in layers.py for more details about the maths.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">gru</span> <span class="o">=</span> <span class="n">gru_group</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span>
                <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">,</span>
                <span class="n">act</span><span class="o">=</span><span class="n">TanhActivation</span><span class="p">(),</span>
                <span class="n">gate_act</span><span class="o">=</span><span class="n">SigmoidActivation</span><span class="p">())</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>memory_boot</strong> (<em>LayerOutput | None</em>) &#8211; the initial state of the GRU cell.</li>
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; process the input in a reverse order or not.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; activation type of the gru.</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; gate activation type of the gru.</li>
<li><strong>gru_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; bias parameter attribute of gru layer,
False means no bias, None means default bias.</li>
<li><strong>gru_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; Extra attribute of the gru layer.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="simple-gru">
<h4>simple_gru<a class="headerlink" href="#simple-gru" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_gru</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>You may see gru_step_layer and grumemory in layers.py, and gru_unit, gru_group and
simple_gru in networks.py. The reason there are so many interfaces is
that we have two ways to implement recurrent neural networks. One way is to
use one complete layer to implement the rnn (including simple rnn, gru and lstm)
over multiple time steps, such as recurrent_layer, lstmemory and grumemory. But
the multiplication operation <span class="math">\(W x_t\)</span> is not computed in these layers.
See the details in their interfaces in layers.py.
The other way is to use a recurrent group, which can assemble a
series of layers to compute the rnn step by step. This way is more flexible for
attention mechanisms or other complex connections.</p>
<ul class="simple">
<li>gru_step_layer: computes the rnn for one step only. It needs a memory as input
and can be used in a recurrent group.</li>
<li>gru_unit: a wrapper around gru_step_layer with memory.</li>
<li>gru_group: a GRU cell implemented by a combination of multiple layers in a
recurrent group.
But <span class="math">\(W x_t\)</span> is not computed in the group.</li>
<li>grumemory: a GRU cell implemented by one layer, which does the same calculation
as gru_group and is faster than gru_group.</li>
<li>simple_gru: a complete GRU implementation including <span class="math">\(W x_t\)</span> and
gru_group. <span class="math">\(W\)</span> contains <span class="math">\(W_r\)</span>, <span class="math">\(W_z\)</span> and <span class="math">\(W\)</span>; see the
formula in grumemory.</li>
</ul>
<p>In terms of computational speed, grumemory is relatively faster than
gru_group, and gru_group is relatively faster than simple_gru.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">gru</span> <span class="o">=</span> <span class="n">simple_gru</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; process the input in a reverse order or not.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; activation type of the gru.</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; gate activation type of the gru.</li>
<li><strong>gru_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; bias parameter attribute of gru layer,
False means no bias, None means default bias.</li>
<li><strong>gru_layer_attr</strong> (<em>ExtraLayerAttribute</em>) &#8211; Extra attribute of the gru layer.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="simple-gru2">
<h4>simple_gru2<a class="headerlink" href="#simple-gru2" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_gru2</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>simple_gru2 is the same as simple_gru, but uses grumemory instead.
Please refer to grumemory in layers.py for more details about the maths.
simple_gru2 is faster than simple_gru.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">gru</span> <span class="o">=</span> <span class="n">simple_gru2</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">layer1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">256</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the gru group.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; hidden size of the gru.</li>
<li><strong>reverse</strong> (<em>bool</em>) &#8211; process the input in a reverse order or not.</li>
<li><strong>act</strong> (<em>BaseActivation</em>) &#8211; activation type of the gru.</li>
<li><strong>gate_act</strong> (<em>BaseActivation</em>) &#8211; gate activation type of the gru.</li>
<li><strong>gru_bias_attr</strong> (<em>ParameterAttribute|False|None</em>) &#8211; bias parameter attribute of gru layer,
False means no bias, None means default bias.</li>
<li><strong>gru_param_attr</strong> (<em>ParameterAttribute|None</em>) &#8211; param parameter attribute of gru layer,
None means default param.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">the gru group.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
<div class="section" id="bidirectional-gru">
<h4>bidirectional_gru<a class="headerlink" href="#bidirectional-gru" title="Permalink to this headline"></a></h4>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">bidirectional_gru</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>A bidirectional_gru is a recurrent unit that iterates over the input
sequence in both forward and backward order, and then concatenates the two
outputs to form the final output. Concatenation is not the only way to
form the final output, however; you could also, for example, simply add the
two outputs together.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">bi_gru</span> <span class="o">=</span> <span class="n">bidirectional_gru</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="p">[</span><span class="n">input1</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="mi">512</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; bidirectional gru layer name.</li>
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; input layer.</li>
<li><strong>size</strong> (<em>int</em>) &#8211; gru layer size.</li>
<li><strong>return_seq</strong> (<em>bool</em>) &#8211; If set to False, only the outputs at the last time step
are concatenated and returned.
If set to True, the entire output sequences in the forward
and backward directions are concatenated and returned.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">LayerOutput object.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>

</div>
</div>
<div class="section" id="simple-attention">
<h3>simple_attention<a class="headerlink" href="#simple-attention" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">simple_attention</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Calculate and return a context vector with an attention mechanism.
The size of the context vector equals that of the encoded_sequence.</p>
<div class="math">
\[ \begin{align}\begin{aligned}a(s_{i-1},h_{j}) &amp; = v_{a}f(W_{a}s_{i-1} + U_{a}h_{j})\\e_{i,j} &amp; = a(s_{i-1}, h_{j})\\a_{i,j} &amp; = \frac{exp(e_{i,j})}{\sum_{k=1}^{T_x}{exp(e_{i,k})}}\\c_{i} &amp; = \sum_{j=1}^{T_{x}}a_{i,j}h_{j}\end{aligned}\end{align} \]</div>
<p>where <span class="math">\(h_{j}\)</span> is the jth element of encoded_sequence,
<span class="math">\(U_{a}h_{j}\)</span> is the jth element of encoded_proj,
<span class="math">\(s_{i-1}\)</span> is decoder_state, and
<span class="math">\(f\)</span> is weight_act, which is set to tanh by default.</p>
<p>Please refer to <strong>Neural Machine Translation by Jointly Learning to
Align and Translate</strong> for more details. The link is as follows:
<a class="reference external" href="https://arxiv.org/abs/1409.0473">https://arxiv.org/abs/1409.0473</a>.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">context</span> <span class="o">=</span> <span class="n">simple_attention</span><span class="p">(</span><span class="n">encoded_sequence</span><span class="o">=</span><span class="n">enc_seq</span><span class="p">,</span>
                           <span class="n">encoded_proj</span><span class="o">=</span><span class="n">enc_proj</span><span class="p">,</span>
                           <span class="n">decoder_state</span><span class="o">=</span><span class="n">decoder_prev</span><span class="p">,)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; name of the attention model.</li>
<li><strong>softmax_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; parameter attribute of the sequence softmax
that is used to produce the attention weights.</li>
<li><strong>weight_act</strong> (<em>BaseActivation</em>) &#8211; activation of the attention model.</li>
<li><strong>encoded_sequence</strong> (<em>LayerOutput</em>) &#8211; output of the encoder.</li>
<li><strong>encoded_proj</strong> (<em>LayerOutput</em>) &#8211; The attention weights are computed by a feed-forward
neural network with two inputs: the decoder&#8217;s hidden state
from the previous time step and the encoder&#8217;s output.
encoded_proj is the output of that feed-forward network for
the encoder&#8217;s output; it is pre-computed outside
simple_attention for speed.</li>
<li><strong>decoder_state</strong> (<em>LayerOutput</em>) &#8211; hidden state of the decoder at the previous time step.</li>
<li><strong>transform_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; parameter attribute of the feed-forward
network that takes decoder_state as input to
compute the attention weights.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">a context vector</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
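<p>The formulas above can be traced with a small NumPy sketch. This illustrates the math
only; the shapes and random parameters are assumptions, not the layer&#8217;s actual
implementation:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

T_x, d = 5, 8                     # encoder steps, hidden size (assumed)
rng = np.random.RandomState(0)
h = rng.randn(T_x, d)             # encoded_sequence: h_j
U_a = rng.randn(d, d)
W_a = rng.randn(d, d)
v_a = rng.randn(d)
s_prev = rng.randn(d)             # decoder_state: s_{i-1}

enc_proj = h.dot(U_a.T)           # encoded_proj: U_a h_j, pre-computed
e = np.tanh(s_prev.dot(W_a.T) + enc_proj).dot(v_a)   # scores e_{i,j}
alpha = softmax(e)                # attention weights a_{i,j}
context = alpha.dot(h)            # context vector c_i, same size as h_j
</pre></div>
</div>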
</dd></dl>

</div>
<div class="section" id="dot-product-attention">
<h3>dot_product_attention<a class="headerlink" href="#dot-product-attention" title="Permalink to this headline"></a></h3>
<dl class="function">
<dt>
<code class="descclassname">paddle.v2.networks.</code><code class="descname">dot_product_attention</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><p>Calculate and return a context vector with a dot-product attention mechanism.
The dimension of the context vector equals that of the attended_sequence.</p>
<div class="math">
\[ \begin{align}\begin{aligned}a(s_{i-1},h_{j}) &amp; = s_{i-1}^\mathrm{T} h_{j}\\e_{i,j} &amp; = a(s_{i-1}, h_{j})\\a_{i,j} &amp; = \frac{exp(e_{i,j})}{\sum_{k=1}^{T_x}{exp(e_{i,k})}}\\c_{i} &amp; = \sum_{j=1}^{T_{x}}a_{i,j}z_{j}\end{aligned}\end{align} \]</div>
<p>where <span class="math">\(h_{j}\)</span> is the jth element of encoded_sequence,
<span class="math">\(z_{j}\)</span> is the jth element of attended_sequence, and
<span class="math">\(s_{i-1}\)</span> is transformed_state.</p>
<p>The example usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">context</span> <span class="o">=</span> <span class="n">dot_product_attention</span><span class="p">(</span><span class="n">encoded_sequence</span><span class="o">=</span><span class="n">enc_seq</span><span class="p">,</span>
                                <span class="n">attended_sequence</span><span class="o">=</span><span class="n">att_seq</span><span class="p">,</span>
                                <span class="n">transformed_state</span><span class="o">=</span><span class="n">state</span><span class="p">,)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>basestring</em>) &#8211; A prefix attached to the name of each layer defined inside
dot_product_attention.</li>
<li><strong>softmax_param_attr</strong> (<em>ParameterAttribute</em>) &#8211; The parameter attribute of the sequence softmax
that is used to produce the attention weights.</li>
<li><strong>encoded_sequence</strong> (<em>LayerOutput</em>) &#8211; The output hidden vectors of the encoder.</li>
<li><strong>attended_sequence</strong> (<em>LayerOutput</em>) &#8211; The sequence to be attended. The attention weights are
computed by a feed-forward neural network with two inputs:
the decoder&#8217;s transformed hidden state from the previous
time step and the encoder&#8217;s output.</li>
<li><strong>transformed_state</strong> (<em>LayerOutput</em>) &#8211; The transformed hidden state of the decoder from the
previous time step. Since the dot product is taken between it and the
encoded_sequence, their dimensions must be equal. For flexibility,
any transformation of the decoder&#8217;s hidden state is assumed to have
been done outside dot_product_attention, and none is performed inside;
users may therefore pass either the original or the transformed state.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">The context vector.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
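<p>A NumPy sketch of the computation, again with assumed shapes and random values
rather than the layer&#8217;s real implementation:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

T_x, d, d_z = 5, 8, 6             # steps, state size, attended size (assumed)
rng = np.random.RandomState(0)
h = rng.randn(T_x, d)             # encoded_sequence: h_j
z = rng.randn(T_x, d_z)           # attended_sequence: z_j
s = rng.randn(d)                  # transformed_state s_{i-1}, same size as h_j

e = h.dot(s)                      # scores e_{i,j} = s_{i-1}^T h_j
alpha = softmax(e)                # attention weights a_{i,j}
context = alpha.dot(z)            # context c_i, with the dimension of z_j
</pre></div>
</div>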
</dd></dl>

</div>
</div>
</div>


           </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="attr.html" class="btn btn-neutral float-right" title="Parameter Attribute" accesskey="n">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="pooling.html" class="btn btn-neutral" title="Pooling" accesskey="p"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016, PaddlePaddle developers.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'../../',
            VERSION:'',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true,
            SOURCELINK_SUFFIX: ".txt",
        };
    </script>
      <script type="text/javascript" src="../../_static/jquery.js"></script>
      <script type="text/javascript" src="../../_static/underscore.js"></script>
      <script type="text/javascript" src="../../_static/doctools.js"></script>
      <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
       
  

  
  
    <script type="text/javascript" src="../../_static/js/theme.js"></script>
  
  
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/js/perfect-scrollbar.jquery.min.js"></script>
  <script src="../../_static/js/paddle_doc_init.js"></script> 

</body>
</html>