<!DOCTYPE html> <!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]--> <!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]--> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>layers — PaddlePaddle 文档</title> <link rel="stylesheet" href="../../../_static/css/theme.css" type="text/css" /> <link rel="index" title="索引" href="../../../genindex.html"/> <link rel="search" title="搜索" href="../../../search.html"/> <link rel="top" title="PaddlePaddle 文档" href="../../../index.html"/> <link rel="up" title="Fluid" href="../fluid.html"/> <link rel="next" title="data_feeder" href="data_feeder.html"/> <link rel="prev" title="Fluid" href="../fluid.html"/> <link rel="stylesheet" href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" type="text/css" /> <link rel="stylesheet" href="../../../_static/css/override.css" type="text/css" /> <script> var _hmt = _hmt || []; (function() { var hm = document.createElement("script"); hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba"; var s = document.getElementsByTagName("script")[0]; s.parentNode.insertBefore(hm, s); })(); </script> <script src="../../../_static/js/modernizr.min.js"></script> </head> <body class="wy-body-for-nav" role="document"> <header class="site-header"> <div class="site-logo"> <a href="/"><img src="../../../_static/images/PP_w.png"></a> </div> <div class="site-nav-links"> <div class="site-menu"> <a class="fork-on-github" href="https://github.com/PaddlePaddle/Paddle" target="_blank"><i class="fa fa-github"></i>Fork me on Github</a> <div class="language-switcher dropdown"> <a type="button" data-toggle="dropdown"> <span>English</span> <i class="fa fa-angle-up"></i> <i class="fa fa-angle-down"></i> </a> <ul class="dropdown-menu"> <li><a href="/doc_cn">中文</a></li> <li><a href="/doc">English</a></li> </ul> </div> <ul class="site-page-links"> <li><a href="/">Home</a></li> </ul> </div> <div 
class="doc-module"> <ul class="current"> <li class="toctree-l1"><a class="reference internal" href="../../../getstarted/index_cn.html">新手入门</a></li> <li class="toctree-l1"><a class="reference internal" href="../../../howto/index_cn.html">进阶指南</a></li> <li class="toctree-l1 current"><a class="reference internal" href="../../index_cn.html">API</a></li> <li class="toctree-l1"><a class="reference internal" href="../../../faq/index_cn.html">FAQ</a></li> </ul> <div role="search"> <form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get"> <input type="text" name="q" placeholder="Search docs" /> <input type="hidden" name="check_keywords" value="yes" /> <input type="hidden" name="area" value="default" /> </form> </div> </div> </div> </header> <div class="main-content-wrap"> <nav class="doc-menu-vertical" role="navigation"> <ul class="current"> <li class="toctree-l1"><a class="reference internal" href="../../../getstarted/index_cn.html">新手入门</a><ul> <li class="toctree-l2"><a class="reference internal" href="../../../getstarted/build_and_install/index_cn.html">安装与编译</a><ul> <li class="toctree-l3"><a class="reference internal" href="../../../getstarted/build_and_install/pip_install_cn.html">使用pip安装</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../getstarted/build_and_install/docker_install_cn.html">使用Docker安装运行</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/dev/build_cn.html">用Docker编译和测试PaddlePaddle</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../getstarted/build_and_install/build_from_source_cn.html">从源码编译</a></li> </ul> </li> <li class="toctree-l2"><a class="reference internal" href="../../../getstarted/concepts/use_concepts_cn.html">基本使用概念</a></li> </ul> </li> <li class="toctree-l1"><a class="reference internal" href="../../../howto/index_cn.html">进阶指南</a><ul> <li class="toctree-l2"><a class="reference internal" 
href="../../../howto/usage/cmd_parameter/index_cn.html">设置命令行参数</a><ul> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cmd_parameter/use_case_cn.html">使用案例</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cmd_parameter/arguments_cn.html">参数概述</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cmd_parameter/detail_introduction_cn.html">细节描述</a></li> </ul> </li> <li class="toctree-l2"><a class="reference internal" href="../../../howto/usage/cluster/cluster_train_cn.html">分布式训练</a><ul> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cluster/fabric_cn.html">fabric集群</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cluster/openmpi_cn.html">openmpi集群</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cluster/k8s_cn.html">kubernetes单机</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cluster/k8s_distributed_cn.html">kubernetes distributed分布式</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/cluster/k8s_aws_cn.html">AWS上运行kubernetes集群训练</a></li> </ul> </li> <li class="toctree-l2"><a class="reference internal" href="../../../howto/usage/capi/index_cn.html">PaddlePaddle C-API</a><ul> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/capi/compile_paddle_lib_cn.html">编译 PaddlePaddle 预测库</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/capi/organization_of_the_inputs_cn.html">输入/输出数据组织</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/usage/capi/workflow_of_capi_cn.html">C-API 使用流程</a></li> </ul> </li> <li class="toctree-l2"><a class="reference internal" href="../../../howto/dev/contribute_to_paddle_cn.html">如何贡献代码</a></li> <li class="toctree-l2"><a class="reference internal" 
href="../../../howto/dev/write_docs_cn.html">如何贡献/修改文档</a></li> <li class="toctree-l2"><a class="reference internal" href="../../../howto/deep_model/rnn/index_cn.html">RNN相关模型</a><ul> <li class="toctree-l3"><a class="reference internal" href="../../../howto/deep_model/rnn/rnn_config_cn.html">RNN配置</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/deep_model/rnn/recurrent_group_cn.html">Recurrent Group教程</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/deep_model/rnn/hierarchical_layer_cn.html">支持双层序列作为输入的Layer</a></li> <li class="toctree-l3"><a class="reference internal" href="../../../howto/deep_model/rnn/hrnn_rnn_api_compare_cn.html">单双层RNN API对比介绍</a></li> </ul> </li> <li class="toctree-l2"><a class="reference internal" href="../../../howto/optimization/gpu_profiling_cn.html">GPU性能分析与调优</a></li> </ul> </li> <li class="toctree-l1 current"><a class="reference internal" href="../../index_cn.html">API</a><ul class="current"> <li class="toctree-l2"><a class="reference internal" href="../model_configs.html">模型配置</a><ul> <li class="toctree-l3"><a class="reference internal" href="../config/activation.html">Activation</a></li> <li class="toctree-l3"><a class="reference internal" href="../config/layer.html">Layers</a></li> <li class="toctree-l3"><a class="reference internal" href="../config/evaluators.html">Evaluators</a></li> <li class="toctree-l3"><a class="reference internal" href="../config/optimizer.html">Optimizer</a></li> <li class="toctree-l3"><a class="reference internal" href="../config/pooling.html">Pooling</a></li> <li class="toctree-l3"><a class="reference internal" href="../config/networks.html">Networks</a></li> <li class="toctree-l3"><a class="reference internal" href="../config/attr.html">Parameter Attribute</a></li> </ul> </li> <li class="toctree-l2"><a class="reference internal" href="../data.html">数据访问</a><ul> <li class="toctree-l3"><a class="reference internal" 
href="../data/data_reader.html">Data Reader Interface</a></li> <li class="toctree-l3"><a class="reference internal" href="../data/image.html">Image Interface</a></li> <li class="toctree-l3"><a class="reference internal" href="../data/dataset.html">Dataset</a></li> </ul> </li> <li class="toctree-l2"><a class="reference internal" href="../run_logic.html">训练与应用</a></li> <li class="toctree-l2 current"><a class="reference internal" href="../fluid.html">Fluid</a><ul class="current"> <li class="toctree-l3 current"><a class="current reference internal" href="#">layers</a></li> <li class="toctree-l3"><a class="reference internal" href="data_feeder.html">data_feeder</a></li> <li class="toctree-l3"><a class="reference internal" href="executor.html">executor</a></li> <li class="toctree-l3"><a class="reference internal" href="initializer.html">initializer</a></li> <li class="toctree-l3"><a class="reference internal" href="evaluator.html">evaluator</a></li> <li class="toctree-l3"><a class="reference internal" href="nets.html">nets</a></li> <li class="toctree-l3"><a class="reference internal" href="optimizer.html">optimizer</a></li> <li class="toctree-l3"><a class="reference internal" href="param_attr.html">param_attr</a></li> <li class="toctree-l3"><a class="reference internal" href="profiler.html">profiler</a></li> <li class="toctree-l3"><a class="reference internal" href="regularizer.html">regularizer</a></li> <li class="toctree-l3"><a class="reference internal" href="io.html">io</a></li> </ul> </li> </ul> </li> <li class="toctree-l1"><a class="reference internal" href="../../../faq/index_cn.html">FAQ</a><ul> <li class="toctree-l2"><a class="reference internal" href="../../../faq/build_and_install/index_cn.html">编译安装与单元测试</a></li> <li class="toctree-l2"><a class="reference internal" href="../../../faq/model/index_cn.html">模型配置</a></li> <li class="toctree-l2"><a class="reference internal" href="../../../faq/parameter/index_cn.html">参数设置</a></li> <li class="toctree-l2"><a 
class="reference internal" href="../../../faq/local/index_cn.html">本地训练与预测</a></li> <li class="toctree-l2"><a class="reference internal" href="../../../faq/cluster/index_cn.html">集群训练与预测</a></li> </ul> </li> </ul> </nav> <section class="doc-content-wrap"> <div role="navigation" aria-label="breadcrumbs navigation"> <ul class="wy-breadcrumbs"> <li><a href="../../index_cn.html">API</a> > </li> <li><a href="../fluid.html">Fluid</a> > </li> <li>layers</li> </ul> </div> <div class="wy-nav-content" id="doc-content"> <div class="rst-content"> <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article"> <div itemprop="articleBody"> <div class="section" id="layers"> <h1>layers<a class="headerlink" href="#layers" title="永久链接至标题">¶</a></h1> <div class="section" id="control-flow"> <h2>control_flow<a class="headerlink" href="#control-flow" title="永久链接至标题">¶</a></h2> <div class="section" id="split-lod-tensor"> <h3>split_lod_tensor<a class="headerlink" href="#split-lod-tensor" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">split_lod_tensor</code><span class="sig-paren">(</span><em>input</em>, <em>mask</em>, <em>level=0</em><span class="sig-paren">)</span></dt> <dd><p><strong>split_lod_tensor</strong></p> <p>This function takes in an input that contains the complete lod information, and takes in a mask which is used to mask certain parts of the input. 
The output is the true branch and the false branch with the mask applied to the input at a certain level in the tensor.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>tuple|list|None</em>) – The input tensor that contains complete lod information needed to construct the output.</li> <li><strong>mask</strong> (<em>list</em>) – A bool column vector which masks the input.</li> <li><strong>level</strong> (<em>int</em>) – The specific lod level to rank.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The true branch of tensor as per the mask applied to input. Variable: The false branch of tensor as per the mask applied to input.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span> <span class="n">x</span><span class="o">.</span><span class="n">persistable</span> <span class="o">=</span> <span class="bp">True</span> <span class="n">y</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'y'</span><span class="p">,</span> <span 
class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span> <span class="n">y</span><span class="o">.</span><span class="n">persistable</span> <span class="o">=</span> <span class="bp">True</span> <span class="n">out_true</span><span class="p">,</span> <span class="n">out_false</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">split_lod_tensor</span><span class="p">(</span> <span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">mask</span><span class="o">=</span><span class="n">y</span><span class="p">,</span> <span class="n">level</span><span class="o">=</span><span class="n">level</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="merge-lod-tensor"> <h3>merge_lod_tensor<a class="headerlink" href="#merge-lod-tensor" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">merge_lod_tensor</code><span class="sig-paren">(</span><em>in_true</em>, <em>in_false</em>, <em>x</em>, <em>mask</em>, <em>level=0</em><span class="sig-paren">)</span></dt> <dd><p><strong>merge_lod_tensor</strong></p> <p>This function takes in an input <span class="math">\(x\)</span>, the True branch, the False branch and a binary <span class="math">\(mask\)</span>. 
Using this information, this function merges the True and False branches of the tensor into a single Output at a certain lod level indicated by <span class="math">\(level\)</span>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>in_true</strong> (<em>tuple|list|None</em>) – The True branch to be merged.</li> <li><strong>in_false</strong> (<em>tuple|list|None</em>) – The False branch to be merged.</li> <li><strong>x</strong> (<em>tuple|list|None</em>) – The input tensor that contains complete lod information needed to construct the output.</li> <li><strong>mask</strong> (<em>list</em>) – A bool column vector which masks the input.</li> <li><strong>level</strong> (<em>int</em>) – The specific lod level to rank.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The merged output tensor.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span> <span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">stop_gradient</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span> <span class="n">y</span> <span
class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span> <span class="n">name</span><span class="o">=</span><span class="s1">'y'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'bool'</span><span class="p">,</span> <span class="n">stop_gradient</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span> <span class="n">level</span> <span class="o">=</span> <span class="mi">0</span> <span class="n">out_true</span><span class="p">,</span> <span class="n">out_false</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">split_lod_tensor</span><span class="p">(</span> <span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">mask</span><span class="o">=</span><span class="n">y</span><span class="p">,</span> <span class="n">level</span><span class="o">=</span><span class="n">level</span><span class="p">)</span> <span class="n">out</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">merge_lod_tensor</span><span class="p">(</span> <span class="n">in_true</span><span class="o">=</span><span class="n">out_true</span><span class="p">,</span> <span class="n">in_false</span><span class="o">=</span><span class="n">out_false</span><span class="p">,</span> <span class="n">mask</span><span class="o">=</span><span class="n">y</span><span class="p">,</span> <span class="n">x</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">level</span><span class="o">=</span><span class="n">level</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="blockguard"> <h3>BlockGuard<a class="headerlink" 
href="#blockguard" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">BlockGuard</code><span class="sig-paren">(</span><em>main_program</em><span class="sig-paren">)</span></dt> <dd><p>BlockGuard class.</p> <p>BlockGuard class is used to create a sub-block in a program by using the Python <cite>with</cite> keyword.</p> </dd></dl> </div> <div class="section" id="blockguardwithcompletion"> <h3>BlockGuardWithCompletion<a class="headerlink" href="#blockguardwithcompletion" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">BlockGuardWithCompletion</code><span class="sig-paren">(</span><em>rnn</em><span class="sig-paren">)</span></dt> <dd><p>BlockGuardWithCompletion class.</p> <p>BlockGuardWithCompletion class is used to create an op with a block in a program.</p> </dd></dl> </div> <div class="section" id="staticrnnmemorylink"> <h3>StaticRNNMemoryLink<a class="headerlink" href="#staticrnnmemorylink" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">StaticRNNMemoryLink</code><span class="sig-paren">(</span><em>init</em>, <em>pre_mem</em>, <em>mem=None</em><span class="sig-paren">)</span></dt> <dd><p>StaticRNNMemoryLink class.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first last simple"> <li><strong>init</strong> – the initial variable for Memory</li> <li><strong>init</strong> – Variable</li> <li><strong>pre_mem</strong> – the memory variable in previous time step</li> <li><strong>pre_mem</strong> – Variable</li> <li><strong>mem</strong> – the memory 
variable in current time step</li> <li><strong>mem</strong> – Variable</li> </ul> </td> </tr> </tbody> </table> <p>StaticRNNMemoryLink class is used to create a link between two memory cells of a StaticRNN.</p> </dd></dl> </div> <div class="section" id="whileguard"> <h3>WhileGuard<a class="headerlink" href="#whileguard" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">WhileGuard</code><span class="sig-paren">(</span><em>while_op</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="while"> <h3>While<a class="headerlink" href="#while" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">While</code><span class="sig-paren">(</span><em>cond</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="lod-rank-table"> <h3>lod_rank_table<a class="headerlink" href="#lod-rank-table" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">lod_rank_table</code><span class="sig-paren">(</span><em>x</em>, <em>level=0</em><span class="sig-paren">)</span></dt> <dd><p>LoD Rank Table Operator. Given an input variable <strong>x</strong> and a level number of LoD, this layer creates a LodRankTable object. A LoDRankTable object contains a list of bi-element tuples. Each tuple consists of an index and a length, both of which are int type. Referring to specified level of LoD, the index is the sequence index number and the length represents the sequence length. Please note that the list is ranked in descending order by the length. 
The following is an example:</p> <blockquote> <div><div class="highlight-text"><div class="highlight"><pre><span></span>x is a LoDTensor: x.lod = [[0, 2, 3], [0, 5, 6, 7]] x.data = [a, b, c, d, e, f, g] 1. set level to 0: Create lod rank table: lod_rank_table_obj = lod_rank_table(x, level=0) Get: lod_rank_table_obj.items() = [(0, 2), (1, 1)] 2. set level to 1: Create lod rank table: lod_rank_table_obj = lod_rank_table(x, level=1) Get: lod_rank_table_obj.items() = [(0, 5), (1, 1), (2, 1)] </pre></div> </div> </div></blockquote> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable</em>) – Input variable, a LoDTensor based which to create the lod rank table.</li> <li><strong>level</strong> (<em>int</em>) – Specify the LoD level, on which to create the lod rank table.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The created LoDRankTable object.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">10</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span 
class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">out</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">lod_rank_table</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">level</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="max-sequence-len"> <h3>max_sequence_len<a class="headerlink" href="#max-sequence-len" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">max_sequence_len</code><span class="sig-paren">(</span><em>rank_table</em><span class="sig-paren">)</span></dt> <dd><p>Max Sequence Len Operator. Given a LoDRankTable object, this layer returns the max length of a batch of sequences. In fact, a LoDRankTable object contains a list of tuples(&lt;sequence index, sequence length&gt;) and the list is already sorted by sequence length in descending order, so the operator just returns the sequence length of the first tuple element.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>rank_table</strong> (<em>Variable</em>) – Input variable which is a LoDRankTable object.</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">The max length of sequence.</td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body">Variable</td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span 
class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">10</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">rank_table</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">lod_rank_table</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">level</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="n">max_seq_len</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">max_sequence_len</span><span class="p">(</span><span class="n">rank_table</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="topk"> <h3>topk<a class="headerlink" href="#topk" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">topk</code><span class="sig-paren">(</span><em>input</em>, <em>k</em><span class="sig-paren">)</span></dt> <dd><p><strong>topk</strong></p> <p>This function performs the operation that selects the k entries in the input vector and outputs their values and indices as vectors. 
Thus topk_out[j] is the j-th largest entry in input, and its index is topk_indices[j]</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable|list</em>) – The input tensor that has all the data.</li> <li><strong>k</strong> (<em>int</em>) – The number of top elements that the function will pick.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first"><dl class="docutils"> <dt>The variable of type array that contains the k largest entries</dt> <dd><p class="first last">from input.</p> </dd> <dt>Variable: The variable of type array that contains the indices of k</dt> <dd><p class="first last">largest entries from input.</p> </dd> </dl> </p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">10</span><span class="p">])</span> <span class="n">k</span> <span class="o">=</span> <span class="mi">5</span> <span class="n">array</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">topk</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span 
class="n">k</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="lod-tensor-to-array"> <h3>lod_tensor_to_array<a class="headerlink" href="#lod-tensor-to-array" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">lod_tensor_to_array</code><span class="sig-paren">(</span><em>x</em>, <em>table</em><span class="sig-paren">)</span></dt> <dd><p>Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable|list</em>) – The LOD tensor to be converted to a LOD tensor array.</li> <li><strong>table</strong> (<em>ParamAttr|list</em>) – The variable that stores the level of lod which is ordered by sequence length in descending order.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first"><dl class="docutils"> <dt>The variable of type array that has been converted from a</dt> <dd><p class="first last">tensor.</p> </dd> </dl> </p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">10</span><span class="p">])</span> <span 
class="n">table</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">lod_rank_table</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">level</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="n">array</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">lod_tensor_to_array</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">table</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="array-to-lod-tensor"> <h3>array_to_lod_tensor<a class="headerlink" href="#array-to-lod-tensor" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">array_to_lod_tensor</code><span class="sig-paren">(</span><em>x</em>, <em>table</em><span class="sig-paren">)</span></dt> <dd><p>Convert a LoD_Tensor_Array to an LoDTensor.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable|list</em>) – The lod tensor array to be converted to a tensor.</li> <li><strong>table</strong> (<em>ParamAttr|list</em>) – The variable that stores the level of lod which is ordered by sequence length in descending order.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first"><dl class="docutils"> <dt>The variable of type tensor that has been converted</dt> <dd><p class="first last">from an array.</p> </dd> </dl> </p> </td> </tr> <tr class="field-odd field"><th 
class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">10</span><span class="p">])</span> <span class="n">table</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">lod_rank_table</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">level</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="n">array</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">lod_tensor_to_array</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">table</span><span class="p">)</span> <span class="n">lod_tensor</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">array_to_lod_tensor</span><span class="p">(</span><span class="n">array</span><span class="p">,</span> <span class="n">table</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="increment"> <h3>increment<a class="headerlink" href="#increment" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">increment</code><span 
class="sig-paren">(</span><em>x</em>, <em>value=1.0</em>, <em>in_place=True</em><span class="sig-paren">)</span></dt> <dd><p>This function performs an operation that increments each value in the input <span class="math">\(x\)</span> by an amount: <span class="math">\(value\)</span> as mentioned in the input parameter. This operation is performed in-place by default.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable|list</em>) – The tensor that has the input values.</li> <li><strong>value</strong> (<em>float</em>) – The amount by which the values should be incremented.</li> <li><strong>in_place</strong> (<em>bool</em>) – If the increment should be performed in-place.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first"><dl class="docutils"> <dt>The tensor variable storing the transformation of</dt> <dd><p class="first last">element-wise increment of each value in the input.</p> </dd> </dl> </p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'data'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">],</span> <span 
class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">increment</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="n">data</span><span class="p">,</span> <span class="n">value</span><span class="o">=</span><span class="mf">3.0</span><span class="p">,</span> <span class="n">in_place</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="array-write"> <h3>array_write<a class="headerlink" href="#array-write" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">array_write</code><span class="sig-paren">(</span><em>x</em>, <em>i</em>, <em>array=None</em><span class="sig-paren">)</span></dt> <dd><p>This function writes the given input variable to the specified position indicated by the array index to an output LOD_TENSOR_ARRAY. If the output LOD_TENSOR_ARRAY is not given (None), a new one will be created and returned.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable|list</em>) – The input tensor from which the data will be read.</li> <li><strong>i</strong> (<em>Variable|list</em>) – The index of the output LOD_TENSOR_ARRAY, pointing to the position to which the input tensor will be written.</li> <li><strong>array</strong> (<em>Variable|list</em>) – The output LOD_TENSOR_ARRAY to which the input tensor will be written. 
If this parameter is NONE, a new LOD_TENSOR_ARRAY will be created and returned.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The output LOD_TENSOR_ARRAY where the input tensor is written.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> </dd></dl> </div> <div class="section" id="create-array"> <h3>create_array<a class="headerlink" href="#create-array" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">create_array</code><span class="sig-paren">(</span><em>dtype</em><span class="sig-paren">)</span></dt> <dd><p>This function creates an array of type <span class="math">\(LOD_TENSOR_ARRAY\)</span> using the LayerHelper.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>dtype</strong> (<em>int|float</em>) – The data type of the elements in the array.</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">The tensor variable storing the elements of data type.</td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body">Variable</td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">create_array</span><span class="p">(</span><span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div 
class="section" id="less-than"> <h3>less_than<a class="headerlink" href="#less-than" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">less_than</code><span class="sig-paren">(</span><em>x</em>, <em>y</em>, <em>cond=None</em>, <em>**ignored</em><span class="sig-paren">)</span></dt> <dd><p><strong>Less than</strong></p> <p>This layer returns the truth value of <span class="math">\(x < y\)</span> elementwise.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable</em>) – First operand of <em>less_than</em></li> <li><strong>y</strong> (<em>Variable</em>) – Second operand of <em>less_than</em></li> <li><strong>cond</strong> (<em>Variable|None</em>) – Optional output variable to store the result of <em>less_than</em></li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The tensor variable storing the output of <em>less_than</em>.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">less</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">less_than</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="n">label</span><span class="p">,</span> <span class="n">y</span><span class="o">=</span><span class="n">limit</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="array-read"> <h3>array_read<a 
class="headerlink" href="#array-read" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">array_read</code><span class="sig-paren">(</span><em>array</em>, <em>i</em><span class="sig-paren">)</span></dt> <dd><p>This function performs the operation to read the data in as an LOD_TENSOR_ARRAY. The <em>array</em> parameter (<em>Variable|list</em>) is the input tensor array, and the <em>i</em> parameter is the subscript index in the tensor array that points to the place where the data will be read.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">返回:</th><td class="field-body">The tensor type variable that has the data written to it.</td> </tr> <tr class="field-even field"><th class="field-name">返回类型:</th><td class="field-body">Variable</td> </tr> </tbody> </table> <p class="rubric">Examples</p> </dd></dl> </div> <div class="section" id="shrink-memory"> <h3>shrink_memory<a class="headerlink" href="#shrink-memory" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">shrink_memory</code><span class="sig-paren">(</span><em>x</em>, <em>i</em>, <em>table</em><span class="sig-paren">)</span></dt> <dd><p>This function creates an operator to shrink_rnn_memory using the RankTable as mentioned in the input parameter.</p> </dd></dl> </div> <div class="section" id="array-length"> <h3>array_length<a class="headerlink" href="#array-length" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">array_length</code><span class="sig-paren">(</span><em>array</em><span class="sig-paren">)</span></dt> <dd><p>This function performs the operation to find the length of the input 
LOD_TENSOR_ARRAY.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>array</strong> (<em>LOD_TENSOR_ARRAY</em>) – The input array that will be used to compute the length.</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">The length of the input LoDTensorArray.</td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body">Variable</td> </tr> </tbody> </table> <p class="rubric">Examples</p> </dd></dl> </div> <div class="section" id="ifelse"> <h3>IfElse<a class="headerlink" href="#ifelse" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">IfElse</code><span class="sig-paren">(</span><em>cond</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="dynamicrnn"> <h3>DynamicRNN<a class="headerlink" href="#dynamicrnn" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">DynamicRNN</code><span class="sig-paren">(</span><em>name=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="conditionalblock"> <h3>ConditionalBlock<a class="headerlink" href="#conditionalblock" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">ConditionalBlock</code><span class="sig-paren">(</span><em>inputs</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="staticrnn"> <h3>StaticRNN<a class="headerlink" href="#staticrnn" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> 
<em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">StaticRNN</code><span class="sig-paren">(</span><em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>StaticRNN class.</p> <p>StaticRNN class is used to create a StaticRNN. The RNN will have its own parameters like inputs, outputs, memories, status and length.</p> <dl class="method"> <dt> <code class="descname">memory</code><span class="sig-paren">(</span><em>init=None</em>, <em>shape=None</em>, <em>batch_ref=None</em>, <em>init_value=0.0</em>, <em>init_batch_dim_idx=0</em>, <em>ref_batch_dim_idx=1</em><span class="sig-paren">)</span></dt> <dd><table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first last simple"> <li><strong>init</strong> – boot memory, if not set, a shape, batch_ref must be provided</li> <li><strong>shape</strong> – shape of the boot memory</li> <li><strong>batch_ref</strong> – batch size reference variable</li> <li><strong>init_value</strong> – the init value of boot memory</li> <li><strong>init_batch_dim_idx</strong> – the index of batch size in init’s dimension</li> <li><strong>ref_batch_dim_idx</strong> – the index of batch size in batch_ref’s dimension</li> </ul> </td> </tr> </tbody> </table> </dd></dl> </dd></dl> </div> <div class="section" id="reorder-lod-tensor-by-rank"> <h3>reorder_lod_tensor_by_rank<a class="headerlink" href="#reorder-lod-tensor-by-rank" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">reorder_lod_tensor_by_rank</code><span class="sig-paren">(</span><em>x</em>, <em>rank_table</em><span class="sig-paren">)</span></dt> <dd><p>ReorderLoDTensorByRankTable operator.</p> <p>Input(X) is a batch of sequences. 
Input(RankTable) stores new orders of the input sequence batch. The reorder_lod_tensor_by_rank operator reorders the Input(X) according to the information provided by Input(RankTable).</p> <p>For example:</p> <p>If the indices stored in the Input(RankTable) are [3, 0, 2, 1], the Input(X) will be reordered that the fourth sequence in Input(X) will become the first one, and then followed by the original first, third, and the second one.</p> <p>This is: X = [Seq0, Seq1, Seq2, Seq3]. The indices in RankTable are [3, 0, 2, 1]. Out = [Seq3, Seq0, Seq2, Seq1] with a new LoD information.</p> <p>If the LoD information of Input(X) is empty, this means Input(X) is not sequence data. This is also identical to a batch of sequences where each sequence has a fixed length 1. In this case, the reorder_lod_tensor_by_rank operator reorders each slice of Input(X) along the first axis according to Input(RankTable).</p> <p>This is: X = [Slice0, Slice1, Slice2, Slice3] and its LoD information is empty. The indices in RankTable are [3, 0, 2, 1]. Out = [Slice3, Slice0, Slice2, Slice1] with no LoD information is appended.</p> <p>NOTE: This operator sorts Input(X) according to a given LoDRankTable which does not need to be calculated according to Input(X). It can be calculated according to another different sequence, and then this operator sorts Input(X) according to the given LoDRankTable.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (LoDTensor), the input lod tensor to be reordered according to Input(RankTable). Duplicable: False Optional: False</li> <li><strong>rank_table</strong> – (LoDRankTable), the rank table according to which Input(X) is reordered. 
Duplicable: False Optional: False</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">(LoDTensor), the reordered lod tensor.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="paralleldo"> <h3>ParallelDo<a class="headerlink" href="#paralleldo" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">ParallelDo</code><span class="sig-paren">(</span><em>places</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>ParallelDo class.</p> <p>ParallelDo class is used to create a ParallelDo.</p> </dd></dl> </div> <div class="section" id="print"> <h3>Print<a class="headerlink" href="#print" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">Print</code><span class="sig-paren">(</span><em>input</em>, <em>first_n=-1</em>, <em>message=None</em>, <em>summarize=-1</em>, <em>print_tensor_name=True</em>, <em>print_tensor_type=True</em>, <em>print_tensor_shape=True</em>, <em>print_tensor_lod=True</em>, <em>print_phase='both'</em><span class="sig-paren">)</span></dt> <dd><p><strong>Print operator</strong></p> <p>This creates a print op that will print when a tensor is accessed.</p> <p>Wraps the tensor passed in so that whenever that a tensor is accessed, the message <cite>message</cite> is printed, along with the current value of the tensor <cite>t</cite>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – A Tensor to print.</li> <li><strong>summarize</strong> (<em>int</em>) – Print this number of elements in the tensor, will print all if 
left is negative.</li> <li><strong>message</strong> (<em>str</em>) – A string message to print as a prefix.</li> <li><strong>first_n</strong> (<em>int</em>) – Only log <cite>first_n</cite> number of times.</li> <li><strong>print_tensor_name</strong> (<em>bool</em>) – Print the tensor name.</li> <li><strong>print_tensor_type</strong> (<em>bool</em>) – Print the tensor type.</li> <li><strong>print_tensor_shape</strong> (<em>bool</em>) – Print the tensor shape.</li> <li><strong>print_tensor_lod</strong> (<em>bool</em>) – Print the tensor lod.</li> <li><strong>print_phase</strong> (<em>str</em>) – Which phase to display, including ‘forward’, ‘backward’ and ‘both’. If set to ‘backward’ or ‘both’, will print the gradients of input tensor.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">Output tensor, same data with input tensor.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span> </pre></div> </div> <p>value = some_layer(...) Print(value, summarize=10,</p> <blockquote> <div>message=”The content of some_layer: ”)</div></blockquote> </dd></dl> </div> </div> <div class="section" id="device"> <h2>device<a class="headerlink" href="#device" title="永久链接至标题">¶</a></h2> <div class="section" id="get-places"> <h3>get_places<a class="headerlink" href="#get-places" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">get_places</code><span class="sig-paren">(</span><em>device_count=None</em>, <em>device_type=None</em><span class="sig-paren">)</span></dt> <dd><p>Returns a list of places based on flags. 
The list will be used for parallel execution.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>device_count</strong> (<em>INT</em>) – device count</li> <li><strong>device_type</strong> (<em>STRING</em>) – device type</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">vector of Place</p> </td> </tr> </tbody> </table> </dd></dl> </div> </div> <div class="section" id="io"> <h2>io<a class="headerlink" href="#io" title="永久链接至标题">¶</a></h2> <div class="section" id="data"> <h3>data<a class="headerlink" href="#data" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">data</code><span class="sig-paren">(</span><em>name</em>, <em>shape</em>, <em>append_batch_size=True</em>, <em>dtype='float32'</em>, <em>lod_level=0</em>, <em>type=VarType.LOD_TENSOR</em>, <em>stop_gradient=True</em><span class="sig-paren">)</span></dt> <dd><p><strong>Data Layer</strong></p> <p>This function takes in the input and based on whether data has to be returned back as a minibatch, it creates the global variable by using the helper functions. 
The global variables can be accessed by all the following operators in the graph.</p> <p>All the input variables of this function are passed in as local variables to the LayerHelper constructor.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>name</strong> (<em>str</em>) – The name/alias of the function</li> <li><strong>shape</strong> (<em>list</em>) – Tuple declaring the shape.</li> <li><strong>append_batch_size</strong> (<em>bool</em>) – Whether or not to append the data as a batch.</li> <li><strong>dtype</strong> (<em>int|float</em>) – The type of data : float32, float_16, int etc</li> <li><strong>type</strong> (<em>VarType</em>) – The output type. By default it is LOD_TENSOR.</li> <li><strong>lod_level</strong> (<em>int</em>) – The LoD Level. 0 means the input data is not a sequence.</li> <li><strong>main_program</strong> (<em>Program</em>) – Name of the main program that calls this</li> <li><strong>startup_program</strong> (<em>Program</em>) – Name of the startup program</li> <li><strong>stop_gradient</strong> (<em>bool</em>) – A boolean that mentions whether gradient should flow.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The global variable that gives access to the data.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span 
class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">784</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="blockguardserv"> <h3>BlockGuardServ<a class="headerlink" href="#blockguardserv" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">BlockGuardServ</code><span class="sig-paren">(</span><em>server</em><span class="sig-paren">)</span></dt> <dd><p>BlockGuardServ class.</p> <p>BlockGuardServ class is used to create an op with a block in a program.</p> </dd></dl> </div> <div class="section" id="listenandserv"> <h3>ListenAndServ<a class="headerlink" href="#listenandserv" title="永久链接至标题">¶</a></h3> <dl class="class"> <dt> <em class="property">class </em><code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">ListenAndServ</code><span class="sig-paren">(</span><em>endpoint</em>, <em>fan_in=1</em>, <em>optimizer_mode=True</em><span class="sig-paren">)</span></dt> <dd><p>ListenAndServ class.</p> <p>ListenAndServ class is used to wrap listen_and_serv op to create a server which can receive variables from clients and run a block.</p> </dd></dl> </div> <div class="section" id="send"> <h3>Send<a class="headerlink" href="#send" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">Send</code><span class="sig-paren">(</span><em>endpoints</em>, <em>send_vars</em>, <em>get_vars</em><span class="sig-paren">)</span></dt> <dd><p>Send layer</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd 
field"><th class="field-name">参数:</th><td class="field-body"><ul class="first last simple"> <li><strong>endpoints</strong> – comma separated IP:PORT pairs in the order of send_vars to send</li> <li><strong>send_vars</strong> – vars to send</li> <li><strong>get_vars</strong> – vars to get from server after send completes.</li> </ul> </td> </tr> </tbody> </table> <p>Send variables to the server side, and get vars from server side when the server has finished running the server side program.</p> </dd></dl> </div> </div> <div class="section" id="nn"> <h2>nn<a class="headerlink" href="#nn" title="永久链接至标题">¶</a></h2> <div class="section" id="fc"> <h3>fc<a class="headerlink" href="#fc" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">fc</code><span class="sig-paren">(</span><em>input</em>, <em>size</em>, <em>num_flatten_dims=1</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>act=None</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>Fully Connected Layer</strong></p> <p>The fully connected layer can take multiple tensors as its inputs. It creates a variable (one for each input tensor) called weights for each input tensor, which represents a fully connected weight matrix from each input unit to each output unit. The fully connected layer multiplies each input tensor with its corresponding weight to produce an output Tensor. If multiple input tensors are given, the results of multiple multiplications will be summed up. If bias_attr is not None, a biases variable will be created and added to the output. 
Finally, if activation is not None, it will be applied to the output as well.</p> <p>This process can be formulated as follows:</p> <div class="math"> \[Out = Act({\sum_{i=0}^{N-1}W_iX_i + b})\]</div> <p>In the above equation:</p> <ul class="simple"> <li><span class="math">\(N\)</span>: Number of the input.</li> <li><span class="math">\(X_i\)</span>: The input tensor.</li> <li><span class="math">\(W\)</span>: The weights created by this layer.</li> <li><span class="math">\(b\)</span>: The bias parameter created by this layer (if needed).</li> <li><span class="math">\(Act\)</span>: The activation function.</li> <li><span class="math">\(Out\)</span>: The output tensor.</li> </ul> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable|list</em>) – The input tensor(s) to the fully connected layer.</li> <li><strong>size</strong> (<em>int</em>) – The number of output units in the fully connected layer.</li> <li><strong>num_flatten_dims</strong> (<em>int</em>) – The fc layer can accept an input tensor with more than two dimensions. If this happens, the multidimensional tensor will first be flattened into a 2-dimensional matrix. The parameter <cite>num_flatten_dims</cite> determines how the input tensor is flattened: the first <cite>num_flatten_dims</cite> (inclusive, index starts from 1) dimensions will be flattened to form the first dimension of the final matrix (height of the matrix), and the rest <cite>rank(X) - num_flatten_dims</cite> dimensions are flattened to form the second dimension of the final matrix (width of the matrix). For example, suppose <cite>X</cite> is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and <cite>num_flatten_dims</cite> = 3. Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. 
By default, <cite>num_flatten_dims</cite> is set to 1.</li> <li><strong>param_attr</strong> (<em>ParamAttr|list</em>) – The parameter attribute for learnable parameters/weights of the fully connected layer.</li> <li><strong>param_initializer</strong> (<em>ParamAttr|list</em>) – The initializer used for the weight/parameter. If set None, XavierInitializer() will be used.</li> <li><strong>bias_attr</strong> (<em>ParamAttr|list</em>) – The parameter attribute for the bias parameter for this layer. If set None, no bias will be added to the output units.</li> <li><strong>bias_initializer</strong> (<em>ParamAttr|list</em>) – The initializer used for the bias. If set None, then ConstantInitializer() will be used.</li> <li><strong>act</strong> (<em>str</em>) – Activation to be applied to the output of the fully connected layer.</li> <li><strong>name</strong> (<em>str</em>) – Name/alias of the fully connected layer.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The output tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first">Variable</p> </td> </tr> <tr class="field-even field"><th class="field-name">Raises:</th><td class="field-body"><p class="first last"><code class="xref py py-exc docutils literal"><span class="pre">ValueError</span></code> – If rank of the input tensor is less than 2.</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s2">"data"</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span 
class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s2">"float32"</span><span class="p">)</span> <span class="n">fc</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">data</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="mi">1000</span><span class="p">,</span> <span class="n">act</span><span class="o">=</span><span class="s2">"tanh"</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="embedding"> <h3>embedding<a class="headerlink" href="#embedding" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">embedding</code><span class="sig-paren">(</span><em>input</em>, <em>size</em>, <em>is_sparse=False</em>, <em>padding_idx=None</em>, <em>param_attr=None</em>, <em>dtype='float32'</em><span class="sig-paren">)</span></dt> <dd><p><strong>Embedding Layer</strong></p> <p>This layer is used to lookup embeddings of IDs, provided by <code class="xref py py-attr docutils literal"><span class="pre">input</span></code>, in a lookup table. 
The result of this lookup is the embedding of each ID in the <code class="xref py py-attr docutils literal"><span class="pre">input</span></code>.</p> <p>All the input variables are passed in as local variables to the LayerHelper constructor.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The tensor variable containing the IDs.</li> <li><strong>size</strong> (<em>tuple|list</em>) – The shape of the look up table parameter. It should have two elements which indicate the size of the dictionary of embeddings and the size of each embedding vector respectively.</li> <li><strong>is_sparse</strong> (<em>bool</em>) – The flag indicating whether to use sparse update.</li> <li><strong>padding_idx</strong> (<em>int|long|None</em>) – If <code class="xref py py-attr docutils literal"><span class="pre">None</span></code>, it makes no effect to lookup. Otherwise the given <code class="xref py py-attr docutils literal"><span class="pre">padding_idx</span></code> indicates padding the output with zeros whenever lookup encounters it in <code class="xref py py-attr docutils literal"><span class="pre">input</span></code>. 
If <span class="math">\(padding_idx < 0\)</span>, the padding_idx to use in lookup is <span class="math">\(size[0] + dim\)</span>.</li> <li><strong>param_attr</strong> (<em>ParamAttr</em>) – Parameters for this layer</li> <li><strong>dtype</strong> (<em>np.dtype|core.DataType|str</em>) – The type of data : float32, float_16, int etc</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The tensor variable storing the embeddings of the supplied inputs.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">dict_size</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">dataset</span><span class="o">.</span><span class="n">ids</span><span class="p">)</span> <span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'ids'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">fc</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">embedding</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">data</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span 
class="p">[</span><span class="n">dict_size</span><span class="p">,</span> <span class="mi">16</span><span class="p">])</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="dynamic-lstm"> <h3>dynamic_lstm<a class="headerlink" href="#dynamic-lstm" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">dynamic_lstm</code><span class="sig-paren">(</span><em>input</em>, <em>size</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>use_peepholes=True</em>, <em>is_reverse=False</em>, <em>gate_activation='sigmoid'</em>, <em>cell_activation='tanh'</em>, <em>candidate_activation='tanh'</em>, <em>dtype='float32'</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>Dynamic LSTM Layer</strong></p> <p>The defalut implementation is diagonal/peephole connection (<a class="reference external" href="https://arxiv.org/pdf/1402.1128.pdf">https://arxiv.org/pdf/1402.1128.pdf</a>), the formula is as follows:</p> <div class="math"> \[ \begin{align}\begin{aligned}i_t & = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i)\\f_t & = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f)\\\tilde{c_t} & = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c)\\o_t & = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o)\\c_t & = f_t \odot c_{t-1} + i_t \odot \tilde{c_t}\\h_t & = o_t \odot act_h(c_t)\end{aligned}\end{align} \]</div> <p>where the <span class="math">\(W\)</span> terms denote weight matrices (e.g. <span class="math">\(W_{xi}\)</span> is the matrix of weights from the input gate to the input), <span class="math">\(W_{ic}, W_{fc}, W_{oc}\)</span> are diagonal weight matrices for peephole connections. In our implementation, we use vectors to reprenset these diagonal weight matrices. 
The <span class="math">\(b\)</span> terms denote bias vectors (<span class="math">\(b_i\)</span> is the input gate bias vector), <span class="math">\(\sigma\)</span> is the non-linear activations, such as logistic sigmoid function, and <span class="math">\(i, f, o\)</span> and <span class="math">\(c\)</span> are the input gate, forget gate, output gate, and cell activation vectors, respectively, all of which have the same size as the cell output activation vector <span class="math">\(h\)</span>.</p> <p>The <span class="math">\(\odot\)</span> is the element-wise product of the vectors. <span class="math">\(act_g\)</span> and <span class="math">\(act_h\)</span> are the cell input and cell output activation functions and <cite>tanh</cite> is usually used for them. <span class="math">\(\tilde{c_t}\)</span> is also called candidate hidden state, which is computed based on the current input and the previous hidden state.</p> <p>Set <cite>use_peepholes</cite> to <cite>False</cite> to disable peephole connection. The formula is omitted here, please refer to the paper <a class="reference external" href="http://www.bioinf.jku.at/publications/older/2604.pdf">http://www.bioinf.jku.at/publications/older/2604.pdf</a> for details.</p> <p>Note that these <span class="math">\(W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}\)</span> operations on the input <span class="math">\(x_{t}\)</span> are NOT included in this operator. Users can choose to use fully-connect layer before LSTM layer.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input of dynamic_lstm layer, which supports variable-time length input sequence. 
The underlying tensor in this Variable is a matrix with shape (T X 4D), where T is the total time steps in this mini-batch, D is the hidden size.</li> <li><strong>size</strong> (<em>int</em>) – 4 * hidden size.</li> <li><strong>param_attr</strong> (<em>ParamAttr|None</em>) – <p>The parameter attribute for the learnable hidden-hidden weights.</p> <ul> <li>Weights = {<span class="math">\(W_{ch}, W_{ih}, W_{fh}, W_{oh}\)</span>}</li> <li>The shape is (D x 4D), where D is the hidden size.</li> </ul> </li> <li><strong>bias_attr</strong> (<em>ParamAttr|None</em>) – <p>The bias attribute for the learnable bias weights, which contains two parts, input-hidden bias weights and peephole connections weights if setting <cite>use_peepholes</cite> to <cite>True</cite>.</p> <ol class="arabic"> <li><cite>use_peepholes = False</cite></li> </ol> <blockquote> <div><ul> <li>Biases = {<span class="math">\(b_c, b_i, b_f, b_o\)</span>}.</li> <li>The shape is (1 x 4D).</li> </ul> </div></blockquote> <ol class="arabic" start="2"> <li><cite>use_peepholes = True</cite></li> </ol> <blockquote> <div><ul> <li>Biases = { <span class="math">\(b_c, b_i, b_f, b_o, W_{ic}, W_{fc}, W_{oc}\)</span>}.</li> <li>The shape is (1 x 7D).</li> </ul> </div></blockquote> </li> <li><strong>use_peepholes</strong> (<em>bool</em>) – Whether to enable diagonal/peephole connections, default <cite>True</cite>.</li> <li><strong>is_reverse</strong> (<em>bool</em>) – Whether to compute reversed LSTM, default <cite>False</cite>.</li> <li><strong>gate_activation</strong> (<em>str</em>) – The activation for input gate, forget gate and output gate. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “sigmoid”.</li> <li><strong>cell_activation</strong> (<em>str</em>) – The activation for cell output. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “tanh”.</li> <li><strong>candidate_activation</strong> (<em>str</em>) – The activation for candidate hidden state. 
Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “tanh”.</li> <li><strong>dtype</strong> (<em>str</em>) – Data type. Choices = [“float32”, “float64”], default “float32”.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The hidden state, and cell state of LSTM. The shape of both is (T x D), and lod is the same with the <cite>input</cite>.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">tuple</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">hidden_dim</span> <span class="o">=</span> <span class="mi">512</span> <span class="n">forward_proj</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">input_seq</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="n">hidden_dim</span> <span class="o">*</span> <span class="mi">4</span><span class="p">,</span> <span class="n">act</span><span class="o">=</span><span class="bp">None</span><span class="p">,</span> <span class="n">bias_attr</span><span class="o">=</span><span class="bp">None</span><span class="p">)</span> <span class="n">forward</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">dynamic_lstm</span><span class="p">(</span> <span class="nb">input</span><span class="o">=</span><span class="n">forward_proj</span><span class="p">,</span> <span 
class="n">size</span><span class="o">=</span><span class="n">hidden_dim</span> <span class="o">*</span> <span class="mi">4</span><span class="p">,</span> <span class="n">use_peepholes</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="dynamic-lstmp"> <h3>dynamic_lstmp<a class="headerlink" href="#dynamic-lstmp" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">dynamic_lstmp</code><span class="sig-paren">(</span><em>input</em>, <em>size</em>, <em>proj_size</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>use_peepholes=True</em>, <em>is_reverse=False</em>, <em>gate_activation='sigmoid'</em>, <em>cell_activation='tanh'</em>, <em>candidate_activation='tanh'</em>, <em>proj_activation='tanh'</em>, <em>dtype='float32'</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>Dynamic LSTMP Layer</strong></p> <p>LSTMP (LSTM with recurrent projection) layer has a separate projection layer after the LSTM layer, projecting the original hidden state to a lower-dimensional one, which is proposed to reduce the number of total parameters and furthermore computational complexity for the LSTM, espeacially for the case that the size of output units is relative large (<a class="reference external" href="https://research.google.com/pubs/archive/43905.pdf">https://research.google.com/pubs/archive/43905.pdf</a>).</p> <p>The formula is as follows:</p> <div class="math"> \[ \begin{align}\begin{aligned}i_t & = \sigma(W_{ix}x_{t} + W_{ir}r_{t-1} + W_{ic}c_{t-1} + b_i)\\f_t & = \sigma(W_{fx}x_{t} + W_{fr}r_{t-1} + W_{fc}c_{t-1} + b_f)\\\tilde{c_t} & = act_g(W_{cx}x_t + W_{cr}r_{t-1} + b_c)\\o_t & = \sigma(W_{ox}x_{t} + W_{or}r_{t-1} + W_{oc}c_t + b_o)\\c_t & = f_t \odot c_{t-1} + i_t \odot \tilde{c_t}\\h_t & = o_t \odot act_h(c_t)\\r_t & = 
\overline{act_h}(W_{rh}h_t)\end{aligned}\end{align} \]</div> <p>In the above formula:</p> <ul class="simple"> <li><span class="math">\(W\)</span>: Denotes weight matrices (e.g. <span class="math">\(W_{xi}\)</span> is the matrix of weights from the input gate to the input).</li> <li><span class="math">\(W_{ic}\)</span>, <span class="math">\(W_{fc}\)</span>, <span class="math">\(W_{oc}\)</span>: Diagonal weight matrices for peephole connections. In our implementation, we use vectors to represent these diagonal weight matrices.</li> <li><span class="math">\(b\)</span>: Denotes bias vectors (e.g. <span class="math">\(b_i\)</span> is the input gate bias vector).</li> <li><span class="math">\(\sigma\)</span>: The activation, such as logistic sigmoid function.</li> <li><span class="math">\(i, f, o\)</span> and <span class="math">\(c\)</span>: The input gate, forget gate, output gate, and cell activation vectors, respectively, all of which have the same size as the cell output activation vector <span class="math">\(h\)</span>.</li> <li><span class="math">\(h\)</span>: The hidden state.</li> <li><span class="math">\(r\)</span>: The recurrent projection of the hidden state.</li> <li><span class="math">\(\tilde{c_t}\)</span>: The candidate hidden state, whose computation is based on the current input and previous hidden state.</li> <li><span class="math">\(\odot\)</span>: The element-wise product of the vectors.</li> <li><span class="math">\(act_g\)</span> and <span class="math">\(act_h\)</span>: The cell input and cell output activation functions and <cite>tanh</cite> is usually used for them.</li> <li><span class="math">\(\overline{act_h}\)</span>: The activation function for the projection output, usually using <cite>identity</cite> or same as <span class="math">\(act_h\)</span>.</li> </ul> <p>Set <cite>use_peepholes</cite> to <cite>False</cite> to disable peephole connection. 
The formula is omitted here, please refer to the paper <a class="reference external" href="http://www.bioinf.jku.at/publications/older/2604.pdf">http://www.bioinf.jku.at/publications/older/2604.pdf</a> for details.</p> <p>Note that these <span class="math">\(W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}\)</span> operations on the input <span class="math">\(x_{t}\)</span> are NOT included in this operator. Users can choose to use fully-connected layer before LSTMP layer.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input of dynamic_lstmp layer, which supports variable-time length input sequence. The underlying tensor in this Variable is a matrix with shape (T X 4D), where T is the total time steps in this mini-batch, D is the hidden size.</li> <li><strong>size</strong> (<em>int</em>) – 4 * hidden size.</li> <li><strong>proj_size</strong> (<em>int</em>) – The size of projection output.</li> <li><strong>param_attr</strong> (<em>ParamAttr|None</em>) – <p>The parameter attribute for the learnable hidden-hidden weight and projection weight.</p> <ul> <li>Hidden-hidden weight = {<span class="math">\(W_{ch}, W_{ih}, W_{fh}, W_{oh}\)</span>}.</li> <li>The shape of hidden-hidden weight is (P x 4D), where P is the projection size and D the hidden size.</li> <li>Projection weight = {<span class="math">\(W_{rh}\)</span>}.</li> <li>The shape of projection weight is (D x P).</li> </ul> </li> <li><strong>bias_attr</strong> (<em>ParamAttr|None</em>) – <p>The bias attribute for the learnable bias weights, which contains two parts, input-hidden bias weights and peephole connections weights if setting <cite>use_peepholes</cite> to <cite>True</cite>.</p> <ol class="arabic"> <li><cite>use_peepholes = False</cite></li> </ol> 
<blockquote> <div><ul> <li>Biases = {<span class="math">\(b_c, b_i, b_f, b_o\)</span>}.</li> <li>The shape is (1 x 4D).</li> </ul> </div></blockquote> <ol class="arabic" start="2"> <li><cite>use_peepholes = True</cite></li> </ol> <blockquote> <div><ul> <li>Biases = { <span class="math">\(b_c, b_i, b_f, b_o, W_{ic}, W_{fc}, W_{oc}\)</span>}.</li> <li>The shape is (1 x 7D).</li> </ul> </div></blockquote> </li> <li><strong>use_peepholes</strong> (<em>bool</em>) – Whether to enable diagonal/peephole connections, default <cite>True</cite>.</li> <li><strong>is_reverse</strong> (<em>bool</em>) – Whether to compute reversed LSTM, default <cite>False</cite>.</li> <li><strong>gate_activation</strong> (<em>str</em>) – The activation for input gate, forget gate and output gate. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “sigmoid”.</li> <li><strong>cell_activation</strong> (<em>str</em>) – The activation for cell output. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “tanh”.</li> <li><strong>candidate_activation</strong> (<em>str</em>) – The activation for candidate hidden state. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “tanh”.</li> <li><strong>proj_activation</strong> (<em>str</em>) – The activation for projection output. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “tanh”.</li> <li><strong>dtype</strong> (<em>str</em>) – Data type. Choices = [“float32”, “float64”], default “float32”.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The projection of hidden state, and cell state of LSTMP. 
The shape of projection is (T x P), for the cell state which is (T x D), and both LoD is the same with the <cite>input</cite>.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">tuple</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">hidden_dim</span><span class="p">,</span> <span class="n">proj_dim</span> <span class="o">=</span> <span class="mi">512</span><span class="p">,</span> <span class="mi">256</span> <span class="n">fc_out</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">input_seq</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="n">hidden_dim</span> <span class="o">*</span> <span class="mi">4</span><span class="p">,</span> <span class="n">act</span><span class="o">=</span><span class="bp">None</span><span class="p">,</span> <span class="n">bias_attr</span><span class="o">=</span><span class="bp">None</span><span class="p">)</span> <span class="n">proj_out</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">dynamic_lstmp</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">fc_out</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="n">hidden_dim</span> <span class="o">*</span> <span class="mi">4</span><span class="p">,</span> <span class="n">proj_size</span><span class="o">=</span><span class="n">proj_dim</span><span class="p">,</span> <span class="n">use_peepholes</span><span class="o">=</span><span 
class="bp">False</span><span class="p">,</span> <span class="n">is_reverse</span><span class="o">=</span><span class="bp">True</span><span class="p">,</span> <span class="n">cell_activation</span><span class="o">=</span><span class="s2">"tanh"</span><span class="p">,</span> <span class="n">proj_activation</span><span class="o">=</span><span class="s2">"tanh"</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="dynamic-gru"> <h3>dynamic_gru<a class="headerlink" href="#dynamic-gru" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">dynamic_gru</code><span class="sig-paren">(</span><em>input</em>, <em>size</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>is_reverse=False</em>, <em>gate_activation='sigmoid'</em>, <em>candidate_activation='tanh'</em>, <em>h_0=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>Dynamic GRU Layer</strong></p> <p>Refer to <a class="reference external" href="https://arxiv.org/abs/1412.3555">Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling</a></p> <p>The formula is as follows:</p> <div class="math"> \[ \begin{align}\begin{aligned}u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\\tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\h_t & = (1-u_t) \odot h_{t-1} + u_t \odot \tilde{h_t}\end{aligned}\end{align} \]</div> <p>The <span class="math">\(\odot\)</span> is the element-wise product of the vectors. <span class="math">\(act_g\)</span> is the update gate and reset gate activation function and <span class="math">\(sigmoid\)</span> is usually used for it. 
<span class="math">\(act_c\)</span> is the activation function for candidate hidden state and <span class="math">\(tanh\)</span> is usually used for it.</p> <p>Note that these <span class="math">\(W_{ux}x_{t}, W_{rx}x_{t}, W_{cx}x_{t}\)</span> operations on the input <span class="math">\(x_{t}\)</span> are NOT included in this operator. Users can choose to use fully-connect layer before GRU layer.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input of dynamic_gru layer, which supports variable-time length input sequence. The underlying tensor in this Variable is a matrix with shape <span class="math">\((T \times 3D)\)</span>, where <span class="math">\(T\)</span> is the total time steps in this mini-batch, <span class="math">\(D\)</span> is the hidden size.</li> <li><strong>size</strong> (<em>int</em>) – The dimension of the gru cell.</li> <li><strong>param_attr</strong> (<em>ParamAttr|None</em>) – <p>The parameter attribute for the learnable hidden-hidden weight matrix. Note:</p> <ul> <li>The shape of the weight matrix is <span class="math">\((T \times 3D)\)</span>, where <span class="math">\(D\)</span> is the hidden size.</li> <li>All elements in the weight matrix can be divided into two parts. 
The first part are weights of the update gate and reset gate with shape <span class="math">\((D \times 2D)\)</span>, and the second part are weights for candidate hidden state with shape <span class="math">\((D \times D)\)</span>.</li> </ul> </li> <li><strong>bias_attr</strong> (<em>ParamAttr</em>) – The parameter attribute for the learnable hidden-hidden bias.</li> <li><strong>is_reverse</strong> (<em>bool</em>) – Whether to compute reversed GRU, default <code class="xref py py-attr docutils literal"><span class="pre">False</span></code>.</li> <li><strong>gate_activation</strong> (<em>str</em>) – The activation for update gate and reset gate. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “sigmoid”.</li> <li><strong>candidate_activation</strong> (<em>str</em>) – The activation for candidate hidden state. Choices = [“sigmoid”, “tanh”, “relu”, “identity”], default “tanh”.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The hidden state of GRU. 
The shape is <span class="math">\((T \times D)\)</span>, and lod is the same with the input.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">hidden_dim</span> <span class="o">=</span> <span class="mi">512</span> <span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">data</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="n">hidden_dim</span> <span class="o">*</span> <span class="mi">3</span><span class="p">)</span> <span class="n">hidden</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">dynamic_gru</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="n">hidden_dim</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="gru-unit"> <h3>gru_unit<a class="headerlink" href="#gru-unit" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">gru_unit</code><span class="sig-paren">(</span><em>input</em>, <em>hidden</em>, <em>size</em>, <em>weight=None</em>, <em>bias=None</em>, <em>activation='tanh'</em>, <em>gate_activation='sigmoid'</em><span class="sig-paren">)</span></dt> <dd><p>GRU unit layer. 
The equation of a gru step is:</p> <blockquote> <div><div class="math"> \[ \begin{align}\begin{aligned}u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)\\r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)\\m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)\\h_t & = dot((1-u_t), m_t) + dot(u_t, h_{t-1})\end{aligned}\end{align} \]</div> </div></blockquote> <p>The inputs of gru unit include <span class="math">\(z_t\)</span>, <span class="math">\(h_{t-1}\)</span>. In terms of the equation above, the <span class="math">\(z_t\)</span> is split into 3 parts - <span class="math">\(xu_t\)</span>, <span class="math">\(xr_t\)</span> and <span class="math">\(xm_t\)</span>. This means that in order to implement a full GRU unit operator for an input, a fully connected layer has to be applied, such that <span class="math">\(z_t = W_{fc}x_t\)</span>.</p> <p>The terms <span class="math">\(u_t\)</span> and <span class="math">\(r_t\)</span> represent the update and reset gates of the GRU cell. Unlike LSTM, GRU has one fewer gate. However, there is an intermediate candidate hidden output, which is denoted by <span class="math">\(m_t\)</span>. This layer has three outputs <span class="math">\(h_t\)</span>, <span class="math">\(dot(r_t, h_{t-1})\)</span> and concatenation of <span class="math">\(u_t\)</span>, <span class="math">\(r_t\)</span> and <span class="math">\(m_t\)</span>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The fc transformed input value of current step.</li> <li><strong>hidden</strong> (<em>Variable</em>) – The hidden value of lstm unit from previous step.</li> <li><strong>size</strong> (<em>integer</em>) – The input dimension value.</li> <li><strong>weight</strong> (<em>ParamAttr</em>) – The weight parameters for gru unit. 
Default: None</li> <li><strong>bias</strong> (<em>ParamAttr</em>) – The bias parameters for gru unit. Default: None</li> <li><strong>activation</strong> (<em>string</em>) – The activation type for cell (actNode). Default: ‘tanh’</li> <li><strong>gate_activation</strong> (<em>string</em>) – The activation type for gates (actGate). Default: ‘sigmoid’</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The hidden value, reset-hidden value and gate values.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">tuple</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="c1"># assuming we have x_t_data and prev_hidden of size=10</span> <span class="n">x_t</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x_t_data</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="mi">30</span><span class="p">)</span> <span class="n">hidden_val</span><span class="p">,</span> <span class="n">r_h_val</span><span class="p">,</span> <span class="n">gate_val</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">gru_unit</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x_t</span><span class="p">,</span> <span class="n">hidden</span> <span class="o">=</span> <span class="n">prev_hidden</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="linear-chain-crf"> <h3>linear_chain_crf<a class="headerlink" href="#linear-chain-crf" 
title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">linear_chain_crf</code><span class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>param_attr=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="crf-decoding"> <h3>crf_decoding<a class="headerlink" href="#crf-decoding" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">crf_decoding</code><span class="sig-paren">(</span><em>input</em>, <em>param_attr</em>, <em>label=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="cos-sim"> <h3>cos_sim<a class="headerlink" href="#cos-sim" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">cos_sim</code><span class="sig-paren">(</span><em>X</em>, <em>Y</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>This function performs the cosine similarity between two tensors X and Y and returns that as the output.</p> </dd></dl> </div> <div class="section" id="cross-entropy"> <h3>cross_entropy<a class="headerlink" href="#cross-entropy" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">cross_entropy</code><span class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p><strong>Cross Entropy Layer</strong></p> <p>This layer computes the cross entropy between <cite>input</cite> and <cite>label</cite>. 
It supports both standard cross-entropy and soft-label cross-entropy loss computation.</p> <ol class="arabic"> <li><dl class="first docutils"> <dt>One-hot cross-entropy:</dt> <dd><p class="first"><cite>soft_label = False</cite>, <cite>Label[i, 0]</cite> indicates the class index for sample i:</p> <div class="last math"> \[Y[i] = -\log(X[i, Label[i]])\]</div> </dd> </dl> </li> <li><dl class="first docutils"> <dt>Soft-label cross-entropy:</dt> <dd><p class="first"><cite>soft_label = True</cite>, <cite>Label[i, j]</cite> indicates the soft label of class j for sample i:</p> <div class="last math"> \[Y[i] = \sum_j{-Label[i, j] * \log(X[i, j])}\]</div> </dd> </dl> <p>Please make sure that in this case the summation of each row of <cite>label</cite> equals one.</p> </li> <li><dl class="first docutils"> <dt>One-hot cross-entropy with vectorized <cite>label</cite>:</dt> <dd><p class="first last">As a special case of 2), when each row of ‘label’ has only one non-zero element which is equal to 1, soft-label cross-entropy degenerates to a one-hot cross-entropy with one-hot label representation.</p> </dd> </dl> </li> </ol> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable|list</em>) – a 2-D tensor with shape [N x D], where N is the batch size and D is the number of classes. This input is a probability computed by the previous operator, which is almost always the result of a softmax operator.</li> <li><strong>label</strong> (<em>Variable|list</em>) – the ground truth which is a 2-D tensor. When <cite>soft_label</cite> is set to <cite>False</cite>, <cite>label</cite> is a tensor<int64> with shape [N x 1]. 
When <cite>soft_label</cite> is set to <cite>True</cite>, <cite>label</cite> is a tensor<float/double> with shape [N x D].</li> <li><strong>soft_label</strong> (bool, via <cite>**kwargs</cite>) – a flag indicating whether to interpret the given labels as soft labels, default <cite>False</cite>.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">A 2-D tensor with shape [N x 1], the cross entropy loss.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">Raises:</th><td class="field-body"><p class="first"><cite>ValueError</cite> – 1) the 1st dimension of <cite>input</cite> and <cite>label</cite> are not equal. 2) when <cite>soft_label == True</cite>, and the 2nd dimension of</p> <blockquote> <div><p><cite>input</cite> and <cite>label</cite> are not equal.</p> </div></blockquote> <ol class="last arabic simple" start="3"> <li>when <cite>soft_label == False</cite>, and the 2nd dimension of <cite>label</cite> is not 1.</li> </ol> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">predict</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">net</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="n">classdim</span><span class="p">,</span> <span class="n">act</span><span class="o">=</span><span class="s1">'softmax'</span><span class="p">)</span> <span class="n">cost</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">cross_entropy</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">predict</span><span 
class="p">,</span> <span class="n">label</span><span class="o">=</span><span class="n">label</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="square-error-cost"> <h3>square_error_cost<a class="headerlink" href="#square-error-cost" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">square_error_cost</code><span class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p><strong>Square error cost layer</strong></p> <p>This layer accepts input predictions and target label and returns the squared error cost.</p> <p>For predictions, <span class="math">\(X\)</span>, and target labels, <span class="math">\(Y\)</span>, the equation is:</p> <div class="math"> \[Out = (X - Y)^2\]</div> <p>In the above equation:</p> <blockquote> <div><ul class="simple"> <li><span class="math">\(X\)</span>: Input predictions, a tensor.</li> <li><span class="math">\(Y\)</span>: Input labels, a tensor.</li> <li><span class="math">\(Out\)</span>: Output value, same shape with <span class="math">\(X\)</span>.</li> </ul> </div></blockquote> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – Input tensor, has predictions.</li> <li><strong>label</strong> (<em>Variable</em>) – Label tensor, has target labels.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first"><dl class="docutils"> <dt>The tensor variable storing the element-wise squared error</dt> <dd><p class="first last">difference of input and label.</p> </dd> </dl> </p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td 
class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">y</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'y'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">y_predict</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'y_predict'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">cost</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">square_error_cost</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">y_predict</span><span class="p">,</span> <span class="n">label</span><span class="o">=</span><span class="n">y</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="accuracy"> <h3>accuracy<a class="headerlink" href="#accuracy" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">accuracy</code><span class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>k=1</em>, <em>correct=None</em>, <em>total=None</em>, <em>**kwargs</em><span 
class="sig-paren">)</span></dt> <dd><p>This function computes the accuracy using the input and label. The output is the top_k inputs and their indices.</p> </dd></dl> </div> <div class="section" id="chunk-eval"> <h3>chunk_eval<a class="headerlink" href="#chunk-eval" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">chunk_eval</code><span class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>chunk_scheme</em>, <em>num_chunk_types</em>, <em>excluded_chunk_types=None</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>This function computes and outputs the precision, recall and F1-score of chunk detection.</p> </dd></dl> </div> <div class="section" id="sequence-conv"> <h3>sequence_conv<a class="headerlink" href="#sequence-conv" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sequence_conv</code><span class="sig-paren">(</span><em>input</em>, <em>num_filters</em>, <em>filter_size=3</em>, <em>filter_stride=1</em>, <em>padding=None</em>, <em>bias_attr=None</em>, <em>param_attr=None</em>, <em>act=None</em><span class="sig-paren">)</span></dt> <dd><p>This function creates the op for sequence_conv, using the inputs and other convolutional configurations for the filters and stride as given in the input parameters to the function.</p> </dd></dl> </div> <div class="section" id="conv2d"> <h3>conv2d<a class="headerlink" href="#conv2d" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">conv2d</code><span class="sig-paren">(</span><em>input</em>, <em>num_filters</em>, <em>filter_size</em>, <em>stride=None</em>, <em>padding=None</em>, <em>groups=None</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>use_cudnn=True</em>, <em>act=None</em><span class="sig-paren">)</span></dt> 
<dd><p><strong>Convolution2D Layer</strong></p> <p>The convolution2D layer calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input(Input) and Output(Output) are in NCHW format. Where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. The details of convolution layer, please refer to UFLDL’s <a class="reference external" href="http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/">convolution,</a> . If bias attribution and activation type are provided, bias is added to the output of the convolution, and the corresponding activation function is applied to the final result.</p> <p>For each input <span class="math">\(X\)</span>, the equation is:</p> <div class="math"> \[Out = \sigma (W \ast X + b)\]</div> <p>In the above equation:</p> <ul class="simple"> <li><span class="math">\(X\)</span>: Input value, a tensor with NCHW format.</li> <li><span class="math">\(W\)</span>: Filter value, a tensor with MCHW format.</li> <li><span class="math">\(\ast\)</span>: Convolution operation.</li> <li><span class="math">\(b\)</span>: Bias value, a 2-D tensor with shape [M, 1].</li> <li><span class="math">\(\sigma\)</span>: Activation function.</li> <li><dl class="first docutils"> <dt><span class="math">\(Out\)</span>: Output value, the shape of <span class="math">\(Out\)</span> and <span class="math">\(X\)</span> may be</dt> <dd>different.</dd> </dl> </li> </ul> <p class="rubric">Example</p> <ul> <li><p class="first">Input:</p> <p>Input shape: $(N, C_{in}, H_{in}, W_{in})$</p> <p>Filter shape: $(C_{out}, C_{in}, H_f, W_f)$</p> </li> <li><p class="first">Output: Output shape: $(N, C_{out}, H_{out}, W_{out})$</p> </li> </ul> <p>Where</p> <div class="math"> \[\begin{split}H_{out} &= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\ W_{out} &= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1\end{split}\]</div>
<table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input image with [N, C, H, W] format.</li> <li><strong>num_filters</strong> (<em>int</em>) – The number of filter. It is as same as the output image channel.</li> <li><strong>filter_size</strong> (<em>int|tuple|None</em>) – The filter size. If filter_size is a tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise, the filter will be a square.</li> <li><strong>stride</strong> (<em>int|tuple</em>) – The stride size. If stride is a tuple, it must contain two integers, (stride_H, stride_W). Otherwise, the stride_H = stride_W = stride. Default: stride = 1.</li> <li><strong>padding</strong> (<em>int|tuple</em>) – The padding size. If padding is a tuple, it must contain two integers, (padding_H, padding_W). Otherwise, the padding_H = padding_W = padding. Default: padding = 0.</li> <li><strong>groups</strong> (<em>int</em>) – The groups number of the Conv2d Layer. According to grouped convolution in Alex Krizhevsky’s Deep CNN paper: when group=2, the first half of the filters is only connected to the first half of the input channels, while the second half of the filters is only connected to the second half of the input channels. Default: groups=1</li> <li><strong>param_attr</strong> (<em>ParamAttr</em>) – The parameters to the Conv2d Layer. Default: None</li> <li><strong>bias_attr</strong> (<em>ParamAttr</em>) – Bias parameter for the Conv2d layer. Default: None</li> <li><strong>use_cudnn</strong> (<em>bool</em>) – Use cudnn kernel or not, it is valid only when the cudnn library is installed. Default: True</li> <li><strong>act</strong> (<em>str</em>) – Activation type. 
Default: None</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first"><dl class="docutils"> <dt>The tensor variable storing the convolution and</dt> <dd><p class="first last">non-linearity activation result.</p> </dd> </dl> </p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first">Variable</p> </td> </tr> <tr class="field-even field"><th class="field-name">Raises:</th><td class="field-body"><p class="first last"><code class="xref py py-exc docutils literal"><span class="pre">ValueError</span></code> – If the shapes of input, filter_size, stride, padding and groups mismatch.</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span> <span class="n">name</span><span class="o">=</span><span class="s1">'data'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">3</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">conv2d</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">conv2d</span><span class="p">(</span> <span class="nb">input</span><span class="o">=</span><span class="n">data</span><span class="p">,</span> <span class="n">num_filters</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">filter_size</span><span class="o">=</span><span 
class="mi">3</span><span class="p">,</span> <span class="n">act</span><span class="o">=</span><span class="s2">"relu"</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="sequence-pool"> <h3>sequence_pool<a class="headerlink" href="#sequence-pool" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sequence_pool</code><span class="sig-paren">(</span><em>input</em>, <em>pool_type</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>This function adds the operator for sequence pooling. It pools features of all time-steps of each instance, and is applied on top of the input using pool_type mentioned in the parameters.</p> <p>It supports four pool_type:</p> <ul class="simple"> <li>average: <span class="math">\(Out[i] = \frac{\sum_i X_i}{N}\)</span></li> <li>sum: <span class="math">\(Out[i] = \sum_jX_{ij}\)</span></li> <li>sqrt: <span class="math">\(Out[i] = \frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}\)</span></li> <li>max: <span class="math">\(Out[i] = max(X_i)\)</span></li> </ul> <div class="highlight-text"><div class="highlight"><pre><span></span>x is a 1-level LoDTensor: x.lod = [[0, 2, 5, 7]] x.data = [1, 3, 2, 4, 6, 5, 1] x.dims = [7, 1] then output is a Tensor: out.dim = [3, 1] with condition len(x.lod[-1]) - 1 == out.dims[0] for different pool_type: average: out.data = [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2 sum : out.data = [4, 12, 6], where 4=1+3, 12=2+4+6, 6=5+1 sqrt : out.data = [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2), 6.93=(2+4+6)/sqrt(3), 4.24=(5+1)/sqrt(2) max : out.data = [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1) </pre></div> </div> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> 
<li><strong>input</strong> (<em>variable</em>) – The input variable which is a LoDTensor.</li> <li><strong>pool_type</strong> (<em>string</em>) – The pooling type of sequence_pool. It supports average, sum, sqrt and max.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The sequence pooling variable which is a Tensor.</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">7</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">avg_x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">sequence_pool</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">pool_type</span><span class="o">=</span><span class="s1">'average'</span><span class="p">)</span> <span class="n">sum_x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">sequence_pool</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span 
class="n">pool_type</span><span class="o">=</span><span class="s1">'sum'</span><span class="p">)</span> <span class="n">sqrt_x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">sequence_pool</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">pool_type</span><span class="o">=</span><span class="s1">'sqrt'</span><span class="p">)</span> <span class="n">max_x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">sequence_pool</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">pool_type</span><span class="o">=</span><span class="s1">'max'</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="pool2d"> <h3>pool2d<a class="headerlink" href="#pool2d" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">pool2d</code><span class="sig-paren">(</span><em>input</em>, <em>pool_size</em>, <em>pool_type</em>, <em>pool_stride=None</em>, <em>pool_padding=None</em>, <em>global_pooling=False</em>, <em>use_cudnn=True</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>This function adds the operator for pooling in 2 dimensions, using the pooling configurations mentioned in input parameters.</p> </dd></dl> </div> <div class="section" id="batch-norm"> <h3>batch_norm<a class="headerlink" href="#batch-norm" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">batch_norm</code><span class="sig-paren">(</span><em>input</em>, <em>act=None</em>, <em>is_test=False</em>, 
<em>momentum=0.9</em>, <em>epsilon=1e-05</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>data_layout='NCHW'</em>, <em>name=None</em>, <em>moving_mean_name=None</em>, <em>moving_variance_name=None</em><span class="sig-paren">)</span></dt> <dd><p>This function helps create an operator to implement the BatchNorm layer using the configurations from the input parameters.</p> </dd></dl> </div> <div class="section" id="beam-search-decode"> <h3>beam_search_decode<a class="headerlink" href="#beam-search-decode" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">beam_search_decode</code><span class="sig-paren">(</span><em>ids</em>, <em>scores</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="conv2d-transpose"> <h3>conv2d_transpose<a class="headerlink" href="#conv2d-transpose" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">conv2d_transpose</code><span class="sig-paren">(</span><em>input</em>, <em>num_filters</em>, <em>output_size=None</em>, <em>filter_size=None</em>, <em>padding=None</em>, <em>stride=None</em>, <em>dilation=None</em>, <em>param_attr=None</em>, <em>use_cudnn=True</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>Convolution2D transpose layer</strong></p> <p>The convolution2D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) are in NCHW format. Where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. Parameters(dilations, strides, paddings) are two elements. These two elements represent height and width, respectively. 
The details of convolution transpose layer, please refer to the following explanation and references <a class="reference external" href="http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf">therein</a>.</p> <p>For each input <span class="math">\(X\)</span>, the equation is:</p> <div class="math"> \[Out = W \ast X\]</div> <p>In the above equation:</p> <ul class="simple"> <li><span class="math">\(X\)</span>: Input value, a tensor with NCHW format.</li> <li><span class="math">\(W\)</span>: Filter value, a tensor with MCHW format.</li> <li><span class="math">\(\ast\)</span> : Convolution transpose operation.</li> <li><dl class="first docutils"> <dt><span class="math">\(Out\)</span>: Output value, the shape of <span class="math">\(Out\)</span> and <span class="math">\(X\)</span> may be</dt> <dd>different.</dd> </dl> </li> </ul> <p class="rubric">Example</p> <ul> <li><p class="first">Input:</p> <p>Input shape: $(N, C_{in}, H_{in}, W_{in})$</p> <p>Filter shape: $(C_{in}, C_{out}, H_f, W_f)$</p> </li> <li><p class="first">Output:</p> <p>Output shape: $(N, C_{out}, H_{out}, W_{out})$</p> </li> </ul> <p>Where</p> <div class="math"> \[\begin{split}H_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\ W_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1\end{split}\]</div> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input image with [N, C, H, W] format.</li> <li><strong>num_filters</strong> (<em>int</em>) – The number of the filter. It is as same as the output image channel.</li> <li><strong>output_size</strong> (<em>int|tuple|None</em>) – The output image size. If output size is a tuple, it must contain two integers, (image_H, image_W). 
This parameter only works when filter_size is None.</li> <li><strong>filter_size</strong> (<em>int|tuple|None</em>) – The filter size. If filter_size is a tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise, the filter will be a square. None if use output size to calculate filter_size.</li> <li><strong>padding</strong> (<em>int|tuple</em>) – The padding size. If padding is a tuple, it must contain two integers, (padding_H, padding_W). Otherwise, the padding_H = padding_W = padding. Default: padding = 0.</li> <li><strong>stride</strong> (<em>int|tuple</em>) – The stride size. If stride is a tuple, it must contain two integers, (stride_H, stride_W). Otherwise, the stride_H = stride_W = stride. Default: stride = 1.</li> <li><strong>dilation</strong> (<em>int|tuple</em>) – The dilation size. If dilation is a tuple, it must contain two integers, (dilation_H, dilation_W). Otherwise, the dilation_H = dilation_W = dilation. Default: dilation = 1.</li> <li><strong>param_attr</strong> (<em>ParamAttr</em>) – The parameters to the Conv2d_transpose Layer. Default: None</li> <li><strong>use_cudnn</strong> (<em>bool</em>) – Use cudnn kernel or not, it is valid only when the cudnn library is installed. Default: True</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). 
If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The tensor variable storing the convolution transpose result.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first">Variable</p> </td> </tr> <tr class="field-even field"><th class="field-name">Raises:</th><td class="field-body"><p class="first last"><code class="xref py py-exc docutils literal"><span class="pre">ValueError</span></code> – If the shapes of input, filter_size, stride, padding and groups mismatch.</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span> <span class="n">name</span><span class="o">=</span><span class="s1">'data'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">3</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">conv2d_transpose</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">conv2d_transpose</span><span class="p">(</span> <span class="nb">input</span><span class="o">=</span><span class="n">data</span><span class="p">,</span> <span class="n">num_filters</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">filter_size</span><span class="o">=</span><span class="mi">3</span><span class="p">)</span> 
</pre></div> </div> </dd></dl> </div> <div class="section" id="sequence-expand"> <h3>sequence_expand<a class="headerlink" href="#sequence-expand" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sequence_expand</code><span class="sig-paren">(</span><em>x</em>, <em>y</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Sequence Expand Layer. This layer will expand the input variable <strong>x</strong> according to LoD information of <strong>y</strong>. And the following examples will explain how sequence_expand works:</p> <div class="highlight-text"><div class="highlight"><pre><span></span>* Case 1 x is a LoDTensor: x.lod = [[0, 2, 3], [0, 1, 3, 4]] x.data = [a, b, c, d] x.dims = [4, 1] y is a LoDTensor: y.lod = [[0, 2, 4], [0, 3, 6, 7, 8]] with condition len(y.lod[-1]) - 1 == x.dims[0] then output is a 2-level LoDTensor: out.lod = [[0, 2, 4], [0, 3, 6, 7, 8]] out.data = [a, a, a, b, b, b, c, d] out.dims = [8, 1] * Case 2 x is a Tensor: x.data = [a, b, c] x.dims = [3, 1] y is a LoDTensor: y.lod = [[0, 2, 3, 6]] with condition len(y.lod[-1]) - 1 == x.dims[0] then output is a 1-level LoDTensor: out.lod = [[0, 2, 3, 6]] out.data = [a, a, b, c, c, c] out.dims = [6, 1] </pre></div> </div> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>y</strong> (<em>Variable</em>) – The input variable which is a LoDTensor.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). 
If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The expanded variable which is a LoDTensor.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">10</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">y</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'y'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">10</span><span class="p">,</span> <span class="mi">20</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">out</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">sequence_expand</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="n">x</span><span 
class="p">,</span> <span class="n">y</span><span class="o">=</span><span class="n">y</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="lstm-unit"> <h3>lstm_unit<a class="headerlink" href="#lstm-unit" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">lstm_unit</code><span class="sig-paren">(</span><em>x_t</em>, <em>hidden_t_prev</em>, <em>cell_t_prev</em>, <em>forget_bias=0.0</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Lstm unit layer. The equation of a lstm step is:</p> <blockquote> <div><div class="math"> \[ \begin{align}\begin{aligned}i_t & = \sigma(W_{x_i}x_{t} + W_{h_i}h_{t-1} + b_i)\\f_t & = \sigma(W_{x_f}x_{t} + W_{h_f}h_{t-1} + b_f)\\c_t & = f_tc_{t-1} + i_t tanh (W_{x_c}x_t + W_{h_c}h_{t-1} + b_c)\\o_t & = \sigma(W_{x_o}x_{t} + W_{h_o}h_{t-1} + b_o)\\h_t & = o_t tanh(c_t)\end{aligned}\end{align} \]</div> </div></blockquote> <p>The inputs of lstm unit include <span class="math">\(x_t\)</span>, <span class="math">\(h_{t-1}\)</span> and <span class="math">\(c_{t-1}\)</span>. The 2nd dimensions of <span class="math">\(h_{t-1}\)</span> and <span class="math">\(c_{t-1}\)</span> should be same. The implementation separates the linear transformation and non-linear transformation apart. Here, we take <span class="math">\(i_t\)</span> as an example. 
The linear transformation is applied by calling a <cite>fc</cite> layer and the equation is:</p> <blockquote> <div><div class="math"> \[L_{i_t} = W_{x_i}x_{t} + W_{h_i}h_{t-1} + b_i\]</div> </div></blockquote> <p>The non-linear transformation is applied by calling <cite>lstm_unit_op</cite> and the equation is:</p> <blockquote> <div><div class="math"> \[i_t = \sigma(L_{i_t})\]</div> </div></blockquote> <p>This layer has two outputs including <span class="math">\(h_t\)</span> and <span class="math">\(c_t\)</span>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x_t</strong> (<em>Variable</em>) – The input value of current step, a 2-D tensor with shape M x N, M for batch size and N for input size.</li> <li><strong>hidden_t_prev</strong> (<em>Variable</em>) – The hidden value of lstm unit, a 2-D tensor with shape M x S, M for batch size and S for size of lstm unit.</li> <li><strong>cell_t_prev</strong> (<em>Variable</em>) – The cell value of lstm unit, a 2-D tensor with shape M x S, M for batch size and S for size of lstm unit.</li> <li><strong>forget_bias</strong> (<em>float</em>) – The forget bias of lstm unit.</li> <li><strong>param_attr</strong> (<em>ParamAttr</em>) – The attributes of parameter weights, used to set initializer, name etc.</li> <li><strong>bias_attr</strong> (<em>ParamAttr</em>) – The attributes of bias weights, if not False, bias weights will be created and be set to default value.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). 
If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The hidden value and cell value of lstm unit.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first">tuple</p> </td> </tr> <tr class="field-even field"><th class="field-name">Raises:</th><td class="field-body"><p class="first last"><code class="xref py py-exc docutils literal"><span class="pre">ValueError</span></code> – The ranks of <strong>x_t</strong>, <strong>hidden_t_prev</strong> and <strong>cell_t_prev</strong> are not 2 or the 1st dimensions of <strong>x_t</strong>, <strong>hidden_t_prev</strong> and <strong>cell_t_prev</strong> are not the same or the 2nd dimensions of <strong>hidden_t_prev</strong> and <strong>cell_t_prev</strong> are not the same.</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x_t</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x_t_data</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span> <span class="n">prev_hidden</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">prev_hidden_data</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="mi">30</span><span class="p">)</span> <span class="n">prev_cell</span> <span class="o">=</span> <span class="n">fluid</span><span 
class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">prev_cell_data</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="mi">30</span><span class="p">)</span> <span class="n">hidden_value</span><span class="p">,</span> <span class="n">cell_value</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">lstm_unit</span><span class="p">(</span><span class="n">x_t</span><span class="o">=</span><span class="n">x_t</span><span class="p">,</span> <span class="n">hidden_t_prev</span><span class="o">=</span><span class="n">prev_hidden</span><span class="p">,</span> <span class="n">cell_t_prev</span><span class="o">=</span><span class="n">prev_cell</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="reduce-sum"> <h3>reduce_sum<a class="headerlink" href="#reduce-sum" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">reduce_sum</code><span class="sig-paren">(</span><em>input</em>, <em>dim=None</em>, <em>keep_dim=False</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Computes the sum of tensor elements over the given dimension.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>dim</strong> (<em>int|None</em>) – The dimension along which the sum is performed. 
If <code class="xref py py-attr docutils literal"><span class="pre">None</span></code>, sum all elements of <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> and return a Tensor variable with a single element, otherwise must be in the range <span class="math">\([-rank(input), rank(input))\)</span>. If <span class="math">\(dim < 0\)</span>, the dimension to reduce is <span class="math">\(rank + dim\)</span>.</li> <li><strong>keep_dim</strong> (<em>bool</em>) – Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> unless <code class="xref py py-attr docutils literal"><span class="pre">keep_dim</span></code> is true.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The reduced Tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="c1"># x is a Tensor variable with following elements:</span> <span class="c1"># [[0.2, 0.3, 0.5, 0.9]</span> <span class="c1"># [0.1, 0.2, 0.6, 0.7]]</span> <span class="c1"># Each example is followed by the corresponding output tensor.</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="c1"># [3.5]</span> <span class="n">fluid</span><span class="o">.</span><span 
class="n">reduce_sum</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="c1"># [0.3, 0.5, 1.1, 1.6]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span> <span class="c1"># [1.9, 1.6]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">keep_dim</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span> <span class="c1"># [[1.9], [1.6]]</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="reduce-mean"> <h3>reduce_mean<a class="headerlink" href="#reduce-mean" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">reduce_mean</code><span class="sig-paren">(</span><em>input</em>, <em>dim=None</em>, <em>keep_dim=False</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Computes the mean of tensor elements over the given dimension.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>dim</strong> (<em>int|None</em>) – The dimension along which the mean is 
computed. If <code class="xref py py-attr docutils literal"><span class="pre">None</span></code>, compute the mean over all elements of <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> and return a Tensor variable with a single element, otherwise must be in the range <span class="math">\([-rank(input), rank(input))\)</span>. If <span class="math">\(dim < 0\)</span>, the dimension to reduce is <span class="math">\(rank + dim\)</span>.</li> <li><strong>keep_dim</strong> (<em>bool</em>) – Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> unless <code class="xref py py-attr docutils literal"><span class="pre">keep_dim</span></code> is true.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The reduced Tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="c1"># x is a Tensor variable with following elements:</span> <span class="c1"># [[0.2, 0.3, 0.5, 0.9]</span> <span class="c1"># [0.1, 0.2, 0.6, 0.7]]</span> <span class="c1"># Each example is followed by the corresponding output tensor.</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_mean</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="c1"># [0.4375]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span 
class="o">.</span><span class="n">reduce_mean</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="c1"># [0.15, 0.25, 0.55, 0.8]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_mean</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span> <span class="c1"># [0.475, 0.4]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_mean</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">keep_dim</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span> <span class="c1"># [[0.475], [0.4]]</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="reduce-max"> <h3>reduce_max<a class="headerlink" href="#reduce-max" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">reduce_max</code><span class="sig-paren">(</span><em>input</em>, <em>dim=None</em>, <em>keep_dim=False</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Computes the maximum of tensor elements over the given dimension.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>dim</strong> (<em>int|None</em>) – The 
dimension along which the maximum is computed. If <code class="xref py py-attr docutils literal"><span class="pre">None</span></code>, compute the maximum over all elements of <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> and return a Tensor variable with a single element, otherwise must be in the range <span class="math">\([-rank(input), rank(input))\)</span>. If <span class="math">\(dim < 0\)</span>, the dimension to reduce is <span class="math">\(rank + dim\)</span>.</li> <li><strong>keep_dim</strong> (<em>bool</em>) – Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> unless <code class="xref py py-attr docutils literal"><span class="pre">keep_dim</span></code> is true.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The reduced Tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="c1"># x is a Tensor variable with following elements:</span> <span class="c1"># [[0.2, 0.3, 0.5, 0.9]</span> <span class="c1"># [0.1, 0.2, 0.6, 0.7]]</span> <span class="c1"># Each example is followed by the corresponding output tensor.</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_max</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="c1"># [0.9]</span> <span class="n">fluid</span><span class="o">.</span><span 
class="n">layers</span><span class="o">.</span><span class="n">reduce_max</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="c1"># [0.2, 0.3, 0.6, 0.9]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_max</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span> <span class="c1"># [0.9, 0.7]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_max</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">keep_dim</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span> <span class="c1"># [[0.9], [0.7]]</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="reduce-min"> <h3>reduce_min<a class="headerlink" href="#reduce-min" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">reduce_min</code><span class="sig-paren">(</span><em>input</em>, <em>dim=None</em>, <em>keep_dim=False</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Computes the minimum of tensor elements over the given dimension.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>dim</strong> 
(<em>int|None</em>) – The dimension along which the minimum is computed. If <code class="xref py py-attr docutils literal"><span class="pre">None</span></code>, compute the minimum over all elements of <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> and return a Tensor variable with a single element, otherwise must be in the range <span class="math">\([-rank(input), rank(input))\)</span>. If <span class="math">\(dim < 0\)</span>, the dimension to reduce is <span class="math">\(rank + dim\)</span>.</li> <li><strong>keep_dim</strong> (<em>bool</em>) – Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the <code class="xref py py-attr docutils literal"><span class="pre">input</span></code> unless <code class="xref py py-attr docutils literal"><span class="pre">keep_dim</span></code> is true.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The reduced Tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="c1"># x is a Tensor variable with following elements:</span> <span class="c1"># [[0.2, 0.3, 0.5, 0.9]</span> <span class="c1"># [0.1, 0.2, 0.6, 0.7]]</span> <span class="c1"># Each example is followed by the corresponding output tensor.</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_min</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="c1"># [0.1]</span> <span class="n">fluid</span><span 
class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_min</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="c1"># [0.1, 0.2, 0.5, 0.7]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_min</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span> <span class="c1"># [0.2, 0.1]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">reduce_min</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">keep_dim</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span> <span class="c1"># [[0.2], [0.1]]</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="sequence-first-step"> <h3>sequence_first_step<a class="headerlink" href="#sequence-first-step" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sequence_first_step</code><span class="sig-paren">(</span><em>input</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>This function gets the first step of sequence.</p> <div class="highlight-text"><div class="highlight"><pre><span></span>x is a 1-level LoDTensor: x.lod = [[0, 2, 5, 7]] x.data = [1, 3, 2, 4, 6, 5, 1] x.dims = [7, 1] then output is a Tensor: out.dim = [3, 1] with condition len(x.lod[-1]) - 1 == out.dims[0] out.data = [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1) </pre></div> </div> <table class="docutils field-list" 
frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>input</strong> (<em>variable</em>) – The input variable which is a LoDTensor.</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">The sequence’s first step variable which is a Tensor.</td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">7</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">x_first_step</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">sequence_first_step</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="sequence-last-step"> <h3>sequence_last_step<a class="headerlink" href="#sequence-last-step" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sequence_last_step</code><span class="sig-paren">(</span><em>input</em>, <em>**kwargs</em><span 
class="sig-paren">)</span></dt> <dd><p>This function gets the last step of sequence.</p> <div class="highlight-text"><div class="highlight"><pre><span></span>x is a 1-level LoDTensor: x.lod = [[0, 2, 5, 7]] x.data = [1, 3, 2, 4, 6, 5, 1] x.dims = [7, 1] then output is a Tensor: out.dim = [3, 1] with condition len(x.lod[-1]) - 1 == out.dims[0] out.data = [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1) </pre></div> </div> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>input</strong> (<em>variable</em>) – The input variable which is a LoDTensor.</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">The sequence’s last step variable which is a Tensor.</td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">7</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">x_last_step</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">sequence_last_step</span><span class="p">(</span><span class="nb">input</span><span 
class="o">=</span><span class="n">x</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="dropout"> <h3>dropout<a class="headerlink" href="#dropout" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">dropout</code><span class="sig-paren">(</span><em>x</em>, <em>dropout_prob</em>, <em>is_test=False</em>, <em>seed=None</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Computes dropout.</p> <p>Drop or keep each element of <cite>x</cite> independently. Dropout is a regularization technique for reducing overfitting by preventing neuron co-adaptation during training. The dropout operator randomly sets (according to the given dropout probability) the outputs of some units to zero, while others remain unchanged.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>variable</em>) – The input tensor.</li> <li><strong>dropout_prob</strong> (<em>float</em>) – Probability of setting units to zero.</li> <li><strong>is_test</strong> (<em>bool</em>) – A flag indicating whether it is in test phase or not.</li> <li><strong>seed</strong> (<em>int</em>) – A Python integer used to create random seeds. If this parameter is set to None, a random seed is used. NOTE: If an integer seed is given, always the same output units will be dropped. 
DO NOT use a fixed seed in training.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">A tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s2">"data"</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s2">"float32"</span><span class="p">)</span> <span class="n">droped</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">dropout</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">dropout_prob</span><span class="o">=</span><span class="mf">0.5</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="split"> <h3>split<a class="headerlink" href="#split" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">split</code><span class="sig-paren">(</span><em>input</em>, <em>num_or_sections</em>, <em>dim=-1</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Split the input tensor into multiple sub-tensors.</p> <table class="docutils field-list" frame="void" 
rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>num_or_sections</strong> (<em>int|list</em>) – If <code class="xref py py-attr docutils literal"><span class="pre">num_or_sections</span></code> is an integer, then the integer indicates the number of equal sized sub-tensors that the tensor will be divided into. If <code class="xref py py-attr docutils literal"><span class="pre">num_or_sections</span></code> is a list of integers, the length of list indicates the number of sub-tensors and the integers indicate the sizes of sub-tensors’ <code class="xref py py-attr docutils literal"><span class="pre">dim</span></code> dimension orderly.</li> <li><strong>dim</strong> (<em>int</em>) – The dimension along which to split. If <span class="math">\(dim < 0\)</span>, the dimension to split along is <span class="math">\(rank(input) + dim\)</span>.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). 
If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The list of segmented tensor variables.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">List</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="c1"># x is a Tensor variable with shape [3, 9, 5]:</span> <span class="n">x0</span><span class="p">,</span> <span class="n">x1</span><span class="p">,</span> <span class="n">x2</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">num_or_sections</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">x0</span><span class="o">.</span><span class="n">shape</span> <span class="c1"># [3, 3, 5]</span> <span class="n">x1</span><span class="o">.</span><span class="n">shape</span> <span class="c1"># [3, 3, 5]</span> <span class="n">x2</span><span class="o">.</span><span class="n">shape</span> <span class="c1"># [3, 3, 5]</span> <span class="n">x0</span><span class="p">,</span> <span class="n">x1</span><span class="p">,</span> <span class="n">x2</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">num_or_sections</span><span class="o">=</span><span class="p">[</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span 
class="p">,</span> <span class="mi">4</span><span class="p">],</span> <span class="n">dim</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">x0</span><span class="o">.</span><span class="n">shape</span> <span class="c1"># [3, 2, 5]</span> <span class="n">x1</span><span class="o">.</span><span class="n">shape</span> <span class="c1"># [3, 3, 5]</span> <span class="n">x2</span><span class="o">.</span><span class="n">shape</span> <span class="c1"># [3, 4, 5]</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="ctc-greedy-decoder"> <h3>ctc_greedy_decoder<a class="headerlink" href="#ctc-greedy-decoder" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">ctc_greedy_decoder</code><span class="sig-paren">(</span><em>input</em>, <em>blank</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>This op is used to decode sequences by greedy policy by below steps: 1. Get the indexes of max value for each row in input. 
a.k.a.</p> <blockquote> <div>numpy.argmax(input, axis=1).</div></blockquote> <ol class="arabic simple" start="2"> <li>For each sequence in result of step1, merge repeated tokens between two blanks and delete all blanks.</li> </ol> <p>A simple example as below:</p> <div class="highlight-text"><div class="highlight"><pre><span></span>Given: input.data = [[0.6, 0.1, 0.3, 0.1], [0.3, 0.2, 0.4, 0.1], [0.1, 0.5, 0.1, 0.3], [0.5, 0.1, 0.3, 0.1], [0.5, 0.1, 0.3, 0.1], [0.2, 0.2, 0.2, 0.4], [0.2, 0.2, 0.1, 0.5], [0.5, 0.1, 0.3, 0.1]] input.lod = [[0, 4, 8]] Then: output.data = [[2], [1], [3]] output.lod = [[0, 2, 3]] </pre></div> </div> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – (LoDTensor<float>), the probabilities of variable-length sequences, which is a 2-D Tensor with LoD information. Its shape is [Lp, num_classes + 1], where Lp is the sum of all input sequences’ length and num_classes is the true number of classes. 
(not including the blank label).</li> <li><strong>blank</strong> (<em>int</em>) – the blank label index of Connectionist Temporal Classification (CTC) loss, which is in the half-opened interval [0, num_classes + 1).</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">CTC greedy decode result.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">8</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">cost</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">ctc_greedy_decoder</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">blank</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="edit-distance"> <h3>edit_distance<a class="headerlink" href="#edit-distance" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">edit_distance</code><span class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>normalized=False</em>, 
<em>ignored_tokens=None</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>EditDistance operator computes the edit distances between a batch of hypothesis strings and their references. Edit distance, also called Levenshtein distance, measures how dissimilar two strings are by counting the minimum number of operations to transform one string into another. Here the operations include insertion, deletion, and substitution.</p> <p>For example, given hypothesis string A = “kitten” and reference B = “sitting”, the edit distance is 3 since A will be transformed into B at least after two substitutions and one insertion:</p> <p>“kitten” -> “sitten” -> “sittin” -> “sitting”</p> <p>Input(Hyps) is a LoDTensor consisting of all the hypothesis strings with the total number denoted by <cite>batch_size</cite>, and the separation is specified by the LoD information. And the <cite>batch_size</cite> reference strings are arranged in order in the same way in the LoDTensor Input(Refs).</p> <p>Output(Out) contains the <cite>batch_size</cite> results and each stands for the edit distance for a pair of strings respectively. 
If Attr(normalized) is true, the edit distance will be divided by the length of reference string.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The indices for hypothesis strings.</li> <li><strong>label</strong> (<em>Variable</em>) – The indices for reference strings.</li> <li><strong>normalized</strong> (<em>bool</em>) – Indicated whether to normalize the edit distance by the length of reference string.</li> <li><strong>ignored_tokens</strong> (<em>list of int</em>) – Tokens that should be removed before calculating edit distance.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">sequence-to-sequence edit distance in shape [batch_size, 1].</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">8</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">y</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span 
class="n">name</span><span class="o">=</span><span class="s1">'y'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">7</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">cost</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">edit_distance</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span><span class="n">label</span><span class="o">=</span><span class="n">y</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="l2-normalize"> <h3>l2_normalize<a class="headerlink" href="#l2-normalize" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">l2_normalize</code><span class="sig-paren">(</span><em>x</em>, <em>axis</em>, <em>epsilon=1e-12</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>L2 normalize Layer</strong></p> <p>The l2 normalize layer normalizes <cite>x</cite> along dimension <cite>axis</cite> using an L2 norm. 
For a 1-D tensor (<cite>dim</cite> is fixed to 0), this layer computes</p> <p>output = x / sqrt(max(sum(x**2), epsilon))</p> <p>For <cite>x</cite> with more dimensions, this layer independently normalizes each 1-D slice along dimension <cite>axis</cite>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable|list</em>) – The input tensor to l2_normalize layer.</li> <li><strong>axis</strong> (<em>int</em>) – Dimension along which to normalize the input.</li> <li><strong>epsilon</strong> (<em>float</em>) – A lower bound value for <cite>x</cite>‘s l2 norm. sqrt(epsilon) will be used as the divisor if the l2 norm of <cite>x</cite> is less than sqrt(epsilon).</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The output tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s2">"data"</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">17</span><span class="p">,</span> <span class="mi">13</span><span class="p">),</span> <span 
class="n">dtype</span><span class="o">=</span><span class="s2">"float32"</span><span class="p">)</span> <span class="n">normed</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">l2_normalize</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="n">data</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="matmul"> <h3>matmul<a class="headerlink" href="#matmul" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">matmul</code><span class="sig-paren">(</span><em>x</em>, <em>y</em>, <em>transpose_x=False</em>, <em>transpose_y=False</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Applies matrix multiplication to two tensors.</p> <p>Currently, the input tensors’ rank can be any, but when the rank of any inputs is bigger than 3, this two inputs’ rank should be equal.</p> <p>The actual behavior depends on the shapes of <span class="math">\(x\)</span>, <span class="math">\(y\)</span> and the flag values of <code class="xref py py-attr docutils literal"><span class="pre">transpose_x</span></code>, <code class="xref py py-attr docutils literal"><span class="pre">transpose_y</span></code>. Specifically:</p> <ul class="simple"> <li>If a transpose flag is specified, the last two dimensions of the tensor are transposed. 
If the tensor is rank-1 of shape <span class="math">\([D]\)</span>, then for <span class="math">\(x\)</span> it is treated as <span class="math">\([1, D]\)</span> in nontransposed form and as <span class="math">\([D, 1]\)</span> in transposed form, whereas for <span class="math">\(y\)</span> it is the opposite: It is treated as <span class="math">\([D, 1]\)</span> in nontransposed form and as <span class="math">\([1, D]\)</span> in transposed form.</li> <li>After transpose, the two tensors are 2-D or n-D and matrix multiplication performs in the following way.<ul> <li>If both are 2-D, they are multiplied like conventional matrices.</li> <li>If either is n-D, it is treated as a stack of matrices residing in the last two dimensions and a batched matrix multiply supporting broadcast applies on the two tensors.</li> </ul> </li> </ul> <p>Also note that if the raw tensor <span class="math">\(x\)</span> or <span class="math">\(y\)</span> is rank-1 and nontransposed, the prepended or appended dimension <span class="math">\(1\)</span> will be removed after matrix multiplication.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>y</strong> (<em>Variable</em>) – The input variable which is a Tensor or LoDTensor.</li> <li><strong>transpose_x</strong> (<em>bool</em>) – Whether to transpose <span class="math">\(x\)</span> before multiplication.</li> <li><strong>transpose_y</strong> (<em>bool</em>) – Whether to transpose <span class="math">\(y\)</span> before multiplication.</li> <li><strong>name</strong> (<em>str|None</em>) – A name for this layer(optional). 
If set None, the layer will be named automatically.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The product Tensor variable.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="c1"># Examples to clarify shapes of the inputs and output</span> <span class="c1"># x: [B, ..., M, K], y: [B, ..., K, N]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> <span class="c1"># out: [B, ..., M, N]</span> <span class="c1"># x: [B, M, K], y: [B, K, N]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> <span class="c1"># out: [B, M, N]</span> <span class="c1"># x: [B, M, K], y: [K, N]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> <span class="c1"># out: [B, M, N]</span> <span class="c1"># x: [M, K], y: [K, N]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> <span class="c1"># out: [M, N]</span> <span class="c1"># x: [B, M, K], y: [K]</span> 
<span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> <span class="c1"># out: [B, M]</span> <span class="c1"># x: [K], y: [K]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> <span class="c1"># out: [1]</span> <span class="c1"># x: [M], y: [N]</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="bp">True</span><span class="p">,</span> <span class="bp">True</span><span class="p">)</span> <span class="c1"># out: [M, N]</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="warpctc"> <h3>warpctc<a class="headerlink" href="#warpctc" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">warpctc</code><span class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>blank=0</em>, <em>norm_by_times=False</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>An operator integrating the open source Warp-CTC library (<a class="reference external" href="https://github.com/baidu-research/warp-ctc">https://github.com/baidu-research/warp-ctc</a>) to compute Connectionist Temporal Classification (CTC) loss. 
It can be aliased as softmax with CTC, since a native softmax activation is integrated into the Warp-CTC library, to normalize values for each row of the input tensor.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – (LodTensor, default: LoDTensor<float>), the unscaled probabilities of variable-length sequences, which is a 2-D Tensor with LoD information. Its shape is [Lp, num_classes + 1], where Lp is the sum of all input sequences’ length and num_classes is the true number of classes. (not including the blank label).</li> <li><strong>label</strong> (<em>Variable</em>) – (LodTensor, default: LoDTensor<int>), the ground truth of variable-length sequence, which is a 2-D Tensor with LoD information. It is of the shape [Lg, 1], where Lg is the sum of all labels’ length.</li> <li><strong>blank</strong> – (int, default: 0), the blank label index of Connectionist Temporal Classification (CTC) loss, which is in the half-opened interval [0, num_classes + 1).</li> <li><strong>norm_by_times</strong> – (bool, default: false), whether to normalize</li> <li><strong>gradients by the number of time-step</strong><strong>, </strong><strong>which is also the</strong> (<em>the</em>) – </li> <li><strong>length. 
There is no need to normalize the gradients</strong> (<em>sequence's</em>) – </li> <li><strong>warpctc layer was followed by a mean_op.</strong> (<em>if</em>) – </li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The Connectionist Temporal Classification (CTC) loss, which is a 2-D Tensor of the shape [batch_size, 1].</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> </dd></dl> </div> <div class="section" id="sequence-reshape"> <h3>sequence_reshape<a class="headerlink" href="#sequence-reshape" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sequence_reshape</code><span class="sig-paren">(</span><em>input</em>, <em>new_dim</em><span class="sig-paren">)</span></dt> <dd><p><strong>Sequence Reshape Layer</strong></p> <p>This layer will rearrange the input sequences. The new dimension is set by user. Length of each sequence is computed according to original length, original dimension and new dimension. 
The following example will help to illustrate the function of this layer:</p> <div class="highlight-text"><div class="highlight"><pre><span></span>x is a LoDTensor: x.lod = [[0, 2, 6]] x.data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]] x.dims = [6, 2] set new_dim = 4 then out is a LoDTensor: out.lod = [[0, 1, 3]] out.data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] out.dims = [3, 4] </pre></div> </div> <p>Currently, only 1-level LoDTensor is supported and please make sure (original length * original dimension) can be divided by new dimension with no remainder for each sequence.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – (LodTensor, default: LoDTensor<float>), a 2-D LoDTensor with shape being [N, M] where M for dimension.</li> <li><strong>new_dim</strong> (<em>int</em>) – New dimension which the input LoDTensor is reshaped to.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">Reshaped LoDTensor according to new dimension.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">5</span><span class="p">,</span> <span 
class="mi">20</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">x_reshaped</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">sequence_reshape</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">new_dim</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="transpose"> <h3>transpose<a class="headerlink" href="#transpose" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">transpose</code><span class="sig-paren">(</span><em>x</em>, <em>perm</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>transpose Layer</strong></p> <p>Permute the dimensions of <cite>input</cite> according to <cite>perm</cite>.</p> <p>The <cite>i</cite>-th dimension of the returned tensor will correspond to the perm[i]-th dimension of <cite>input</cite>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – (Tensor), A Tensor.</li> <li><strong>perm</strong> (<em>list</em>) – A permutation of the dimensions of <cite>input</cite>.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">A transposed Tensor.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> 
</tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">5</span><span class="p">,</span> <span class="mi">10</span><span class="p">,</span> <span class="mi">15</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">x_transposed</span> <span class="o">=</span> <span class="n">layers</span><span class="o">.</span><span class="n">transpose</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">perm</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="im2sequence"> <h3>im2sequence<a class="headerlink" href="#im2sequence" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">im2sequence</code><span class="sig-paren">(</span><em>input</em>, <em>filter_size=1</em>, <em>stride=1</em>, <em>padding=0</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd><p>Extracts image patches from the input tensor to form a tensor of shape {input.batch_size * output_height * output_width, filter_size_H * filter_size_W * input.channels} which is similar with im2col. This op use filter / kernel to scan images and convert these images to sequences. 
After expanding, the number of time step are output_height * output_width for an image, in which output_height and output_width are calculated by below equation:</p> <div class="math"> \[output\_size = 1 + (2 * padding + img\_size - block\_size + stride - 1) / stride\]</div> <p>And the dimension of each time step is block_y * block_x * input.channels.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – The input should be a tensor in NCHW format.</li> <li><strong>filter_size</strong> (<em>int|tuple|None</em>) – The filter size. If filter_size is a tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise, the filter will be a square.</li> <li><strong>stride</strong> (<em>int|tuple</em>) – The stride size. If stride is a tuple, it must contain two integers, (stride_H, stride_W). Otherwise, the stride_H = stride_W = stride. Default: stride = 1.</li> <li><strong>padding</strong> (<em>int|tuple</em>) – The padding size. If padding is a tuple, it can contain two integers like (padding_H, padding_W) which means padding_up = padding_down = padding_H and padding_left = padding_right = padding_W. Or it can use (padding_up, padding_left, padding_down, padding_right) to indicate paddings of four direction. Otherwise, a scalar padding means padding_up = padding_down = padding_left = padding_right = padding Default: padding = 0.</li> <li><strong>name</strong> (<em>int</em>) – The name of this layer. It is optional.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The output is a LoDTensor with shape {input.batch_size * output_height * output_width, filter_size_H * filter_size_W * input.channels}. 
If we regard output as a matrix, each row of this matrix is a step of a sequence.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">output</p> </td> </tr> </tbody> </table> <p>Examples:</p> <p>As an example:</p> <blockquote> <div><div class="highlight-text"><div class="highlight"><pre><span></span>Given: x = [[[[ 6. 2. 1.] [ 8. 3. 5.] [ 0. 2. 6.]] [[ 2. 4. 4.] [ 6. 3. 0.] [ 6. 4. 7.]]] [[[ 6. 7. 1.] [ 5. 7. 9.] [ 2. 4. 8.]] [[ 1. 2. 1.] [ 1. 3. 5.] [ 9. 0. 8.]]]] x.dims = {2, 2, 3, 3} And: filter = [2, 2] stride = [1, 1] padding = [0, 0] Then: output.data = [[ 6. 2. 8. 3. 2. 4. 6. 3.] [ 2. 1. 3. 5. 4. 4. 3. 0.] [ 8. 3. 0. 2. 6. 3. 6. 4.] [ 3. 5. 2. 6. 3. 0. 4. 7.] [ 6. 7. 5. 7. 1. 2. 1. 3.] [ 7. 1. 7. 9. 2. 1. 3. 5.] [ 5. 7. 2. 4. 1. 3. 9. 0.] [ 7. 9. 4. 8. 3. 5. 0. 8.]] output.dims = {8, 9} output.lod = [[0, 4, 8]] </pre></div> </div> <p>The simple usage is:</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">output</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">im2sequence</span><span class="p">(</span> <span class="nb">input</span><span class="o">=</span><span class="n">layer</span><span class="p">,</span> <span class="n">stride</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">filter_size</span><span class="o">=</span><span class="p">[</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span> </pre></div> </div> </div></blockquote> </dd></dl> </div> <div class="section" id="nce"> <h3>nce<a class="headerlink" href="#nce" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">nce</code><span 
class="sig-paren">(</span><em>input</em>, <em>label</em>, <em>num_total_classes</em>, <em>sample_weight=None</em>, <em>param_attr=None</em>, <em>bias_attr=None</em>, <em>num_neg_samples=None</em><span class="sig-paren">)</span></dt> <dd><p>Compute and return the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](<a class="reference external" href="http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf">http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf</a>). By default this operator uses a uniform distribution for sampling.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> – (Tensor) A tensor of shape [batch_size, dim]. Duplicable: False Optional: False</li> <li><strong>label</strong> – (Tensor) A tensor of shape [batch_size, num_true_class]. ‘num_true_class’ is the number of target classes in each sample.The number of target classes per sample should be same. If you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class.) Duplicable: False Optional: False</li> <li><strong>weight</strong> – (Tensor) A tensor of shape [num_class, dim]. ‘num_class’ is the total number of class. Duplicable: False Optional: False</li> <li><strong>bias</strong> – (Tensor) A tensor of shape [num_class, 1]. ‘num_class’ is the total number of class. It is a dispensable input. Duplicable: False Optional: True</li> <li><strong>sample_weight</strong> – (Tensor) A tensor of shape [batch_size, 1] storing a weight for each sample. And it is a dispensable input. The default value of sample is 1. 
Duplicable: False Optional: True</li> <li><strong>num_total_classes</strong> (<em>INT</em>) – Total number of classes in all samples.</li> <li><strong>num_neg_samples</strong> (<em>INT</em>) – The number of negative classes. The default value is 10.</li> <li><strong>custom_neg_classes</strong> (<em>INTS</em>) – This attribute should only be used in unit tests. Classes in this list will be used as negative classes for every sample. Under normal conditions, users should avoid setting this attribute.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">(Tensor) A tensor of shape [batch_size, 1]. Cost of samples.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="beam-search"> <h3>beam_search<a class="headerlink" href="#beam-search" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">beam_search</code><span class="sig-paren">(</span><em>pre_ids</em>, <em>ids</em>, <em>scores</em>, <em>beam_size</em>, <em>end_id</em>, <em>level=0</em><span class="sig-paren">)</span></dt> <dd><p>This function implements the beam search algorithm.</p> </dd></dl> </div> <div class="section" id="row-conv"> <h3>row_conv<a class="headerlink" href="#row-conv" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">row_conv</code><span class="sig-paren">(</span><em>input</em>, <em>future_context_size</em>, <em>param_attr=None</em>, <em>act=None</em><span class="sig-paren">)</span></dt> <dd><p>Row Conv Operator. This layer will apply lookahead convolution to <strong>input</strong>. The input variable should be a 2D LoDTensor with shape [T, D]. Parameters with shape [future_context_size + 1, D] will be created. 
The math equation of row convolution is as follows:</p> <div class="math"> \[Out_{i} = \sum_{j = i} ^ {i + \tau} X_{j} \odot W_{j - i}\]</div> <p>In the above equation:</p> <ul class="simple"> <li><span class="math">\(Out_{i}\)</span>: The i-th row of output variable with shape [1, D].</li> <li><span class="math">\(\tau\)</span>: Future context size.</li> <li><span class="math">\(X_{j}\)</span>: The j-th row of input variable with shape [1, D].</li> <li><span class="math">\(W_{j-i}\)</span>: The (j-i)-th row of parameters with shape [1, D].</li> </ul> <p>For more details about row_conv, please refer to the paper (<a class="reference external" href="http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf">http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf</a>) and the design document (<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645">https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645</a>).</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – Input variable, a 2D LoDTensor with shape [T, D].</li> <li><strong>future_context_size</strong> (<em>int</em>) – Future context size. 
Please note, the shape of convolution kernel is [future_context_size + 1, D].</li> <li><strong>param_attr</strong> (<em>ParamAttr</em>) – Attributes of parameters, including name, initializer etc.</li> <li><strong>act</strong> (<em>str</em>) – Non-linear activation to be applied to output variable.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The output tensor with same shape as input tensor.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">16</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">,</span> <span class="n">lod_level</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span> <span class="n">out</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">row_conv</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">x</span><span class="p">,</span> <span class="n">future_context_size</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="multiplex"> <h3>multiplex<a class="headerlink" href="#multiplex" title="永久链接至标题">¶</a></h3> <dl 
class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">multiplex</code><span class="sig-paren">(</span><em>inputs</em>, <em>index</em><span class="sig-paren">)</span></dt> <dd><p><strong>Multiplex Layer</strong></p> <p>Referring to the given index variable, this layer selects rows from the input variables to construct a multiplex variable. Assuming that there are <span class="math">\(m\)</span> input variables and <span class="math">\(I_i\)</span> represents the i-th input variable and <span class="math">\(i\)</span> is in [0, <span class="math">\(m\)</span>). All input variables are tensors with same shape [<span class="math">\(d_0\)</span>, <span class="math">\(d_1\)</span>, ..., <span class="math">\(d_R\)</span>]. Please note that rank of the input tensor should be at least 2. Each input variable will be treated as a 2-D matrix with shape [<span class="math">\(M\)</span>, <span class="math">\(N\)</span>] where <span class="math">\(M\)</span> for <span class="math">\(d_0\)</span> and <span class="math">\(N\)</span> for <span class="math">\(d_1\)</span> * <span class="math">\(d_2\)</span> * ... * <span class="math">\(d_R\)</span>. Let <span class="math">\(I_i[j]\)</span> be the j-th row of the i-th input variable. The given index variable should be a 2-D tensor with shape [<span class="math">\(M\)</span>, 1]. Let <cite>ID[i]</cite> be the i-th index value of the index variable. Then the output variable will be a tensor with shape [<span class="math">\(d_0\)</span>, <span class="math">\(d_1\)</span>, ..., <span class="math">\(d_R\)</span>]. 
If we treat the output tensor as a 2-D matrix with shape [<span class="math">\(M\)</span>, <span class="math">\(N\)</span>] and let <span class="math">\(O[i]\)</span> be the i-th row of the matrix, then <cite>O[i]</cite> is equal to <span class="math">\(I_{ID[i]}[i]\)</span>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>inputs</strong> (<em>list</em>) – A list of variables to gather from. All variables have the same shape and the rank is at least 2.</li> <li><strong>index</strong> (<em>Variable</em>) – Tensor<int32>, index variable which is a 2-D tensor with shape [M, 1] where M is the batch size.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">Multiplex variable gathered from input variables.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">x1</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x1'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">4</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">x2</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span 
class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'x2'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">4</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'float32'</span><span class="p">)</span> <span class="n">index</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">'index'</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'int32'</span><span class="p">)</span> <span class="n">out</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">multiplex</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">],</span> <span class="n">index</span><span class="o">=</span><span class="n">index</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> </div> <div class="section" id="ops"> <h2>ops<a class="headerlink" href="#ops" title="永久链接至标题">¶</a></h2> <div class="section" id="mean"> <h3>mean<a class="headerlink" href="#mean" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">mean</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Mean Operator.</p> <p>Out is a scalar which is the mean of all elements in X.</p> <table class="docutils field-list" frame="void" 
rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – The input of mean op Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">The output of mean op</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="mul"> <h3>mul<a class="headerlink" href="#mul" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">mul</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Mul Operator.</p> <p>This operator is used to perform matrix multiplication for input $X$ and $Y$.</p> <p>The equation is:</p> <p>$$Out = X * Y$$</p> <p>Both the input $X$ and $Y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of mul op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of mul op. Duplicable: False Optional: False</li> <li><strong>x_num_col_dims</strong> (<em>INT</em>) – (int, default 1), The mul_op can take tensors with more than two dimensions as its inputs. If the input $X$ is a tensor with more than two dimensions, $X$ will be flattened into a two-dimensional matrix first. 
The flattening rule is: the first <cite>x_num_col_dims</cite> dimensions will be flattened to form the first dimension of the final matrix (the height of the matrix), and the rest <cite>rank(X) - x_num_col_dims</cite> dimensions are flattened to form the second dimension of the final matrix (the width of the matrix). As a result, height of the flattened matrix is equal to the product of $X$’s first <cite>x_num_col_dims</cite> dimensions’ sizes, and width of the flattened matrix is equal to the product of $X$’s last <cite>rank(X) - x_num_col_dims</cite> dimensions’ size. For example, suppose $X$ is a 5-dimensional tensor with the shape [2, 3, 4, 5, 6], and <cite>x_num_col_dims</cite> = 3. Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30].</li> <li><strong>y_num_col_dims</strong> (<em>INT</em>) – (int, default 1), The mul_op can take tensors with more than two dimensions as its inputs. If the input $Y$ is a tensor with more than two dimensions, $Y$ will be flattened into a two-dimensional matrix first. The attribute <cite>y_num_col_dims</cite> determines how $Y$ is flattened. 
See comments of <cite>x_num_col_dims</cite> for more details.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">(Tensor), The output tensor of mul op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="reshape"> <h3>reshape<a class="headerlink" href="#reshape" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">reshape</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Reshape Operator.</p> <p>Reshape Input(X) into the shape specified by Attr(shape).</p> <p>An example: Given a 2-D tensor X with 2 rows and 2 columns: [[1, 2], [3, 4]]</p> <p>and target shape = [1, 4], the reshape operator will transform the tensor X into a 2-D tensor: [[1, 2, 3, 4]]</p> <p>One dimension in the target shape can be set to -1, representing that its size is unknown. In this case, the real dimension will be inferred from the original shape of Input(X) and other dimensions in the target shape.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – The input tensor of reshape operator. 
Duplicable: False Optional: False</li> <li><strong>shape</strong> (<em>INTS</em>) – (vector<int>) Target shape of reshape operator.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output tensor of reshape operator.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="scale"> <h3>scale<a class="headerlink" href="#scale" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">scale</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Scale operator</p> <p>$$Out = scale*X$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor) Input tensor of scale operator. Duplicable: False Optional: False</li> <li><strong>scale</strong> (<em>FLOAT</em>) – (float, default 1.0)The scaling factor of the scale operator.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">(Tensor) Output tensor of scale operator.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="sigmoid-cross-entropy-with-logits"> <h3>sigmoid_cross_entropy_with_logits<a class="headerlink" href="#sigmoid-cross-entropy-with-logits" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sigmoid_cross_entropy_with_logits</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>SigmoidCrossEntropyWithLogits Operator.</p> <p>This measures the element-wise probability error in classification tasks in which each class is independent. 
This can be thought of as predicting labels for a data-point, where labels are not mutually exclusive. For example, a news article can be about politics, technology or sports at the same time or none of these.</p> <p>The logistic loss is given as follows:</p> <blockquote> <div>$$loss = -Labels * log(sigma(X)) - (1 - Labels) * log(1 - sigma(X))$$</div></blockquote> <p>We know that $$sigma(X) = (1 / (1 + exp(-X)))$$. By substituting this we get:</p> <blockquote> <div>$$loss = X - X * Labels + log(1 + exp(-X))$$</div></blockquote> <p>For stability and to prevent overflow of $$exp(-X)$$ when X < 0, we reformulate the loss as follows:</p> <blockquote> <div>$$loss = max(X, 0) - X * Labels + log(1 + exp(-<a href="#id3"><span class="problematic" id="id4">|X|</span></a>))$$</div></blockquote> <p>Both the input <cite>X</cite> and <cite>Labels</cite> can carry the LoD (Level of Details) information. However the output only shares the LoD with input <cite>X</cite>.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor, default Tensor<float>), a 2-D tensor with shape N x D, where N is the batch size and D is the number of classes. This input is a tensor of logits computed by the previous operator. Logits are unscaled log probabilities given as log(p/(1-p)). Duplicable: False Optional: False</li> <li><strong>label</strong> – (Tensor, default Tensor<float>), a 2-D tensor of the same type and shape as X. 
This input is a tensor of probabilistic labels for each logit. Duplicable: False Optional: False</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">(Tensor, default Tensor&lt;float&gt;), a 2-D tensor with shape N x D of elementwise logistic losses.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elementwise-add"> <h3>elementwise_add<a class="headerlink" href="#elementwise-add" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elementwise_add</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Limited Elementwise Add Operator.</p> <p>The equation is:</p> <p>$$Out = X + Y$$</p> <p>$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$.</p> <p>There are two cases for this operator: 1. The shape of $Y$ is same with $X$; 2. 
The shape of $Y$ is a subset of $X$.</p> <p>For case 2: $Y$ will be broadcasted to match the shape of $X$ and axis should be set to index of the start dimension to broadcast $Y$ onto $X$.</p> <dl class="docutils"> <dt>For example</dt> <dd><div class="first last highlight-python"><div class="highlight"><pre><span></span><span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">5</span><span class="p">,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span> <span 
class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span> </pre></div> </div> </dd> </dl> <p>Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of elementwise op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of elementwise op. 
Duplicable: False Optional: False</li> <li><strong>axis</strong> (<em>INT</em>) – (int, default -1). The start dimension index for broadcasting Y onto X.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output of elementwise op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elementwise-div"> <h3>elementwise_div<a class="headerlink" href="#elementwise-div" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elementwise_div</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Limited Elementwise Div Operator.</p> <p>The equation is:</p> <p>$$Out = X / Y$$</p> <p>$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$.</p> <p>There are two cases for this operator: 1. The shape of $Y$ is same with $X$; 2. 
The shape of $Y$ is a subset of $X$.</p> <p>For case 2: $Y$ will be broadcasted to match the shape of $X$ and axis should be set to index of the start dimension to broadcast $Y$ onto $X$.</p> <dl class="docutils"> <dt>For example</dt> <dd><div class="first last highlight-python"><div class="highlight"><pre><span></span><span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">5</span><span class="p">,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span> <span 
class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span> </pre></div> </div> </dd> </dl> <p>Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of elementwise op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of elementwise op. 
Duplicable: False Optional: False</li> <li><strong>axis</strong> (<em>INT</em>) – (int, default -1). The start dimension index for broadcasting Y onto X.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output of elementwise op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elementwise-sub"> <h3>elementwise_sub<a class="headerlink" href="#elementwise-sub" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elementwise_sub</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Limited Elementwise Sub Operator.</p> <p>The equation is:</p> <p>$$Out = X - Y$$</p> <p>$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$.</p> <p>There are two cases for this operator: 1. The shape of $Y$ is same with $X$; 2. 
The shape of $Y$ is a subset of $X$.</p> <p>For case 2: $Y$ will be broadcasted to match the shape of $X$ and axis should be set to index of the start dimension to broadcast $Y$ onto $X$.</p> <dl class="docutils"> <dt>For example</dt> <dd><div class="first last highlight-python"><div class="highlight"><pre><span></span><span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">5</span><span class="p">,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span> <span 
class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span> </pre></div> </div> </dd> </dl> <p>Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of elementwise op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of elementwise op. 
Duplicable: False Optional: False</li> <li><strong>axis</strong> (<em>INT</em>) – (int, default -1). The start dimension index for broadcasting Y onto X.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output of elementwise op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elementwise-mul"> <h3>elementwise_mul<a class="headerlink" href="#elementwise-mul" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elementwise_mul</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Limited Elementwise Mul Operator.</p> <p>The equation is:</p> <p>$$Out = X \odot Y$$</p> <p>$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$.</p> <p>There are two cases for this operator: 1. The shape of $Y$ is same with $X$; 2. 
The shape of $Y$ is a subset of $X$.</p> <p>For case 2: $Y$ will be broadcasted to match the shape of $X$ and axis should be set to index of the start dimension to broadcast $Y$ onto $X$.</p> <dl class="docutils"> <dt>For example</dt> <dd><div class="first last highlight-python"><div class="highlight"><pre><span></span><span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">5</span><span class="p">,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span> <span 
class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span> </pre></div> </div> </dd> </dl> <p>Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of elementwise op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of elementwise op. 
Duplicable: False Optional: False</li> <li><strong>axis</strong> (<em>INT</em>) – (int, default -1). The start dimension index for broadcasting Y onto X.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output of elementwise op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elementwise-max"> <h3>elementwise_max<a class="headerlink" href="#elementwise-max" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elementwise_max</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Limited Elementwise Max Operator.</p> <p>The equation is:</p> <p>$$Out = max(X, Y)$$</p> <p>$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$.</p> <p>There are two cases for this operator: 1. The shape of $Y$ is same with $X$; 2. 
The shape of $Y$ is a subset of $X$.</p> <p>For case 2: $Y$ will be broadcasted to match the shape of $X$ and axis should be set to index of the start dimension to broadcast $Y$ onto $X$.</p> <dl class="docutils"> <dt>For example</dt> <dd><div class="first last highlight-python"><div class="highlight"><pre><span></span><span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">5</span><span class="p">,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span> <span 
class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span> </pre></div> </div> </dd> </dl> <p>Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of elementwise op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of elementwise op. 
Duplicable: False Optional: False</li> <li><strong>axis</strong> (<em>INT</em>) – (int, default -1). The start dimension index for broadcasting Y onto X.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output of elementwise op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elementwise-min"> <h3>elementwise_min<a class="headerlink" href="#elementwise-min" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elementwise_min</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Limited Elementwise Min Operator.</p> <p>The equation is:</p> <p>$$Out = min(X, Y)$$</p> <p>$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$.</p> <p>There are two cases for this operator: 1. The shape of $Y$ is same with $X$; 2. 
The shape of $Y$ is a subset of $X$.</p> <p>For case 2: $Y$ will be broadcasted to match the shape of $X$ and axis should be set to index of the start dimension to broadcast $Y$ onto $X$.</p> <dl class="docutils"> <dt>For example</dt> <dd><div class="first last highlight-python"><div class="highlight"><pre><span></span><span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">5</span><span class="p">,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span> <span 
class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span> </pre></div> </div> </dd> </dl> <p>Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of elementwise op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of elementwise op. 
Duplicable: False Optional: False</li> <li><strong>axis</strong> (<em>INT</em>) – (int, default -1). The start dimension index for broadcasting Y onto X.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output of elementwise op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elementwise-pow"> <h3>elementwise_pow<a class="headerlink" href="#elementwise-pow" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elementwise_pow</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Limited Elementwise Pow Operator.</p> <p>The equation is:</p> <p>$$Out = X ^ Y$$</p> <p>$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$.</p> <p>There are two cases for this operator: 1. The shape of $Y$ is same with $X$; 2. 
The shape of $Y$ is a subset of $X$.</p> <p>For case 2: $Y$ will be broadcasted to match the shape of $X$ and axis should be set to index of the start dimension to broadcast $Y$ onto $X$.</p> <dl class="docutils"> <dt>For example</dt> <dd><div class="first last highlight-python"><div class="highlight"><pre><span></span><span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">5</span><span class="p">,)</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span> <span 
class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">1</span> <span class="n">shape</span><span class="p">(</span><span class="n">X</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">shape</span><span class="p">(</span><span class="n">Y</span><span class="p">)</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span><span class="p">),</span> <span class="k">with</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span> </pre></div> </div> </dd> </dl> <p>Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor), The first input tensor of elementwise op. Duplicable: False Optional: False</li> <li><strong>y</strong> – (Tensor), The second input tensor of elementwise op. 
Duplicable: False Optional: False</li> <li><strong>axis</strong> (<em>INT</em>) – (int, default -1). The start dimension index for broadcasting Y onto X.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">The output of elementwise op.</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="clip"> <h3>clip<a class="headerlink" href="#clip" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">clip</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Clip Operator.</p> <p>The clip operator limits the value of given input within an interval. The interval is specified with arguments ‘min’ and ‘max’:</p> <p>$$ Out = min(max(X, min), max) $$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor)The input of clip op.The number of dimensions must be between [1, 9]. 
Duplicable: False Optional: False</li> <li><strong>min</strong> (<em>FLOAT</em>) – (float)Minimum value, under which element is replaced by min.</li> <li><strong>max</strong> (<em>FLOAT</em>) – (float)Maximum value, above which element is replaced by max</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">(Tensor)The output of clip op with shape as input(X)</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="clip-by-norm"> <h3>clip_by_norm<a class="headerlink" href="#clip-by-norm" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">clip_by_norm</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>ClipByNorm Operator.</p> <p>This operator limits the L2 norm of the input $X$ within $max\_norm$. If the L2 norm of $X$ is less than or equal to $max\_norm$, $Out$ will be the same as $X$. If the L2 norm of $X$ is greater than $max\_norm$, $X$ will be linearly scaled to make the L2 norm of $Out$ equal to $max\_norm$, as shown in the following formula:</p> <p>$$ Out = \frac{max\_norm * X}{norm(X)}, $$</p> <p>where $norm(X)$ represents the L2 norm of $X$.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – (Tensor) The input of clip_by_norm op.The number of dimensions must be between [1, 9]. 
Duplicable: False Optional: False</li> <li><strong>max_norm</strong> (<em>FLOAT</em>) – (float) The maximum norm value.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">(Tensor) The output of clip_by_norm op with shape as input(X)</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="sequence-softmax"> <h3>sequence_softmax<a class="headerlink" href="#sequence-softmax" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sequence_softmax</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Sequence Softmax Operator.</p> <p>SequenceSoftmaxOp computes the softmax activation among all time-steps for each sequence. The dimension of each time-step should be 1. Thus, the shape of input Tensor can be either [N, 1] or [N], where N is the sum of the length of all sequences.</p> <p>The algorithm works as follows:</p> <blockquote> <div>for i-th sequence in a mini-batch:</div></blockquote> <p>$$ Out(X[lod[i]:lod[i+1]], :) = \frac{\exp(X[lod[i]:lod[i+1], :])} {\sum(\exp(X[lod[i]:lod[i+1], :]))} $$</p> <p>For example, for a mini-batch of 3 sequences with variable-length, each containing 2, 3, 2 time-steps, the lod of which is [0, 2, 5, 7], then softmax will be computed among X[0:2, :], X[2:5, :], X[5:7, :] and N turns out to be 7.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – (LoDTensor) 1-D or 2-D input LoDTensor with the 2-nd dimension of length 1. 
Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">(LoDTensor) 1-D or 2-D output LoDTensor with the 2-nd dimension of length 1.</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="sigmoid"> <h3>sigmoid<a class="headerlink" href="#sigmoid" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sigmoid</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Sigmoid Activation Operator</p> <p>$$out = \frac{1}{1 + e^{-x}}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Sigmoid operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Sigmoid operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="logsigmoid"> <h3>logsigmoid<a class="headerlink" href="#logsigmoid" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">logsigmoid</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Logsigmoid Activation Operator</p> <p>$$out = \log \frac{1}{1 + e^{-x}}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of LogSigmoid operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of LogSigmoid operator</td> </tr> </tbody> </table> </dd></dl> 
</div> <div class="section" id="exp"> <h3>exp<a class="headerlink" href="#exp" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">exp</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Exp Activation Operator.</p> <p>$out = e^x$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Exp operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Exp operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="relu"> <h3>relu<a class="headerlink" href="#relu" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">relu</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Relu Activation Operator.</p> <p>$out = max(x, 0)$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Relu operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Relu operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="tanh"> <h3>tanh<a class="headerlink" href="#tanh" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">tanh</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Tanh Activation 
Operator.</p> <p>$$out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Tanh operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Tanh operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="tanh-shrink"> <h3>tanh_shrink<a class="headerlink" href="#tanh-shrink" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">tanh_shrink</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>TanhShrink Activation Operator.</p> <p>$$out = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of TanhShrink operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of TanhShrink operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="softshrink"> <h3>softshrink<a class="headerlink" href="#softshrink" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">softshrink</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Softshrink Activation Operator.</p> <p>$$out = \begin{cases} x - \lambda, \text{if } x &gt; \lambda \\ x + \lambda, \text{if } x &lt; -\lambda \\ 0, \text{otherwise} \end{cases}$$</p> <table 
class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of Softshrink operator Duplicable: False Optional: False</li> <li><strong>lambda</strong> (<em>FLOAT</em>) – non-negative offset</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of Softshrink operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="sqrt"> <h3>sqrt<a class="headerlink" href="#sqrt" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sqrt</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Sqrt Activation Operator.</p> <p>$out = \sqrt{x}$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Sqrt operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Sqrt operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="abs"> <h3>abs<a class="headerlink" href="#abs" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">abs</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Abs Activation Operator.</p> <p>$out = |x|$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr 
class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Abs operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Abs operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="ceil"> <h3>ceil<a class="headerlink" href="#ceil" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">ceil</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Ceil Activation Operator.</p> <p>$out = ceil(x)$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Ceil operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Ceil operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="floor"> <h3>floor<a class="headerlink" href="#floor" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">floor</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Floor Activation Operator.</p> <p>$out = floor(x)$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Floor operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Floor operator</td> </tr> </tbody> </table> </dd></dl> </div> <div 
class="section" id="round"> <h3>round<a class="headerlink" href="#round" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">round</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Round Activation Operator.</p> <p>$out = [x]$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Round operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Round operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="reciprocal"> <h3>reciprocal<a class="headerlink" href="#reciprocal" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">reciprocal</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Reciprocal Activation Operator.</p> <p>$$out = frac{1}{x}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Reciprocal operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Reciprocal operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="log"> <h3>log<a class="headerlink" href="#log" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">log</code><span class="sig-paren">(</span><em>**kwargs</em><span 
class="sig-paren">)</span></dt> <dd><p>Log Activation Operator.</p> <p>$out = ln(x)$</p> <p>Natural logarithm of x.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Log operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Log operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="square"> <h3>square<a class="headerlink" href="#square" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">square</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Square Activation Operator.</p> <p>$out = x^2$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Square operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Square operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="softplus"> <h3>softplus<a class="headerlink" href="#softplus" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">softplus</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Softplus Activation Operator.</p> <p>$out = ln(1 + e^{x})$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th 
class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Softplus operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Softplus operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="softsign"> <h3>softsign<a class="headerlink" href="#softsign" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">softsign</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Softsign Activation Operator.</p> <p>$$out = \frac{x}{1 + |x|}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>x</strong> – Input of Softsign operator Duplicable: False Optional: False</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">Output of Softsign operator</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="brelu"> <h3>brelu<a class="headerlink" href="#brelu" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">brelu</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>BRelu Activation Operator.</p> <p>$out = \min(\max(x, t_{min}), t_{max})$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of BRelu operator Duplicable: False Optional: False</li> <li><strong>t_min</strong> 
(<em>FLOAT</em>) – The min marginal value of BRelu</li> <li><strong>t_max</strong> (<em>FLOAT</em>) – The max marginal value of BRelu</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of BRelu operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="leaky-relu"> <h3>leaky_relu<a class="headerlink" href="#leaky-relu" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">leaky_relu</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>LeakyRelu Activation Operator.</p> <p>$out = \max(x, \alpha * x)$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of LeakyRelu operator Duplicable: False Optional: False</li> <li><strong>alpha</strong> (<em>FLOAT</em>) – The small negative slope</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of LeakyRelu operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="soft-relu"> <h3>soft_relu<a class="headerlink" href="#soft-relu" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">soft_relu</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>SoftRelu Activation Operator.</p> <p>$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td 
class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of SoftRelu operator Duplicable: False Optional: False</li> <li><strong>threshold</strong> (<em>FLOAT</em>) – The threshold value of SoftRelu</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of SoftRelu operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="elu"> <h3>elu<a class="headerlink" href="#elu" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">elu</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>ELU Activation Operator.</p> <p>Applies the following element-wise computation on the input according to <a class="reference external" href="https://arxiv.org/abs/1511.07289">https://arxiv.org/abs/1511.07289</a>.</p> <p>$out = max(0, x) + min(0, alpha * (e^x - 1))$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of ELU operator Duplicable: False Optional: False</li> <li><strong>alpha</strong> (<em>FLOAT</em>) – The alpha value of ELU</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of ELU operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="relu6"> <h3>relu6<a class="headerlink" href="#relu6" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">relu6</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Relu6 Activation Operator.</p> <p>$out = min(max(0, x), 6)$</p> <table 
class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of Relu6 operator Duplicable: False Optional: False</li> <li><strong>threshold</strong> (<em>FLOAT</em>) – The threshold value of Relu6</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of Relu6 operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="pow"> <h3>pow<a class="headerlink" href="#pow" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">pow</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Pow Activation Operator.</p> <p>$out = x^{factor}$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of Pow operator Duplicable: False Optional: False</li> <li><strong>factor</strong> (<em>FLOAT</em>) – The exponential factor of Pow</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of Pow operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="stanh"> <h3>stanh<a class="headerlink" href="#stanh" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">stanh</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>STanh Activation Operator.</p> <p>$$out = b * frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a 
* x}}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of STanh operator Duplicable: False Optional: False</li> <li><strong>scale_a</strong> (<em>FLOAT</em>) – The scale parameter of a for the input</li> <li><strong>scale_b</strong> (<em>FLOAT</em>) – The scale parameter of b for the input</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of STanh operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="hard-shrink"> <h3>hard_shrink<a class="headerlink" href="#hard-shrink" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">hard_shrink</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>HardShrink Activation Operator.</p> <p>$$out = \begin{cases} x, \text{if } x &gt; \lambda \\ x, \text{if } x &lt; -\lambda \\ 0, \text{otherwise} \end{cases}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of HardShrink operator Duplicable: False Optional: False</li> <li><strong>threshold</strong> (<em>FLOAT</em>) – The value of threshold for HardShrink</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of HardShrink operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="thresholded-relu"> <h3>thresholded_relu<a class="headerlink" 
href="#thresholded-relu" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">thresholded_relu</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>ThresholdedRelu Activation Operator.</p> <p>$$out = \begin{cases} x, \text{if } x &gt; threshold \\ 0, \text{otherwise} \end{cases}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of ThresholdedRelu operator Duplicable: False Optional: False</li> <li><strong>threshold</strong> (<em>FLOAT</em>) – The threshold location of activation</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of ThresholdedRelu operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="hard-sigmoid"> <h3>hard_sigmoid<a class="headerlink" href="#hard-sigmoid" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">hard_sigmoid</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>HardSigmoid Activation Operator.</p> <p>Segment-wise linear approximation of sigmoid(<a class="reference external" href="https://arxiv.org/abs/1603.00391">https://arxiv.org/abs/1603.00391</a>), which is much faster than sigmoid.</p> <p>$out = \max(0, \min(1, slope * x + shift))$</p> <p>The slope should be positive. The offset can be either positive or negative. The default slope and shift are set according to the above reference. 
It is recommended to use the defaults for this activation.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of HardSigmoid operator Duplicable: False Optional: False</li> <li><strong>slope</strong> (<em>FLOAT</em>) – Slope for linear approximation of sigmoid</li> <li><strong>offset</strong> (<em>FLOAT</em>) – Offset for linear approximation of sigmoid</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of HardSigmoid operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="swish"> <h3>swish<a class="headerlink" href="#swish" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">swish</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span></dt> <dd><p>Swish Activation Operator.</p> <p>$$out = frac{x}{1 + e^{- beta x}}$$</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>x</strong> – Input of Swish operator Duplicable: False Optional: False</li> <li><strong>beta</strong> (<em>FLOAT</em>) – Constant beta of swish operator</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first last">Output of Swish operator</p> </td> </tr> </tbody> </table> </dd></dl> </div> </div> <div class="section" id="tensor"> <h2>tensor<a class="headerlink" href="#tensor" title="永久链接至标题">¶</a></h2> <div class="section" id="create-tensor"> <h3>create_tensor<a class="headerlink" 
href="#create-tensor" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">create_tensor</code><span class="sig-paren">(</span><em>dtype</em>, <em>name=None</em>, <em>persistable=False</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="create-parameter"> <h3>create_parameter<a class="headerlink" href="#create-parameter" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">create_parameter</code><span class="sig-paren">(</span><em>shape</em>, <em>dtype</em>, <em>name=None</em>, <em>attr=None</em>, <em>is_bias=False</em>, <em>default_initializer=None</em><span class="sig-paren">)</span></dt> <dd><p>Create a parameter :param shape: shape of the parameter :type shape: list[int] :param dtype: element type of the parameter :type dtype: string :param attr: attributes of the parameter :type attr: ParamAttr :param is_bias: This can affect which default initializer is chosen</p> <blockquote> <div>when default_initializer is None. If is_bias, initializer.Constant(0.0) will be used. 
Otherwise, Xavier() will be used.</div></blockquote> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>default_initializer</strong> (<em>Initializer</em>) – initializer for the parameter</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body">the created parameter</td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body">Parameter</td> </tr> </tbody> </table> </dd></dl> </div> <div class="section" id="create-global-var"> <h3>create_global_var<a class="headerlink" href="#create-global-var" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">create_global_var</code><span class="sig-paren">(</span><em>shape</em>, <em>value</em>, <em>dtype</em>, <em>persistable=False</em>, <em>name=None</em><span class="sig-paren">)</span></dt> <dd></dd></dl> </div> <div class="section" id="cast"> <h3>cast<a class="headerlink" href="#cast" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">cast</code><span class="sig-paren">(</span><em>x</em>, <em>dtype</em><span class="sig-paren">)</span></dt> <dd><p>This function takes in the input with input_dtype and casts it to the output_dtype as the output.</p> </dd></dl> </div> <div class="section" id="concat"> <h3>concat<a class="headerlink" href="#concat" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">concat</code><span class="sig-paren">(</span><em>input</em>, <em>axis=0</em><span class="sig-paren">)</span></dt> <dd><p><strong>Concat</strong></p> <p>This function concatenates the input along the axis mentioned and returns that as the 
output.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>list</em>) – List of tensors to be concatenated</li> <li><strong>axis</strong> (<em>int</em>) – Integer axis along which the tensors will be concatenated</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">Output variable of the concatenation</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> </dd></dl> </div> <div class="section" id="sums"> <h3>sums<a class="headerlink" href="#sums" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">sums</code><span class="sig-paren">(</span><em>input</em>, <em>out=None</em><span class="sig-paren">)</span></dt> <dd><p>This function performs the sum operation on the input and returns the result as the output.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><strong>input</strong> (<em>Variable|list</em>) – The input tensor that has the elements that need to be summed up.</td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><dl class="docutils"> <dt>The tensor type variable that has the sum of input</dt> <dd>written to it.</dd> </dl> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body">Variable</td> </tr> </tbody> </table> <p class="rubric">Examples</p> </dd></dl> </div> <div class="section" 
id="assign"> <h3>assign<a class="headerlink" href="#assign" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">assign</code><span class="sig-paren">(</span><em>input</em>, <em>output</em><span class="sig-paren">)</span></dt> <dd><p><strong>Assign</strong></p> <p>This function copies the <em>input</em> Variable to the <em>output</em> Variable.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable|numpy.ndarray</em>) – The source variable</li> <li><strong>output</strong> (<em>Variable</em>) – The destination variable</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The destination variable that was supplied as the <em>output</em>.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> </dd></dl> </div> <div class="section" id="fill-constant-batch-size-like"> <h3>fill_constant_batch_size_like<a class="headerlink" href="#fill-constant-batch-size-like" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">fill_constant_batch_size_like</code><span class="sig-paren">(</span><em>input</em>, <em>shape</em>, <em>dtype</em>, <em>value</em>, <em>input_dim_idx=0</em>, <em>output_dim_idx=0</em><span class="sig-paren">)</span></dt> <dd><p><strong>fill_constant_batch_size_like</strong></p> <p>This function creates a tensor of specified <em>shape</em>, <em>dtype</em> and batch size, and initializes this with a constant supplied in <em>value</em>. 
The batch size is obtained from the <cite>input</cite> tensor.</p> <p>It also sets <em>stop_gradient</em> to True.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>input</strong> (<em>Variable</em>) – Tensor whose dimensions will be used to get batch size</li> <li><strong>shape</strong> (<em>tuple|list|None</em>) – Shape of output tensor</li> <li><strong>dtype</strong> (<em>np.dtype|core.DataType|str</em>) – Data type of output tensor</li> <li><strong>value</strong> (<em>float</em>) – Constant value to initialize the output tensor</li> <li><strong>input_dim_idx</strong> (<em>int</em>) – Index of input’s batch size dimension</li> <li><strong>output_dim_idx</strong> (<em>int</em>) – Index of output’s batch size dimension</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The tensor variable storing the output</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fill_constant_batch_size_like</span><span class="p">(</span> <span class="nb">input</span><span class="o">=</span><span class="n">like</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">value</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span 
class="s1">'int64'</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="fill-constant"> <h3>fill_constant<a class="headerlink" href="#fill-constant" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">fill_constant</code><span class="sig-paren">(</span><em>shape</em>, <em>dtype</em>, <em>value</em>, <em>force_cpu=False</em>, <em>out=None</em><span class="sig-paren">)</span></dt> <dd><p><strong>fill_constant</strong></p> <p>This function creates a tensor with specified <cite>shape</cite> and <cite>dtype</cite>, and initializes it with a constant specified by <cite>value</cite>.</p> <p>The attribute <cite>stop_gradient</cite> of the created tensor is set to True.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>shape</strong> (<em>tuple|list|None</em>) – Shape of the output tensor.</li> <li><strong>dtype</strong> (<em>np.dtype|core.DataType|str</em>) – Data type of the output tensor.</li> <li><strong>value</strong> (<em>float</em>) – The constant value used to initialize the output tensor.</li> <li><strong>out</strong> (<em>Variable</em>) – The output tensor.</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The tensor variable storing the output.</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span
class="o">.</span><span class="n">fill_constant</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">value</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'int64'</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="ones"> <h3>ones<a class="headerlink" href="#ones" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">ones</code><span class="sig-paren">(</span><em>shape</em>, <em>dtype</em>, <em>force_cpu=False</em><span class="sig-paren">)</span></dt> <dd><p><strong>ones</strong></p> <p>This function creates a tensor of specified <em>shape</em> and <em>dtype</em>, and initializes this with 1.</p> <p>It also sets <em>stop_gradient</em> to True.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>shape</strong> (<em>tuple|list|None</em>) – Shape of output tensor</li> <li><strong>dtype</strong> (<em>np.dtype|core.DataType|str</em>) – Data type of output tensor</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The tensor variable storing the output</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span 
class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'int64'</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> <div class="section" id="zeros"> <h3>zeros<a class="headerlink" href="#zeros" title="永久链接至标题">¶</a></h3> <dl class="function"> <dt> <code class="descclassname">paddle.v2.fluid.layers.</code><code class="descname">zeros</code><span class="sig-paren">(</span><em>shape</em>, <em>dtype</em>, <em>force_cpu=False</em><span class="sig-paren">)</span></dt> <dd><p><strong>zeros</strong></p> <p>This function creates a tensor of specified <em>shape</em> and <em>dtype</em>, and initializes this with 0.</p> <p>It also sets <em>stop_gradient</em> to True.</p> <table class="docutils field-list" frame="void" rules="none"> <col class="field-name" /> <col class="field-body" /> <tbody valign="top"> <tr class="field-odd field"><th class="field-name">参数:</th><td class="field-body"><ul class="first simple"> <li><strong>shape</strong> (<em>tuple|list|None</em>) – Shape of output tensor</li> <li><strong>dtype</strong> (<em>np.dtype|core.DataType|str</em>) – Data type of output tensor</li> </ul> </td> </tr> <tr class="field-even field"><th class="field-name">返回:</th><td class="field-body"><p class="first">The tensor variable storing the output</p> </td> </tr> <tr class="field-odd field"><th class="field-name">返回类型:</th><td class="field-body"><p class="first last">Variable</p> </td> </tr> </tbody> </table> <p class="rubric">Examples</p> <div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">shape</span><span 
class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">'int64'</span><span class="p">)</span> </pre></div> </div> </dd></dl> </div> </div> </div> </div> </div> <footer> <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation"> <a href="data_feeder.html" class="btn btn-neutral float-right" title="data_feeder" accesskey="n">Next <span class="fa fa-arrow-circle-right"></span></a> <a href="../fluid.html" class="btn btn-neutral" title="Fluid" accesskey="p"><span class="fa fa-arrow-circle-left"></span> Previous</a> </div> <hr/> <div role="contentinfo"> <p> © Copyright 2016, PaddlePaddle developers. </p> </div> Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. </footer> </div> </div> </section> </div> <script type="text/javascript"> var DOCUMENTATION_OPTIONS = { URL_ROOT:'../../../', VERSION:'', COLLAPSE_INDEX:false, FILE_SUFFIX:'.html', HAS_SOURCE: true, SOURCELINK_SUFFIX: ".txt", }; </script> <script type="text/javascript" src="../../../_static/jquery.js"></script> <script type="text/javascript" src="../../../_static/underscore.js"></script> <script type="text/javascript" src="../../../_static/doctools.js"></script> <script type="text/javascript" src="../../../_static/translations.js"></script> <script type="text/javascript" src="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js"></script> <script type="text/javascript" src="../../../_static/js/theme.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script> <script src="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/js/perfect-scrollbar.jquery.min.js"></script> <script 
src="../../../_static/js/paddle_doc_init.js"></script> </body> </html>