提交 952a0bf8 编写于 作者: T Travis CI

Deploy to GitHub Pages: 3dd8c9be

上级 f78acdfe
......@@ -73,6 +73,12 @@ img_pool_layer
:members: img_pool_layer
:noindex:
maxout_layer
------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: maxout_layer
:noindex:
Norm Layer
==========
......
......@@ -1552,12 +1552,12 @@ var _hmt = _hmt || [];
<dt><a href="source/cuda/matrix/matrix.html#c.HL_MATRIX_BASE_CUH_">HL_MATRIX_BASE_CUH_ (C macro)</a>
</dt>
</dl></td>
<td style="width: 33%" valign="top"><dl>
<dt><a href="source/cuda/matrix/matrix.html#_CPPv230hl_matrix_classification_errorP4realPiP4realii">hl_matrix_classification_error (C++ function)</a>
</dt>
</dl></td>
<td style="width: 33%" valign="top"><dl>
<dt><a href="source/cuda/matrix/matrix.html#_CPPv220hl_matrix_column_maxP4realP4realii">hl_matrix_column_max (C++ function)</a>
</dt>
......@@ -1691,6 +1691,14 @@ var _hmt = _hmt || [];
</dt>
<dt><a href="source/cuda/rnn/rnn.html#_CPPv218hl_maxout_backwardP4realPK4realPKi6size_t6size_t6size_t6size_t">hl_maxout_backward (C++ function)</a>
</dt>
<dt><a href="source/cuda/rnn/rnn.html#_CPPv217hl_maxout_forwardPK4realP4realPi6size_t6size_t6size_t6size_t">hl_maxout_forward (C++ function)</a>
</dt>
<dt><a href="source/cuda/rnn/rnn.html#_CPPv219hl_maxpool_backwardKiPK4realPK4realPK4realKiKiKiKiKiKiKiKiKiKiKi4real4realP4real">hl_maxpool_backward (C++ function)</a>
</dt>
......@@ -5203,7 +5211,7 @@ var _hmt = _hmt || [];
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9CpuMatrix6colMaxER6Matrix">paddle::CpuMatrix::colMax (C++ function)</a>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9CpuMatrix6colMaxER6Matrix">paddle::CpuMatrix::colMax (C++ function)</a>, <a href="source/math/matrix/matrix.html#_CPPv2N6paddle9CpuMatrix6colMaxER7IVectorR6Matrix">[1]</a>
</dt>
......@@ -5291,6 +5299,14 @@ var _hmt = _hmt || [];
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9CpuMatrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t">paddle::CpuMatrix::maxoutBackward (C++ function)</a>
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9CpuMatrix13maxoutForwardER6MatrixR7IVector6size_t6size_t">paddle::CpuMatrix::maxoutForward (C++ function)</a>
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9CpuMatrix15maxPoolBackwardER6Matrix6size_t6size_tR6MatrixR6Matrix6size_t6size_t6size_t6size_t6size_t6size_t4real4real6size_t6size_t">paddle::CpuMatrix::maxPoolBackward (C++ function)</a>
</dt>
......@@ -7307,7 +7323,7 @@ var _hmt = _hmt || [];
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9GpuMatrix6colMaxER6Matrix">paddle::GpuMatrix::colMax (C++ function)</a>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9GpuMatrix6colMaxER6Matrix">paddle::GpuMatrix::colMax (C++ function)</a>, <a href="source/math/matrix/matrix.html#_CPPv2N6paddle9GpuMatrix6colMaxER7IVectorR6Matrix">[1]</a>
</dt>
......@@ -7391,6 +7407,14 @@ var _hmt = _hmt || [];
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9GpuMatrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t">paddle::GpuMatrix::maxoutBackward (C++ function)</a>
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9GpuMatrix13maxoutForwardER6MatrixR7IVector6size_t6size_t">paddle::GpuMatrix::maxoutForward (C++ function)</a>
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle9GpuMatrix15maxPoolBackwardER6Matrix6size_t6size_tR6MatrixR6Matrix6size_t6size_t6size_t6size_t6size_t6size_t4real4real6size_t6size_t">paddle::GpuMatrix::maxPoolBackward (C++ function)</a>
</dt>
......@@ -9283,7 +9307,7 @@ var _hmt = _hmt || [];
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle6Matrix6colMaxER6Matrix">paddle::Matrix::colMax (C++ function)</a>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle6Matrix6colMaxER6Matrix">paddle::Matrix::colMax (C++ function)</a>, <a href="source/math/matrix/matrix.html#_CPPv2N6paddle6Matrix6colMaxER7IVectorR6Matrix">[1]</a>
</dt>
......@@ -9423,6 +9447,14 @@ var _hmt = _hmt || [];
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle6Matrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t">paddle::Matrix::maxoutBackward (C++ function)</a>
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle6Matrix13maxoutForwardER6MatrixR7IVector6size_t6size_t">paddle::Matrix::maxoutForward (C++ function)</a>
</dt>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle6Matrix15maxPoolBackwardER6Matrix6size_t6size_tR6MatrixR6Matrix6size_t6size_t6size_t6size_t6size_t6size_t4real4real6size_t6size_t">paddle::Matrix::maxPoolBackward (C++ function)</a>
</dt>
......@@ -9638,6 +9670,8 @@ var _hmt = _hmt || [];
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle6MatrixD0Ev">paddle::Matrix::~Matrix (C++ function)</a>
</dt>
</dl></td>
<td style="width: 33%" valign="top"><dl>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle12MatrixOffsetE">paddle::MatrixOffset (C++ class)</a>
</dt>
......@@ -9650,8 +9684,6 @@ var _hmt = _hmt || [];
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle12MatrixOffset5aRow_E">paddle::MatrixOffset::aRow_ (C++ member)</a>
</dt>
</dl></td>
<td style="width: 33%" valign="top"><dl>
<dt><a href="source/math/matrix/matrix.html#_CPPv2N6paddle12MatrixOffset5bCol_E">paddle::MatrixOffset::bCol_ (C++ member)</a>
</dt>
......
无法预览此类型文件
此差异已折叠。
......@@ -298,7 +298,7 @@ var _hmt = _hmt || [];
<dd><ul class="breatheparameterlist first last">
<li><code class="first docutils literal"><span class="pre">frameCnt</span></code> - <p>batch size of input image. </p>
</li>
<li><code class="first docutils literal"><span class="pre">outGrad</span></code> - <p>input data. </p>
<li><code class="first docutils literal"><span class="pre">outGrad</span></code> - <p>output grad data. </p>
</li>
<li><code class="first docutils literal"><span class="pre">channels</span></code> - <p>number of channel. </p>
</li>
......@@ -404,6 +404,60 @@ var _hmt = _hmt || [];
</p>
</dd></dl>
<dl class="function">
<dt id="_CPPv217hl_maxout_forwardPK4realP4realPi6size_t6size_t6size_t6size_t">
<span id="hl_maxout_forward__realCP.realP.iP.s.s.s.s"></span><span class="target" id="paddlehl__cnn_8h_1a194fd1a9bed05cb00bf7bf59b649b053"></span>void <code class="descname">hl_maxout_forward</code><span class="sig-paren">(</span><em class="property">const</em> real *<em>inData</em>, real *<em>outData</em>, int *<em>idData</em>, size_t <em>batchSize</em>, size_t <em>size</em>, size_t <em>featLen</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv217hl_maxout_forwardPK4realP4realPi6size_t6size_t6size_t6size_t" title="Permalink to this definition"></a></dt>
<dd><p>MaxOut forward. </p>
<p><dl class="docutils">
<dt><strong>Parameters</strong></dt>
<dd><ul class="breatheparameterlist first last">
<li><code class="first docutils literal"><span class="pre">inData</span></code> - <p>input data. </p>
</li>
<li><code class="first docutils literal"><span class="pre">outData</span></code> - <p>output data. </p>
</li>
<li><code class="first docutils literal"><span class="pre">idData</span></code> - <p>output maxId. </p>
</li>
<li><code class="first docutils literal"><span class="pre">batchSize</span></code> - <p>batchSize. </p>
</li>
<li><code class="first docutils literal"><span class="pre">size</span></code> - <p>number of channels * image height * image width. </p>
</li>
<li><code class="first docutils literal"><span class="pre">featLen</span></code> - <p>feature length = image height * image width. </p>
</li>
<li><code class="first docutils literal"><span class="pre">groups</span></code> - <p>number of groups. </p>
</li>
</ul>
</dd>
</dl>
</p>
</dd></dl>
<dl class="function">
<dt id="_CPPv218hl_maxout_backwardP4realPK4realPKi6size_t6size_t6size_t6size_t">
<span id="hl_maxout_backward__realP.realCP.iCP.s.s.s.s"></span><span class="target" id="paddlehl__cnn_8h_1a258d5fbc3c34047d5dc6ac068e81730e"></span>void <code class="descname">hl_maxout_backward</code><span class="sig-paren">(</span>real *<em>inGrad</em>, <em class="property">const</em> real *<em>outGrad</em>, <em class="property">const</em> int *<em>idData</em>, size_t <em>batchSize</em>, size_t <em>size</em>, size_t <em>featLen</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv218hl_maxout_backwardP4realPK4realPKi6size_t6size_t6size_t6size_t" title="Permalink to this definition"></a></dt>
<dd><p>MaxOut backward. </p>
<p><dl class="docutils">
<dt><strong>Parameters</strong></dt>
<dd><ul class="breatheparameterlist first last">
<li><code class="first docutils literal"><span class="pre">inGrad</span></code> - <p>input grad data. </p>
</li>
<li><code class="first docutils literal"><span class="pre">outGrad</span></code> - <p>output grad data. </p>
</li>
<li><code class="first docutils literal"><span class="pre">idData</span></code> - <p>output maxId. </p>
</li>
<li><code class="first docutils literal"><span class="pre">batchSize</span></code> - <p>batchSize. </p>
</li>
<li><code class="first docutils literal"><span class="pre">size</span></code> - <p>number of channels * image height * image width. </p>
</li>
<li><code class="first docutils literal"><span class="pre">featLen</span></code> - <p>feature length = image height * image width. </p>
</li>
<li><code class="first docutils literal"><span class="pre">groups</span></code> - <p>number of groups. </p>
</li>
</ul>
</dd>
</dl>
</p>
</dd></dl>
</div>
<div class="breathe-sectiondef container">
<p class="breathe-sectiondef-title rubric">Defines</p>
......
......@@ -78,7 +78,7 @@ var _hmt = _hmt || [];
<dt id="_CPPv2N6paddle5LayerE">
<span id="paddle::Layer"></span><span class="target" id="paddleclasspaddle_1_1Layer"></span><em class="property">class </em><code class="descclassname">paddle::</code><code class="descname">Layer</code><a class="headerlink" href="#_CPPv2N6paddle5LayerE" title="Permalink to this definition">¶</a></dt>
<dd><p>Base class for layer. Define necessary variables and functions for every layer. </p>
<p>Subclassed by <a class="reference internal" href="#paddleclasspaddle_1_1AddtoLayer"><span class="std std-ref">paddle::AddtoLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1AgentLayer"><span class="std std-ref">paddle::AgentLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1BatchNormBaseLayer"><span class="std std-ref">paddle::BatchNormBaseLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1BlockExpandLayer"><span class="std std-ref">paddle::BlockExpandLayer</span></a>, paddle::BootBiasLayer, <a class="reference internal" href="#paddleclasspaddle_1_1ConcatenateLayer"><span class="std std-ref">paddle::ConcatenateLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConcatenateLayer2"><span class="std std-ref">paddle::ConcatenateLayer2</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConvBaseLayer"><span class="std std-ref">paddle::ConvBaseLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConvexCombinationLayer"><span class="std std-ref">paddle::ConvexCombinationLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConvShiftLayer"><span class="std std-ref">paddle::ConvShiftLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CosSimLayer"><span class="std std-ref">paddle::CosSimLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CosSimVecMatLayer"><span class="std std-ref">paddle::CosSimVecMatLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CostLayer"><span class="std std-ref">paddle::CostLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CRFLayer"><span class="std std-ref">paddle::CRFLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CTCLayer"><span class="std std-ref">paddle::CTCLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1DataLayer"><span 
class="std std-ref">paddle::DataLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1DataNormLayer"><span class="std std-ref">paddle::DataNormLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1EosIdCheckLayer"><span class="std std-ref">paddle::EosIdCheckLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ExpandLayer"><span class="std std-ref">paddle::ExpandLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1FeatureMapExpandLayer"><span class="std std-ref">paddle::FeatureMapExpandLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1FullyConnectedLayer"><span class="std std-ref">paddle::FullyConnectedLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GatedRecurrentLayer"><span class="std std-ref">paddle::GatedRecurrentLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GatherAgentLayer"><span class="std std-ref">paddle::GatherAgentLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GetOutputLayer"><span class="std std-ref">paddle::GetOutputLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GruStepLayer"><span class="std std-ref">paddle::GruStepLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1HierarchicalSigmoidLayer"><span class="std std-ref">paddle::HierarchicalSigmoidLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1InterpolationLayer"><span class="std std-ref">paddle::InterpolationLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1LambdaCost"><span class="std std-ref">paddle::LambdaCost</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1LstmLayer"><span class="std std-ref">paddle::LstmLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1LstmStepLayer"><span class="std std-ref">paddle::LstmStepLayer</span></a>, <a class="reference 
internal" href="#paddleclasspaddle_1_1MaxIdLayer"><span class="std std-ref">paddle::MaxIdLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1MixedLayer"><span class="std std-ref">paddle::MixedLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1MultiplexLayer"><span class="std std-ref">paddle::MultiplexLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1NCELayer"><span class="std std-ref">paddle::NCELayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1NormLayer"><span class="std std-ref">paddle::NormLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1OuterProdLayer"><span class="std std-ref">paddle::OuterProdLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ParameterReluLayer"><span class="std std-ref">paddle::ParameterReluLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1PoolLayer"><span class="std std-ref">paddle::PoolLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1PowerLayer"><span class="std std-ref">paddle::PowerLayer</span></a>, paddle::PrintLayer, <a class="reference internal" href="#paddleclasspaddle_1_1RankingCost"><span class="std std-ref">paddle::RankingCost</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1RecurrentLayer"><span class="std std-ref">paddle::RecurrentLayer</span></a>, paddle::RecurrentLayerGroup, <a class="reference internal" href="#paddleclasspaddle_1_1ResizeLayer"><span class="std std-ref">paddle::ResizeLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SamplingIdLayer"><span class="std std-ref">paddle::SamplingIdLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ScalingLayer"><span class="std std-ref">paddle::ScalingLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ScatterAgentLayer"><span class="std std-ref">paddle::ScatterAgentLayer</span></a>, 
<a class="reference internal" href="#paddleclasspaddle_1_1SelectiveFullyConnectedLayer"><span class="std std-ref">paddle::SelectiveFullyConnectedLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SequenceConcatLayer"><span class="std std-ref">paddle::SequenceConcatLayer</span></a>, paddle::SequencePoolLayer, <a class="reference internal" href="#paddleclasspaddle_1_1SequenceReshapeLayer"><span class="std std-ref">paddle::SequenceReshapeLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SlopeInterceptLayer"><span class="std std-ref">paddle::SlopeInterceptLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SubSequenceLayer"><span class="std std-ref">paddle::SubSequenceLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SumToOneNormLayer"><span class="std std-ref">paddle::SumToOneNormLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1TensorLayer"><span class="std std-ref">paddle::TensorLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1TransLayer"><span class="std std-ref">paddle::TransLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ValidationLayer"><span class="std std-ref">paddle::ValidationLayer</span></a></p>
<p>Subclassed by <a class="reference internal" href="#paddleclasspaddle_1_1AddtoLayer"><span class="std std-ref">paddle::AddtoLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1AgentLayer"><span class="std std-ref">paddle::AgentLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1BatchNormBaseLayer"><span class="std std-ref">paddle::BatchNormBaseLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1BlockExpandLayer"><span class="std std-ref">paddle::BlockExpandLayer</span></a>, paddle::BootBiasLayer, <a class="reference internal" href="#paddleclasspaddle_1_1ConcatenateLayer"><span class="std std-ref">paddle::ConcatenateLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConcatenateLayer2"><span class="std std-ref">paddle::ConcatenateLayer2</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConvBaseLayer"><span class="std std-ref">paddle::ConvBaseLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConvexCombinationLayer"><span class="std std-ref">paddle::ConvexCombinationLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ConvShiftLayer"><span class="std std-ref">paddle::ConvShiftLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CosSimLayer"><span class="std std-ref">paddle::CosSimLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CosSimVecMatLayer"><span class="std std-ref">paddle::CosSimVecMatLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CostLayer"><span class="std std-ref">paddle::CostLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CRFLayer"><span class="std std-ref">paddle::CRFLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1CTCLayer"><span class="std std-ref">paddle::CTCLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1DataLayer"><span 
class="std std-ref">paddle::DataLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1DataNormLayer"><span class="std std-ref">paddle::DataNormLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1EosIdCheckLayer"><span class="std std-ref">paddle::EosIdCheckLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ExpandLayer"><span class="std std-ref">paddle::ExpandLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1FeatureMapExpandLayer"><span class="std std-ref">paddle::FeatureMapExpandLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1FullyConnectedLayer"><span class="std std-ref">paddle::FullyConnectedLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GatedRecurrentLayer"><span class="std std-ref">paddle::GatedRecurrentLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GatherAgentLayer"><span class="std std-ref">paddle::GatherAgentLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GetOutputLayer"><span class="std std-ref">paddle::GetOutputLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1GruStepLayer"><span class="std std-ref">paddle::GruStepLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1HierarchicalSigmoidLayer"><span class="std std-ref">paddle::HierarchicalSigmoidLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1InterpolationLayer"><span class="std std-ref">paddle::InterpolationLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1LambdaCost"><span class="std std-ref">paddle::LambdaCost</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1LstmLayer"><span class="std std-ref">paddle::LstmLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1LstmStepLayer"><span class="std std-ref">paddle::LstmStepLayer</span></a>, <a class="reference 
internal" href="#paddleclasspaddle_1_1MaxIdLayer"><span class="std std-ref">paddle::MaxIdLayer</span></a>, paddle::MaxOutLayer, <a class="reference internal" href="#paddleclasspaddle_1_1MixedLayer"><span class="std std-ref">paddle::MixedLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1MultiplexLayer"><span class="std std-ref">paddle::MultiplexLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1NCELayer"><span class="std std-ref">paddle::NCELayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1NormLayer"><span class="std std-ref">paddle::NormLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1OuterProdLayer"><span class="std std-ref">paddle::OuterProdLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ParameterReluLayer"><span class="std std-ref">paddle::ParameterReluLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1PoolLayer"><span class="std std-ref">paddle::PoolLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1PowerLayer"><span class="std std-ref">paddle::PowerLayer</span></a>, paddle::PrintLayer, <a class="reference internal" href="#paddleclasspaddle_1_1RankingCost"><span class="std std-ref">paddle::RankingCost</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1RecurrentLayer"><span class="std std-ref">paddle::RecurrentLayer</span></a>, paddle::RecurrentLayerGroup, <a class="reference internal" href="#paddleclasspaddle_1_1ResizeLayer"><span class="std std-ref">paddle::ResizeLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SamplingIdLayer"><span class="std std-ref">paddle::SamplingIdLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ScalingLayer"><span class="std std-ref">paddle::ScalingLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ScatterAgentLayer"><span class="std 
std-ref">paddle::ScatterAgentLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SelectiveFullyConnectedLayer"><span class="std std-ref">paddle::SelectiveFullyConnectedLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SequenceConcatLayer"><span class="std std-ref">paddle::SequenceConcatLayer</span></a>, paddle::SequencePoolLayer, <a class="reference internal" href="#paddleclasspaddle_1_1SequenceReshapeLayer"><span class="std std-ref">paddle::SequenceReshapeLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SlopeInterceptLayer"><span class="std std-ref">paddle::SlopeInterceptLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SubSequenceLayer"><span class="std std-ref">paddle::SubSequenceLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1SumToOneNormLayer"><span class="std std-ref">paddle::SumToOneNormLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1TensorLayer"><span class="std std-ref">paddle::TensorLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1TransLayer"><span class="std std-ref">paddle::TransLayer</span></a>, <a class="reference internal" href="#paddleclasspaddle_1_1ValidationLayer"><span class="std std-ref">paddle::ValidationLayer</span></a></p>
<div class="breathe-sectiondef container">
<p class="breathe-sectiondef-title rubric">Public Functions</p>
<dl class="function">
......
......@@ -1966,6 +1966,24 @@ where bit(i, j) = ((codes(i) + numClasses) &amp; 2^j) ? 1 : 0
<dl class="function">
<dt id="_CPPv2N6paddle6Matrix6colMaxER6Matrix">
<span id="paddle::Matrix::colMax__MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1Matrix_1ac89c66983c6376c64d468a2e47890a8d"></span>void <code class="descname">colMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>max</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle6Matrix6colMaxER6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>set the max of each column of this to mat </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle6Matrix6colMaxER7IVectorR6Matrix">
<span id="paddle::Matrix::colMax__IVectorR.MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1Matrix_1a946a1df809f7067a468d7cd835dddc90"></span>void <code class="descname">colMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>maxIds</em>, <a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>maxVal</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle6Matrix6colMaxER7IVectorR6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>Get the top k elements of each column of this matrix. </p>
<p>The row ids and values of these elements are stored in maxIds and max respectively. where k is the size of maxIds. And note that the top k elements are not sorted. </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle6Matrix13maxoutForwardER6MatrixR7IVector6size_t6size_t">
<span id="paddle::Matrix::maxoutForward__MatrixR.IVectorR.s.s"></span>virtual <span class="target" id="paddleclasspaddle_1_1Matrix_1a6ba3527c4fbea9e70b3e79cc4279d379"></span>void <code class="descname">maxoutForward</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>a</em>, <a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>id</em>, size_t <em>channels</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle6Matrix13maxoutForwardER6MatrixR7IVector6size_t6size_t" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle6Matrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t">
<span id="paddle::Matrix::maxoutBackward__MatrixR.IVectorR.s.s"></span>virtual <span class="target" id="paddleclasspaddle_1_1Matrix_1a7f56ce9593dba387c49c5a182b2ed750"></span>void <code class="descname">maxoutBackward</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>a</em>, <a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>id</em>, size_t <em>channels</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle6Matrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="function">
......@@ -1977,7 +1995,7 @@ where bit(i, j) = ((codes(i) + numClasses) &amp; 2^j) ? 1 : 0
<dt id="_CPPv2N6paddle6Matrix6rowMaxER7IVectorR6Matrix">
<span id="paddle::Matrix::rowMax__IVectorR.MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1Matrix_1a6c3aca70393342be128eab1f2e6dc379"></span>void <code class="descname">rowMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>maxIds</em>, <a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>max</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle6Matrix6rowMaxER7IVectorR6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>Get the top k elements of each row of this matrix. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. Note that the top k elements are not sorted. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. where k is the size of maxIds. And note that the top k elements are not sorted. </p>
</dd></dl>
<dl class="function">
......@@ -2748,12 +2766,30 @@ where bit(i, j) = ((codes(i) + numClasses) &amp; 2^j) ? 1 : 0
<dt id="_CPPv2N6paddle9GpuMatrix6rowMaxER7IVectorR6Matrix">
<span id="paddle::GpuMatrix::rowMax__IVectorR.MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1GpuMatrix_1a1bf5a4d0d31aa3cd92e495c4b4fc3e29"></span>void <code class="descname">rowMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>maxIds</em>, <a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>max</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9GpuMatrix6rowMaxER7IVectorR6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>Get the top k elements of each row of this matrix. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. Note that the top k elements are not sorted. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. where k is the size of maxIds. And note that the top k elements are not sorted. </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9GpuMatrix6colMaxER6Matrix">
<span id="paddle::GpuMatrix::colMax__MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1GpuMatrix_1a34e4d0e75833ffa50d8aee72bbe592da"></span>void <code class="descname">colMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>max</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9GpuMatrix6colMaxER6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>set the max of each column of this to mat </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9GpuMatrix6colMaxER7IVectorR6Matrix">
<span id="paddle::GpuMatrix::colMax__IVectorR.MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1GpuMatrix_1a50899afbb71c85e2a55487131fe7672d"></span>void <code class="descname">colMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>maxIds</em>, <a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>maxVal</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9GpuMatrix6colMaxER7IVectorR6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>Get the top k elements of each column of this matrix. </p>
<p>The row ids and values of these elements are stored in maxIds and max respectively. where k is the size of maxIds. And note that the top k elements are not sorted. </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9GpuMatrix13maxoutForwardER6MatrixR7IVector6size_t6size_t">
<span id="paddle::GpuMatrix::maxoutForward__MatrixR.IVectorR.s.s"></span>virtual <span class="target" id="paddleclasspaddle_1_1GpuMatrix_1a7442fb9f327c87cdbcda52b4a00e6d72"></span>void <code class="descname">maxoutForward</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>a</em>, <a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>id</em>, size_t <em>channels</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9GpuMatrix13maxoutForwardER6MatrixR7IVector6size_t6size_t" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9GpuMatrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t">
<span id="paddle::GpuMatrix::maxoutBackward__MatrixR.IVectorR.s.s"></span>virtual <span class="target" id="paddleclasspaddle_1_1GpuMatrix_1a4a921cc1adb4953e2a406c8ca4f59410"></span>void <code class="descname">maxoutBackward</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>a</em>, <a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>id</em>, size_t <em>channels</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9GpuMatrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="function">
......@@ -3372,12 +3408,30 @@ where bit(i, j) = ((codes(i) + numClasses) &amp; 2^j) ? 1 : 0
<dt id="_CPPv2N6paddle9CpuMatrix6rowMaxER7IVectorR6Matrix">
<span id="paddle::CpuMatrix::rowMax__IVectorR.MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1CpuMatrix_1a8d63feac52987dddd6af7741afbf846b"></span>void <code class="descname">rowMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>maxIds</em>, <a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>max</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9CpuMatrix6rowMaxER7IVectorR6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>Get the top k elements of each row of this matrix. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. Note that the top k elements are not sorted. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. where k is the size of maxIds. And note that the top k elements are not sorted. </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9CpuMatrix6colMaxER6Matrix">
<span id="paddle::CpuMatrix::colMax__MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1CpuMatrix_1aa90557207b0204490d88ef854ddd1d1a"></span>void <code class="descname">colMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>max</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9CpuMatrix6colMaxER6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>set the max of each column of this to mat </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9CpuMatrix6colMaxER7IVectorR6Matrix">
<span id="paddle::CpuMatrix::colMax__IVectorR.MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1CpuMatrix_1a126ca6b5874e7cc2b66fe2ef4446b64f"></span>void <code class="descname">colMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>maxIds</em>, <a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>maxVal</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9CpuMatrix6colMaxER7IVectorR6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>Get the top k elements of each column of this matrix. </p>
<p>The row ids and values of these elements are stored in maxIds and max respectively. where k is the size of maxIds. And note that the top k elements are not sorted. </p>
</dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9CpuMatrix13maxoutForwardER6MatrixR7IVector6size_t6size_t">
<span id="paddle::CpuMatrix::maxoutForward__MatrixR.IVectorR.s.s"></span>virtual <span class="target" id="paddleclasspaddle_1_1CpuMatrix_1ad220fe3dac59b84dadb6869f8c2e8b69"></span>void <code class="descname">maxoutForward</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>a</em>, <a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>id</em>, size_t <em>channels</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9CpuMatrix13maxoutForwardER6MatrixR7IVector6size_t6size_t" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="function">
<dt id="_CPPv2N6paddle9CpuMatrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t">
<span id="paddle::CpuMatrix::maxoutBackward__MatrixR.IVectorR.s.s"></span>virtual <span class="target" id="paddleclasspaddle_1_1CpuMatrix_1a4f3d8539c81c249d82b8736b056b2dc5"></span>void <code class="descname">maxoutBackward</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>a</em>, <a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>id</em>, size_t <em>channels</em>, size_t <em>groups</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle9CpuMatrix14maxoutBackwardER6MatrixR7IVector6size_t6size_t" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="function">
......@@ -5954,7 +6008,7 @@ virtual <span class="target" id="paddleclasspaddle_1_1GpuSparseMatrix_1a1ea7be6a
<dt id="_CPPv2N6paddle15CpuSparseMatrix6rowMaxER7IVectorR6Matrix">
<span id="paddle::CpuSparseMatrix::rowMax__IVectorR.MatrixR"></span>virtual <span class="target" id="paddleclasspaddle_1_1CpuSparseMatrix_1a7724e18286ae958b8c5709f075cf6dc0"></span>void <code class="descname">rowMax</code><span class="sig-paren">(</span><a class="reference internal" href="#_CPPv2N6paddle7IVectorE" title="paddle::IVector">IVector</a> &amp;<em>maxIds</em>, <a class="reference internal" href="#_CPPv2N6paddle6MatrixE" title="paddle::Matrix">Matrix</a> &amp;<em>max</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N6paddle15CpuSparseMatrix6rowMaxER7IVectorR6Matrix" title="Permalink to this definition">¶</a></dt>
<dd><p>Get the top k elements of each row of this matrix. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. Note that the top k elements are not sorted. </p>
<p>The column ids and values of these elements are stored in maxIds and max respectively. where k is the size of maxIds. And note that the top k elements are not sorted. </p>
</dd></dl>
<dl class="function">
......
......@@ -512,6 +512,64 @@ map should be square.</li>
</table>
</dd></dl>
</div>
<div class="section" id="maxout-layer">
<h2>maxout_layer<a class="headerlink" href="#maxout-layer" title="Permalink to this headline"></a></h2>
<dl class="function">
<dt>
<code class="descclassname">paddle.trainer_config_helpers.layers.</code><code class="descname">maxout_layer</code><span class="sig-paren">(</span><em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span></dt>
<dd><dl class="docutils">
<dt>A layer to do max out on conv layer output.</dt>
<dd><ul class="first last simple">
<li>Input: output of a conv layer.</li>
<li>Output: feature map size same as input. Channel is (input channel) / groups.</li>
</ul>
</dd>
</dl>
<p>So groups should be larger than 1, and the number of channels should be
divisible by groups.</p>
<dl class="docutils">
<dt>Please refer to Paper:</dt>
<dd><ul class="first last simple">
<li>Maxout Networks: <a class="reference external" href="http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf">http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf</a></li>
<li>Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks: <a class="reference external" href="https://arxiv.org/pdf/1312.6082v4.pdf">https://arxiv.org/pdf/1312.6082v4.pdf</a></li>
</ul>
</dd>
</dl>
<p>The simple usage is:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">maxout</span> <span class="o">=</span> <span class="n">maxout_layer</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span>
<span class="n">num_channels</span><span class="o">=</span><span class="mi">128</span><span class="p">,</span>
<span class="n">groups</span><span class="o">=</span><span class="mi">4</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>input</strong> (<em>LayerOutput</em>) &#8211; The input layer.</li>
<li><strong>num_channels</strong> (<em>int|None</em>) &#8211; The channel number of input layer. If None will be set
automatically from previous output.</li>
<li><strong>groups</strong> (<em>int</em>) &#8211; The group number of input layer.</li>
<li><strong>size_x</strong> (<em>int|None</em>) &#8211; conv output width. If None will be set
automatically from previous output.</li>
<li><strong>size_y</strong> (<em>int|None</em>) &#8211; conv output height. If None will be set
automatically from previous output.</li>
<li><strong>name</strong> (<em>None|basestring.</em>) &#8211; The name of this layer. It is optional.</li>
<li><strong>layer_attr</strong> (<a class="reference internal" href="attrs.html#paddle.trainer_config_helpers.attrs.ExtraLayerAttribute" title="paddle.trainer_config_helpers.attrs.ExtraLayerAttribute"><em>ExtraLayerAttribute</em></a>) &#8211; Extra Layer attribute.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">LayerOutput object.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">LayerOutput</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
</div>
</div>
<div class="section" id="norm-layer">
......@@ -2687,6 +2745,7 @@ It is used by recurrent layer group.</p>
</li>
<li><a class="reference internal" href="#image-pooling-layer">Image Pooling Layer</a><ul>
<li><a class="reference internal" href="#img-pool-layer">img_pool_layer</a></li>
<li><a class="reference internal" href="#maxout-layer">maxout_layer</a></li>
</ul>
</li>
<li><a class="reference internal" href="#norm-layer">Norm Layer</a><ul>
......
......@@ -97,6 +97,7 @@ var _hmt = _hmt || [];
</li>
<li class="toctree-l1"><a class="reference internal" href="layers.html#image-pooling-layer">Image Pooling Layer</a><ul>
<li class="toctree-l2"><a class="reference internal" href="layers.html#img-pool-layer">img_pool_layer</a></li>
<li class="toctree-l2"><a class="reference internal" href="layers.html#maxout-layer">maxout_layer</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="layers.html#norm-layer">Norm Layer</a><ul>
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册