Commit 784e242b authored by gaoyuan

Remove redundant code

Parent 57c355a1
...@@ -109,6 +109,12 @@ sum_to_one_norm
     :members: sum_to_one_norm
     :noindex:

+cross_channel_norm
+------------------
+.. automodule:: paddle.v2.layer
+    :members: cross_channel_norm
+    :noindex:
+
 Recurrent Layers
 ================
......
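With the directive above in place, `cross_channel_norm` appears in the v2 layer reference alongside `sum_to_one_norm`. A minimal usage sketch follows; it assumes the v2 wrapper exposes the same `input`/`name`/`param_attr` arguments as the config helper `cross_channel_norm_layer` shown at the end of this diff, and the surrounding conv layer settings are placeholders rather than anything prescribed by this commit.

```python
# Hypothetical example; the call signature of paddle.layer.cross_channel_norm
# is assumed to mirror cross_channel_norm_layer(input, name, param_attr).
import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)

image = paddle.layer.data(name="image",
                          type=paddle.data_type.dense_vector(8 * 16 * 16))
conv = paddle.layer.img_conv(input=image,
                             num_channels=8,
                             filter_size=3,
                             num_filters=64,
                             act=paddle.activation.Relu())
# Normalize every spatial position across the 64 output channels and
# rescale with a per-channel trainable factor.
norm = paddle.layer.cross_channel_norm(input=conv)
```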
...@@ -40,7 +40,6 @@ void CrossChannelNormLayer::forward(PassType passType) {
   normBuffer_->addScalar(*normBuffer_, 1e-6);
   inV->square2(*dataBuffer_);
   for (size_t i = 0; i < batchSize; i++) {
-    spatialBuffer_->zeroMem();
     MatrixPtr inTmp = Matrix::create(
         inV->getData() + i * dataDim, channels_, spatialDim, false, useGpu_);
     MatrixPtr dataTmp = Matrix::create(dataBuffer_->getData() + i * dataDim,
...@@ -80,7 +79,6 @@ void CrossChannelNormLayer::backward(const UpdateCallback& callback) {
   scaleDiff_->zeroMem();
   for (size_t i = 0; i < batchSize; i++) {
     spatialBuffer_->zeroMem();
-    channelBuffer_->zeroMem();
     // propagate to param.
     MatrixPtr dataBufferTmp =
         Matrix::create(dataBuffer_->getData() + i * dataDim,
......
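The two `zeroMem()` calls removed above are the redundancy named in the commit title: each scratch buffer is filled by a full assignment inside the loop before it is ever read, so zeroing it first does no useful work. A rough NumPy sketch of the forward pass, assuming the layer computes the per-position L2 norm across channels described in the header comment below (the epsilon placement is simplified), illustrates the pattern:

```python
import numpy as np

def cross_channel_norm_forward(x, scale, eps=1e-6):
    """x: (batch, channels, spatial) input; scale: (channels,) trainable factors.

    Simplified sketch only; how the 1e-6 epsilon enters the denominator is
    not meant to match the C++ code above exactly.
    """
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        sample = x[i]                                # (channels, spatial)
        # The per-sample norm is produced by a full assignment, so a
        # pre-zeroed scratch buffer is never needed -- the analogue of the
        # removed spatialBuffer_->zeroMem() call.
        norm = np.sqrt((sample * sample).sum(axis=0) + eps)   # (spatial,)
        out[i] = scale[:, None] * sample / norm
    return out

x = np.random.randn(2, 4, 9).astype("float32")       # batch=2, 4 channels, 3x3 map
y = cross_channel_norm_forward(x, scale=np.full(4, 20.0, dtype="float32"))
print(y.shape)                                        # (2, 4, 9)
```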
...@@ -66,11 +66,10 @@ public:
 };

 /**
- * This layer applys normalize across the channels of each sample to a
- * conv layer's output and scale the output by a group of trainable factors
- * which dimensions equal to the channel's number.
- * - Input: One and only one input layer are accepted. The input layer must be
- * be a data output layer.
+ * This layer applies normalization across the channels of each sample to a
+ * conv layer's output, and scales the output by a group of trainable factors
+ * whose size equals the number of channels.
+ * - Input: One and only one input layer is accepted.
  * - Output: The normalized data of the input data.
  * Reference:
  *    Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
......
...@@ -1015,6 +1015,7 @@ def cross_channel_norm_layer(input, name=None, param_attr=None):
     This layer applys normalize across the channels of each sample to
     a conv layer's output and scale the output by a group of trainable
     factors which dimensions equal to the channel's number.
+
     :param name: The Layer Name.
     :type name: basestring
     :param input: The input layer.
......
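For the v1 config API whose docstring is touched above, usage might look like the hypothetical snippet below; only the `cross_channel_norm_layer` call follows the signature shown in the hunk, and the 20.0 initial scale mirrors the common SSD-style initialization rather than anything in this commit.

```python
# Hypothetical v1 trainer_config snippet; the conv settings are placeholders.
from paddle.trainer_config_helpers import *

data = data_layer(name="image", size=8 * 16 * 16)
conv = img_conv_layer(input=data,
                      num_channels=8,
                      filter_size=3,
                      num_filters=64,
                      act=ReluActivation())
norm = cross_channel_norm_layer(
    input=conv,
    name="conv_norm",
    param_attr=ParamAttr(initial_mean=20.0, initial_std=0.0))

outputs(norm)
```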