From 54d4968d38648d32ce777b5ce1001110d31c64af Mon Sep 17 00:00:00 2001
From: dangqingqing
Date: Thu, 1 Sep 2016 10:15:45 +0000
Subject: [PATCH] Refine code and comments for CRMNormLayer.

ISSUE=4612671

git-svn-id: https://svn.baidu.com/idl/trunk/paddle@1501 1ad973e4-5ce8-4261-8a94-b56d1f490c56
---
 doc/ui/api/trainer_config_helpers/layers.rst  |  6 ----
 doc/ui/index.md                               |  2 +-
 paddle/gserver/layers/NormProjectionLayer.cpp |  7 ++--
 paddle/gserver/layers/NormProjectionLayer.h   |  5 +--
 paddle/math/Matrix.cpp                        |  8 ++---
 paddle/math/Matrix.h                          | 30 ++++++++---------
 .../paddle/trainer_config_helpers/layers.py   | 33 +++----------------
 7 files changed, 28 insertions(+), 63 deletions(-)

diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst
index a09d5e3d4d3..1583fce981f 100644
--- a/doc/ui/api/trainer_config_helpers/layers.rst
+++ b/doc/ui/api/trainer_config_helpers/layers.rst
@@ -82,12 +82,6 @@ img_cmrnorm_layer
    :members: img_cmrnorm_layer
    :noindex:
 
-img_rnorm_layer
------------------
-.. automodule:: paddle.trainer_config_helpers.layers
-   :members: img_rnorm_layer
-   :noindex:
-
 batch_norm_layer
 --------------------
 .. automodule:: paddle.trainer_config_helpers.layers

diff --git a/doc/ui/index.md b/doc/ui/index.md
index 829994d56bb..9c1ba27bdc1 100644
--- a/doc/ui/index.md
+++ b/doc/ui/index.md
@@ -7,7 +7,7 @@
 
 ## API Reference
 
-* [Trainer Config Helpers](api/trainer_config_helpers/index.md)
+* [Model Config Interface](api/trainer_config_helpers/index.md)
 
 ## Command Line Argument
 

diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/gserver/layers/NormProjectionLayer.cpp
index f30a3e8df0f..eab6e904ee9 100644
--- a/paddle/gserver/layers/NormProjectionLayer.cpp
+++ b/paddle/gserver/layers/NormProjectionLayer.cpp
@@ -46,9 +46,6 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
   /* the size of inputs for norm-layer is 1 */
   CHECK_EQ(config_.inputs_size(), 1);
 
-  auto& inputConfig = config_.inputs(0);
-  blocked_ = inputConfig.norm_conf().blocked();
-
   return true;
 }
 
@@ -69,7 +66,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   denoms_->zeroMem();
 
   outV->crossMapNormalFwd(*input, imgSizeH_, imgSizeW_, *denoms_, channels_,
-                          size_, scale_, pow_, blocked_);
+                          size_, scale_, pow_);
 }
 
 void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
@@ -86,6 +83,6 @@ void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
 
   preOutGrad->crossMapNormalBwd(*localGrad, *denoms_, *preOutV, *localOutV,
                                 channels_, imgSizeH_, imgSizeW_, size_, scale_,
-                                pow_, blocked_);
+                                pow_);
 }
 }  // namespace paddle

diff --git a/paddle/gserver/layers/NormProjectionLayer.h b/paddle/gserver/layers/NormProjectionLayer.h
index a5e8dce029a..728806ea769 100644
--- a/paddle/gserver/layers/NormProjectionLayer.h
+++ b/paddle/gserver/layers/NormProjectionLayer.h
@@ -23,15 +23,12 @@ namespace paddle {
 
 /**
  * @brief response normalization across feature maps
- * namely normalize in number of size_ channels
+ * namely normalize over size_ neighboring channels
 */
class CMRProjectionNormLayer : public ResponseNormLayer {
  size_t imgSizeH_, imgSizeW_;
  size_t outputH_, outputW_;
 
-protected:
-  bool blocked_;
-
 public:
  explicit CMRProjectionNormLayer(const LayerConfig& config)
      : ResponseNormLayer(config) {}
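CMRProjectionNormLayer delegates the actual math to the crossMapNormalFwd routine diffed below: each activation is divided by a power of a denominator accumulated from the squares of its size_ neighboring channels. The following is a minimal NumPy sketch of that forward computation, assuming the conventional Krizhevsky-style formulation implied by the default scale=0.0128 and power=0.75; the function name and the (channels, height, width) layout are illustrative, not the Paddle API.

import numpy as np

def cross_map_normal_fwd(x, size, scale=0.0128, power=0.75):
    # x: feature maps laid out as (channels, height, width).
    # denoms[c] = 1 + scale * sum of squares over the `size` maps
    # centered on channel c, with the window clipped at the borders.
    x = np.asarray(x, dtype=float)
    channels = x.shape[0]
    half = size // 2
    denoms = np.ones(x.shape)
    for c in range(channels):
        lo = max(0, c - half)
        hi = min(channels, c + half + 1)
        denoms[c] += scale * (x[lo:hi] ** 2).sum(axis=0)
    # out = in / denoms^power; denoms is also kept for the backward pass.
    return x / denoms ** power, denoms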
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index 7e184007683..f3a6503d4a2 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -943,7 +943,7 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH,
 void GpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                   size_t imgSizeW, Matrix& denoms,
                                   size_t channels, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = input.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -960,7 +960,7 @@ void GpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
                                   Matrix& preOutV, Matrix& localOutV,
                                   size_t channels, size_t imgSizeH,
                                   size_t imgSizeW, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = preOutV.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -1602,7 +1602,7 @@ void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW,
 void CpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                   size_t imgSizeW, Matrix& denoms,
                                   size_t channels, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = input.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -1655,7 +1655,7 @@ void CpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
                                   Matrix& preOutV, Matrix& localOutV,
                                   size_t channels, size_t imgSizeH,
                                   size_t imgSizeW, size_t size, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   LOG(FATAL) << "Not implemented";
 
   CHECK(imgSizeH * imgSizeW * channels == preOutV.getWidth());

diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h
index f27773d1108..cfb30797fcf 100644
--- a/paddle/math/Matrix.h
+++ b/paddle/math/Matrix.h
@@ -585,7 +585,7 @@ public:
    * \f[
    *   a[i] = \sum_{j=-(N-1)/2}^{(N-1)/2} b_{i+j} * c_{j}
    * \f]
-   *
+   *
    * b contains M elements,
    * c contains N elements (N is odd),
    * b's index arithmetic is computed modulo M,
@@ -774,7 +774,7 @@ public:
   virtual void crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                  size_t imgSizeW, Matrix& denoms,
                                  size_t channels, size_t sizeX, float scale,
-                                 float pow, bool blocked) {
+                                 float pow) {
     LOG(FATAL) << "Not implemented";
   }
 
@@ -782,7 +782,7 @@
                                  Matrix& preOutV, Matrix& localOutV,
                                  size_t channels, size_t imgSizeH,
                                  size_t imgSizeW, size_t size, float scale,
-                                 float pow, bool blocked) {
+                                 float pow) {
     LOG(FATAL) << "Not implemented";
   }
 
@@ -883,7 +883,7 @@
    * @code
    * this[i] = -sum(label[i][j]*log(output[i][j])
    *           + (1-label[i][j])*log(1-output[i][j]))
-   * @endcode
+   * @endcode
    */
   virtual void multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) {
     LOG(FATAL) << "Not implemented";
@@ -895,7 +895,7 @@
    * @code
    * this[i][j] = -label[i][j]/output[i][j]
    *             + (1-label[i][j])/(1-output[i][j])
-   * @endcode
+   * @endcode
    */
   virtual void multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label) {
     LOG(FATAL) << "Not implemented";
@@ -903,12 +903,12 @@
 
   /**
    * @brief Calculate the classification error for multi binary labels
-   *
+   *
    * @code
    * this[i] = sum((output[i][j] >= threshold && label[i][j] == 0)
    *           || (output[i][j] < threshold && label[i][j] == 1))
    *           / output->getWidth()
-   * @endcode
+   * @endcode
    */
   virtual void classificationErrorMulti(Matrix& output, Matrix& label,
                                         real threshold) {
@@ -1149,12 +1149,12 @@ public:
 
   void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
                          Matrix& denoms, size_t channels, size_t sizeX,
-                         float scale, float pow, bool blocked);
+                         float scale, float pow);
 
   void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
                          Matrix& localOutV, size_t channels, size_t imgSizeH,
-                         size_t imgSizeW, size_t sizeX, float scale, float pow,
-                         bool blocked);
+                         size_t imgSizeW, size_t sizeX,
+                         float scale, float pow);
 
   void maxSequenceForward(Matrix& input, const IVector& sequence,
                           IVector& index);
@@ -1260,12 +1260,12 @@ public:
 
   void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
                          Matrix& denoms, size_t channels, size_t sizeX,
-                         float scale, float pow, bool blocked);
+                         float scale, float pow);
 
   void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
                          Matrix& localOutV, size_t channels, size_t imgSizeH,
-                         size_t imgSizeW, size_t sizeX, float scale, float pow,
-                         bool blocked);
+                         size_t imgSizeW, size_t sizeX,
+                         float scale, float pow);
 
   void maxSequenceForward(Matrix& input, const IVector& sequence,
                           IVector& index);
@@ -1307,14 +1307,14 @@ public:
    * @code
    * table.row[ids[i]] += this.row[i]
    * @endcode
-   */
+   */
   virtual void addToRows(Matrix& table, IVector& ids);
 
   /**
    * @code
    * this[i] = table[i, id[i]]
    * @endcode
-   */
+   */
   virtual void selectElements(Matrix& table, IVector& ids);
 
   /**
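The @code comments touched above double as a specification of the Matrix routines. For example, multiBinaryLabelCrossEntropy reduces each row to a single loss value. A NumPy sketch of exactly the formula in that doc comment (the helper name is illustrative):

import numpy as np

def multi_binary_label_cross_entropy(output, label):
    # this[i] = -sum_j(label[i][j] * log(output[i][j])
    #                  + (1 - label[i][j]) * log(1 - output[i][j]))
    output = np.asarray(output, dtype=float)
    label = np.asarray(label, dtype=float)
    return -np.sum(label * np.log(output) +
                   (1 - label) * np.log(1 - output), axis=1)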
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index cd00ffefc70..c23200dfa39 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -1500,7 +1500,7 @@ def img_pool_layer(input, pool_size, name=None,
 
 
 def __img_norm_layer__(name, input, size, norm_type, scale, power,
-                       num_channels, blocked, layer_attr):
+                       num_channels, blocked=0, layer_attr=None):
     if num_channels is None:
         assert input.num_filters is not None
         num_channels = input.num_filters
@@ -1522,9 +1522,9 @@
 @layer_support()
 def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
                       name=None, num_channels=None,
-                      blocked=0, layer_attr=None):
+                      layer_attr=None):
     """
-    Convolution cross-map-response-normalize layer.
+    Response normalization across feature maps.
     For details, please refer to
     `Alex's paper `_.
 
@@ -1532,7 +1532,7 @@
     :type name: None|basestring
     :param input: layer's input.
     :type input: LayerOutput
-    :param size: cross map response size.
+    :param size: Normalize over :math:`size` neighboring feature maps.
     :type size: int
     :param scale: The hyper-parameter.
     :type scale: float
@@ -1547,30 +1547,7 @@
     :rtype: LayerOutput
     """
     return __img_norm_layer__(name, input, size, "cmrnorm-projection", scale,
-                              power, num_channels, blocked, layer_attr)
-
-
-@wrap_name_default("rnorm")
-@layer_support()
-def img_rnorm_layer(input, size, scale, power, name=None, num_channels=None,
-                    layer_attr=None):
-    """
-    Normalize the input in local region, namely response normalization
-    across feature maps.
-
-    :param name: The name of this layer.
-    :rtype name: None|basestring
-    :param input: The input of this layer.
-    :param size:
-    :param scale:
-    :param power:
-    :param num_channels:
-    :param layer_attr:
-    :return: LayerOutput object.
-    :rtype: LayerOutput
-    """
-    return __img_norm_layer__(name, input, size, 'rnorm', scale, power,
-                              num_channels, 0, layer_attr)
+                              power, num_channels, 0, layer_attr)
 
 
 @wrap_bias_attr_default()
--
GitLab
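With img_rnorm_layer removed, img_cmrnorm_layer is the single config-helper entry point for this normalization. A minimal usage sketch under the post-patch signature; the data and convolution parameters are illustrative assumptions, not taken from this patch:

from paddle.trainer_config_helpers import *

data = data_layer(name='image', size=32 * 32 * 3)
conv = img_conv_layer(input=data, filter_size=5, num_filters=64,
                      num_channels=3, act=ReluActivation())
# Normalize each response over size=5 neighboring feature maps,
# using the default scale=0.0128 and power=0.75.
norm = img_cmrnorm_layer(input=conv, size=5)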