diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst
index a09d5e3d4d31cf3c45f8e6258884645490ec3be6..1583fce981fed64141acdccc0d89b46b63d13cc0 100644
--- a/doc/ui/api/trainer_config_helpers/layers.rst
+++ b/doc/ui/api/trainer_config_helpers/layers.rst
@@ -82,12 +82,6 @@ img_cmrnorm_layer
     :members: img_cmrnorm_layer
     :noindex:
 
-img_rnorm_layer
------------------
-.. automodule:: paddle.trainer_config_helpers.layers
-    :members: img_rnorm_layer
-    :noindex:
-
 batch_norm_layer
 ---------------------
 .. automodule:: paddle.trainer_config_helpers.layers
diff --git a/doc/ui/index.md b/doc/ui/index.md
index 829994d56bb65414c6bdd9d4ce2a0878ee7c21d7..9c1ba27bdc14fa9ab762ffb97424a8a5946808f9 100644
--- a/doc/ui/index.md
+++ b/doc/ui/index.md
@@ -7,7 +7,7 @@
 
 ## API Reference
 
-* [Trainer Config Helpers](api/trainer_config_helpers/index.md)
+* [Model Config Interface](api/trainer_config_helpers/index.md)
 
 ## Command Line Argument
 
diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/gserver/layers/NormProjectionLayer.cpp
index f30a3e8df0f87f90a11c75fe355843c3fcbea3d1..eab6e904ee998b876a4dd7c503eec3a9a84f7412 100644
--- a/paddle/gserver/layers/NormProjectionLayer.cpp
+++ b/paddle/gserver/layers/NormProjectionLayer.cpp
@@ -46,9 +46,6 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
   /* the size of inputs for norm-layer is 1 */
   CHECK_EQ(config_.inputs_size(), 1);
 
-  auto& inputConfig = config_.inputs(0);
-  blocked_ = inputConfig.norm_conf().blocked();
-
   return true;
 }
 
@@ -69,7 +66,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   denoms_->zeroMem();
 
   outV->crossMapNormalFwd(*input, imgSizeH_, imgSizeW_, *denoms_, channels_,
-                          size_, scale_, pow_, blocked_);
+                          size_, scale_, pow_);
 }
 
 void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
@@ -86,6 +83,6 @@ void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
 
   preOutGrad->crossMapNormalBwd(*localGrad, *denoms_, *preOutV, *localOutV,
                                 channels_, imgSizeH_, imgSizeW_, size_, scale_,
-                                pow_, blocked_);
+                                pow_);
 }
 }  // namespace paddle
diff --git a/paddle/gserver/layers/NormProjectionLayer.h b/paddle/gserver/layers/NormProjectionLayer.h
index a5e8dce029ae1b9b3cb633dd49e041340db35cd6..728806ea76958382a3ad06804f773c959598d043 100644
--- a/paddle/gserver/layers/NormProjectionLayer.h
+++ b/paddle/gserver/layers/NormProjectionLayer.h
@@ -23,15 +23,12 @@ namespace paddle {
 
 /**
  * @brief response normalization across feature maps
- * namely normalize in number of size_ channels 
+ * namely normalize in number of size_ channels
  */
 class CMRProjectionNormLayer : public ResponseNormLayer {
   size_t imgSizeH_, imgSizeW_;
   size_t outputH_, outputW_;
 
-protected:
-  bool blocked_;
-
 public:
   explicit CMRProjectionNormLayer(const LayerConfig& config)
       : ResponseNormLayer(config) {}
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index 7e1840076833feded5a4bdec4066f0ab1a6411ec..f3a6503d4a21ff8766f3289f8eee992d4d13045d 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -943,7 +943,7 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH,
 void GpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                   size_t imgSizeW, Matrix& denoms,
                                   size_t channels, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = input.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -960,7 +960,7 @@ void GpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
                                   Matrix& preOutV, Matrix& localOutV,
                                   size_t channels, size_t imgSizeH,
                                   size_t imgSizeW, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = preOutV.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -1602,7 +1602,7 @@ void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW,
 void CpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                   size_t imgSizeW, Matrix& denoms,
                                   size_t channels, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = input.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -1655,7 +1655,7 @@ void CpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
                                   Matrix& preOutV, Matrix& localOutV,
                                   size_t channels, size_t imgSizeH,
                                   size_t imgSizeW, size_t size, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   LOG(FATAL) << "Not implemented";
 
   CHECK(imgSizeH * imgSizeW * channels == preOutV.getWidth());
diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h
index f27773d1108b63f20e8f1d7eadbadfb4624ccb4f..cfb30797fcf1bcafe8080fcc0679171b2386f217 100644
--- a/paddle/math/Matrix.h
+++ b/paddle/math/Matrix.h
@@ -585,7 +585,7 @@ public:
    * \f[
    *   a[i] = \sum_{j=-(N-1)/2}^{(N-1)/2} b_{i+j} * c_{j}
    * \f]
-   * 
+   *
    * b contains M elements,
    * c contains N elements (N is odd),
    * b's index arithmetic is computed modulo M,
@@ -774,7 +774,7 @@ public:
   virtual void crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                  size_t imgSizeW, Matrix& denoms,
                                  size_t channels, size_t sizeX, float scale,
-                                 float pow, bool blocked) {
+                                 float pow) {
     LOG(FATAL) << "Not implemeted";
   }
 
@@ -782,7 +782,7 @@ public:
                                  Matrix& preOutV, Matrix& localOutV,
                                  size_t channels, size_t imgSizeH,
                                  size_t imgSizeW, size_t size, float scale,
-                                 float pow, bool blocked) {
+                                 float pow) {
     LOG(FATAL) << "Not implemeted";
   }
 
@@ -883,7 +883,7 @@ public:
    * @code
    * this[i] = -sum(label[i][j]*log(output[i][j])
    *           + (1-label[i][j])*log(1-output[i][j]))
-   * @endcode 
+   * @endcode
    */
   virtual void multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) {
     LOG(FATAL) << "Not implemented";
@@ -895,7 +895,7 @@ public:
    * @code
    * this[i][j] = -label[i][j]/output[i][j]
    *             + (1-label[i][j])/(1-output[i][j])
-   * @endcode 
+   * @endcode
    */
   virtual void multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label) {
     LOG(FATAL) << "Not implemented";
   }
@@ -903,12 +903,12 @@ public:
 
   /**
    * @brief Calculate the classification error for multi binary labels
-   * 
+   *
    * @code
    * this[i] = sum((output[i][j] >= threshold && label[i][j] == 0)
    *            || (output[i][j] < threshold && label[i][j] == 1))
    *            / output->getWidth()
-   * @endcode 
+   * @endcode
    */
   virtual void classificationErrorMulti(Matrix& output, Matrix& label,
                                         real threshold) {
@@ -1149,12 +1149,12 @@ public:
 
   void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
                          Matrix& denoms, size_t channels, size_t sizeX,
-                         float scale, float pow, bool blocked);
+                         float scale, float pow);
 
   void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
                          Matrix& localOutV, size_t channels, size_t imgSizeH,
-                         size_t imgSizeW, size_t sizeX, float scale, float pow,
-                         bool blocked);
+                         size_t imgSizeW, size_t sizeX,
+                         float scale, float pow);
 
   void maxSequenceForward(Matrix& input, const IVector& sequence,
                           IVector& index);
@@ -1260,12 +1260,12 @@ public:
 
   void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
                          Matrix& denoms, size_t channels, size_t sizeX,
-                         float scale, float pow, bool blocked);
+                         float scale, float pow);
 
   void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
                          Matrix& localOutV, size_t channels, size_t imgSizeH,
-                         size_t imgSizeW, size_t sizeX, float scale, float pow,
-                         bool blocked);
+                         size_t imgSizeW, size_t sizeX,
+                         float scale, float pow);
 
   void maxSequenceForward(Matrix& input, const IVector& sequence,
                           IVector& index);
@@ -1307,14 +1307,14 @@ public:
    * @code
    * table.row[ids[i]] += this.row[i]
    * @endcode
-   */ 
+   */
   virtual void addToRows(Matrix& table, IVector& ids);
 
   /**
    * @code
    * this[i] = table[i, id[i]]
    * @endcode
-   */ 
+   */
   virtual void selectElements(Matrix& table, IVector& ids);
 
   /**
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index cd00ffefc707f3c1ab63b23ab4c4d089ddb6c4d4..c23200dfa39b61806ed5f9841950f191fa091d96 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -1500,7 +1500,7 @@ def img_pool_layer(input, pool_size, name=None,
 
 
 def __img_norm_layer__(name, input, size, norm_type, scale, power,
-                       num_channels, blocked, layer_attr):
+                       num_channels, blocked=0, layer_attr=None):
     if num_channels is None:
         assert input.num_filters is not None
         num_channels = input.num_filters
@@ -1522,9 +1522,9 @@ def __img_norm_layer__(name, input, size, norm_type, scale, power,
 
 @layer_support()
 def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
                       name=None, num_channels=None,
-                      blocked=0, layer_attr=None):
+                      layer_attr=None):
     """
-    Convolution cross-map-response-normalize layer.
+    Response normalization across feature maps.
     The details please refer to `Alex's paper `_.
 
@@ -1532,7 +1532,7 @@
     :type name: None|basestring
     :param input: layer's input.
     :type input: LayerOutput
-    :param size: cross map response size.
+    :param size: Normalize over :math:`size` feature maps.
    :type size: int
     :param scale: The hyper-parameter.
     :type scale: float
@@ -1547,30 +1547,7 @@
     :rtype: LayerOutput
     """
     return __img_norm_layer__(name, input, size, "cmrnorm-projection", scale,
-                              power, num_channels, blocked, layer_attr)
-
-
-@wrap_name_default("rnorm")
-@layer_support()
-def img_rnorm_layer(input, size, scale, power, name=None, num_channels=None,
-                    layer_attr=None):
-    """
-    Normalize the input in local region, namely response normalization
-    across feature maps.
-
-    :param name: The name of this layer.
-    :rtype name: None|basestring
-    :param input: The input of this layer.
-    :param size:
-    :param scale:
-    :param power:
-    :param num_channels:
-    :param layer_attr:
-    :return: LayerOutput object.
-    :rtype: LayerOutput
-    """
-    return __img_norm_layer__(name, input, size, 'rnorm', scale, power,
-                              num_channels, 0, layer_attr)
+                              power, num_channels, 0, layer_attr)
 
 
 @wrap_bias_attr_default()
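
For reference, a minimal usage sketch of the interface after this patch. The input and
conv layer names and sizes below (a 32x32 RGB image, a 5x5 convolution) are illustrative
assumptions, not part of the change; the relevant point is the img_cmrnorm_layer call,
which no longer takes a `blocked` argument and always normalizes each response across
`size` neighboring feature maps, in the style of the local response normalization from
Alex Krizhevsky's paper.

from paddle.trainer_config_helpers import *

# Hypothetical 32x32 RGB input; names and sizes are assumptions for this sketch.
data = data_layer(name='image', size=32 * 32 * 3)

conv = img_conv_layer(input=data, filter_size=5, num_channels=3,
                      num_filters=32, padding=2, act=ReluActivation())

# After this patch img_cmrnorm_layer has no `blocked` argument: each response is
# normalized across `size` neighboring feature maps (cross-map LRN).
norm = img_cmrnorm_layer(input=conv, size=5, scale=0.0128, power=0.75)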