diff --git a/paddle/gserver/layers/NormalizeLayer.cpp b/paddle/gserver/layers/NormalizeLayer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..22df8adb4eecf912eeba87ff89d27c0fbc61ae14
--- /dev/null
+++ b/paddle/gserver/layers/NormalizeLayer.cpp
@@ -0,0 +1,182 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "Layer.h"
+#include "paddle/math/BaseMatrix.h"
+#include "paddle/math/Matrix.h"
+
+namespace paddle {
+/**
+ * This layer applies normalization across the channels of each sample of
+ * a conv layer's output and scales the result by a group of trainable
+ * factors whose dimension equals the number of channels.
+ * - Input: One and only one input layer is accepted. The input layer must
+ *   be a data output layer.
+ * - Output: The normalized data of the input data.
+ * Reference:
+ *    Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
+ *    Cheng-Yang Fu, Alexander C. Berg. SSD: Single Shot MultiBox Detector
+ */
+
+class NormalizeLayer : public Layer {
+public:
+  explicit NormalizeLayer(const LayerConfig& config) : Layer(config) {}
+  bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
+
+  void forward(PassType passType);
+  void backward(const UpdateCallback& callback);
+
+protected:
+  size_t channels_;
+  std::unique_ptr<Weight> scale_;
+  MatrixPtr scaleDiff_;
+  MatrixPtr normBuffer_;
+  MatrixPtr dataBuffer_;
+  MatrixPtr channelBuffer_;
+  MatrixPtr spatialBuffer_;
+  MatrixPtr sampleBuffer_;
+};
+
+REGISTER_LAYER(normalize, NormalizeLayer);
+
+bool NormalizeLayer::init(const LayerMap& layerMap,
+                          const ParameterMap& parameterMap) {
+  Layer::init(layerMap, parameterMap);
+  CHECK(parameters_[0]);
+  channels_ = config_.num_filters();
+  scale_.reset(new Weight(channels_, 1, parameters_[0]));
+  return true;
+}
+
+void NormalizeLayer::forward(PassType passType) {
+  Layer::forward(passType);
+  auto in = getInput(0);
+  MatrixPtr inV = getInputValue(0);
+
+  size_t batchSize = inV->getHeight();
+  size_t dataDim = inV->getWidth();
+  CHECK_EQ(getSize(), dataDim);
+
+  reserveOutput(batchSize, dataDim);
+  MatrixPtr outV = getOutputValue();
+  size_t spatialDim = dataDim / channels_;
+
+  Matrix::resizeOrCreate(dataBuffer_, batchSize, dataDim, false, useGpu_);
+  Matrix::resizeOrCreate(spatialBuffer_, 1, spatialDim, false, useGpu_);
+  Matrix::resizeOrCreate(channelBuffer_, channels_, 1, false, useGpu_);
+  Matrix::resizeOrCreate(sampleBuffer_, channels_, spatialDim, false, useGpu_);
+  Matrix::resizeOrCreate(normBuffer_, batchSize, spatialDim, false, useGpu_);
+  normBuffer_->zeroMem();
+  spatialBuffer_->zeroMem();
+  sampleBuffer_->zeroMem();
+  dataBuffer_->zeroMem();
+  // add a small epsilon to avoid dividing by a zero norm
+  normBuffer_->addScalar(*normBuffer_, 1e-6);
+  channelBuffer_->resetOne();
+  inV->square2(*dataBuffer_);
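+  // Per-sample loop below: dataBuffer_ holds the squared inputs, so summing
+  // it over the channel dimension and taking the square root yields the
+  // per-position L2 norm; the input is divided by that norm, and then each
+  // channel is multiplied by its trainable scale factor.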
+  for (size_t i = 0; i < batchSize; i++) {
+    spatialBuffer_->zeroMem();
+    MatrixPtr inTmp = Matrix::create(
+        inV->getData() + i * dataDim, channels_, spatialDim, false, useGpu_);
+    MatrixPtr dataTmp = Matrix::create(dataBuffer_->getData() + i * dataDim,
+                                       channels_,
+                                       spatialDim,
+                                       false,
+                                       useGpu_);
+    MatrixPtr outTmp = Matrix::create(
+        outV->getData() + i * dataDim, channels_, spatialDim, false, useGpu_);
+    MatrixPtr normTmp = Matrix::create(
+        normBuffer_->getData() + i * spatialDim, 1, spatialDim, false, useGpu_);
+    // compute norm.
+    spatialBuffer_->sumCols(*dataTmp, 1, 1);
+    spatialBuffer_->sqrt2(*spatialBuffer_);
+    normTmp->copyFrom(*spatialBuffer_);
+    sampleBuffer_->mul(*channelBuffer_, *spatialBuffer_, 1., 0.);
+    sampleBuffer_->dotDiv(*inTmp, *sampleBuffer_);
+    outTmp->copyFrom(*sampleBuffer_);
+
+    // scale the layer.
+    spatialBuffer_->resetOne();
+    sampleBuffer_->mul(*scale_->getW(), *spatialBuffer_, 1., 0.);
+    outTmp->dotMul(*outTmp, *sampleBuffer_);
+  }
+}
+
+void NormalizeLayer::backward(const UpdateCallback& callback) {
+  MatrixPtr inG = getInputGrad(0);
+  MatrixPtr inV = getInputValue(0);
+  MatrixPtr outG = getOutputGrad();
+  MatrixPtr outV = getOutputValue();
+
+  auto in = getInput(0);
+  size_t batchSize = inG->getHeight();
+  size_t dataDim = inG->getWidth();
+  size_t spatialDim = dataDim / channels_;
+
+  bool syncFlag = hl_get_sync_flag();
+  dataBuffer_->dotMul(*outG, *outV);
+  Matrix::resizeOrCreate(scaleDiff_, channels_, 1, false, useGpu_);
+  scaleDiff_->zeroMem();
+  for (size_t i = 0; i < batchSize; i++) {
+    spatialBuffer_->zeroMem();
+    channelBuffer_->zeroMem();
+    // propagate to param.
+    MatrixPtr dataBufferTmp =
+        Matrix::create(dataBuffer_->getData() + i * dataDim,
+                       channels_,
+                       spatialDim,
+                       false,
+                       useGpu_);
+    const MatrixPtr inValueTmp = Matrix::create(
+        inV->getData() + i * dataDim, channels_, spatialDim, false, useGpu_);
+    const MatrixPtr outGradTmp = Matrix::create(
+        outG->getData() + i * dataDim, channels_, spatialDim, false, useGpu_);
+    MatrixPtr inGradTmp = Matrix::create(
+        inG->getData() + i * dataDim, channels_, spatialDim, false, useGpu_);
+    const MatrixPtr normTmp = Matrix::create(
+        normBuffer_->getData() + i * spatialDim, 1, spatialDim, false, useGpu_);
+    channelBuffer_->sumRows(*dataBufferTmp, 1, 1);
+    channelBuffer_->dotDiv(*channelBuffer_, *(scale_->getW()));
+    // store a / scale[i] in scaleDiff_ temporarily
+    scaleDiff_->add(*channelBuffer_, 1.);
+
+    sampleBuffer_->dotMul(*inValueTmp, *outGradTmp);
+    spatialBuffer_->sumCols(*sampleBuffer_, 1., 1.);
+    // scale the grad
+    channelBuffer_->resetOne();
+    sampleBuffer_->mul(*channelBuffer_, *spatialBuffer_, 1., 0.);
+
+    inGradTmp->dotMul(*inValueTmp, *sampleBuffer_);
+    // divide by square of norm
+    spatialBuffer_->dotMul(*normTmp, *normTmp);
+    sampleBuffer_->mul(*channelBuffer_, *spatialBuffer_, 1., 0.);
+    inGradTmp->dotDiv(*inGradTmp, *sampleBuffer_);
+    // subtract
+    inGradTmp->add(*outGradTmp, -1, 1);
+    // divide by norm
+    sampleBuffer_->mul(*channelBuffer_, *normTmp, 1., 0.);
+    inGradTmp->dotDiv(*inGradTmp, *sampleBuffer_);
+    // scale the diff
+    spatialBuffer_->resetOne();
+    sampleBuffer_->mul(*scale_->getW(), *spatialBuffer_, 1., 0.);
+    inGradTmp->dotMul(*inGradTmp, *sampleBuffer_);
+  }
+  // update the scale parameter gradient
+  if (scale_->getWGrad()) scale_->getWGrad()->copyFrom(*scaleDiff_);
+  hl_set_sync_flag(false);
+  hl_set_sync_flag(syncFlag);
+  scale_->getParameterPtr()->incUpdate(callback);
+}
+
+}  // namespace paddle
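The forward pass above amounts to a per-position L2 normalization across channels followed by a per-channel scaling. A minimal NumPy sketch of that computation for a single sample (not part of the patch; the helper name and the epsilon placement are illustrative):

    import numpy as np

    def cross_channel_norm_forward(x, scale, eps=1e-6):
        # x: (channels, spatial) activations of one sample
        # scale: (channels,) trainable factors
        norm = np.sqrt((x * x).sum(axis=0)) + eps   # per-position L2 norm, shape (spatial,)
        return (x / norm) * scale[:, np.newaxis]    # normalize, then scale each channel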
diff --git a/paddle/gserver/layers/PriorBox.cpp b/paddle/gserver/layers/PriorBox.cpp
index bcf5e912a50fef2cec8ebdf1e0dad9efa43fba2f..331bc7672ec0d39a7317c39f1d14e8dcadea471a 100644
--- a/paddle/gserver/layers/PriorBox.cpp
+++ b/paddle/gserver/layers/PriorBox.cpp
@@ -20,7 +20,7 @@ namespace paddle {
 /**
  * @brief A layer for generating priorbox locations and variances.
  * - Input: Two and only two input layer are accepted. The input layer must be
- *          be a data output layer and a convolution output layer.
+ *   be a data output layer and a convolution output layer.
  * - Output: The priorbox locations and variances of the input data.
  * Reference:
  *    Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
@@ -45,27 +45,32 @@ protected:
   MatrixPtr buffer_;
 };
 
+REGISTER_LAYER(priorbox, PriorBoxLayer);
+
 bool PriorBoxLayer::init(const LayerMap& layerMap,
                          const ParameterMap& parameterMap) {
   Layer::init(layerMap, parameterMap);
   auto pbConf = config_.inputs(0).priorbox_conf();
+  std::vector<real> tmp;
+  aspectRatio_.push_back(1.);
   std::copy(pbConf.min_size().begin(),
             pbConf.min_size().end(),
             std::back_inserter(minSize_));
   std::copy(pbConf.max_size().begin(),
             pbConf.max_size().end(),
             std::back_inserter(maxSize_));
-  std::copy(pbConf.aspect_ratio().begin(),
-            pbConf.aspect_ratio().end(),
-            std::back_inserter(aspectRatio_));
   std::copy(pbConf.variance().begin(),
             pbConf.variance().end(),
             std::back_inserter(variance_));
+  std::copy(pbConf.aspect_ratio().begin(),
+            pbConf.aspect_ratio().end(),
+            std::back_inserter(tmp));
   // flip
-  int inputRatioLength = aspectRatio_.size();
-  for (int index = 0; index < inputRatioLength; index++)
-    aspectRatio_.push_back(1 / aspectRatio_[index]);
-  aspectRatio_.push_back(1.);
+  int inputRatioLength = tmp.size();
+  for (int index = 0; index < inputRatioLength; index++) {
+    aspectRatio_.push_back(tmp[index]);
+    aspectRatio_.push_back(1 / tmp[index]);
+  }
   numPriors_ = aspectRatio_.size();
   if (maxSize_.size() > 0) numPriors_++;
   return true;
@@ -94,12 +99,12 @@ void PriorBoxLayer::forward(PassType passType) {
     for (int w = 0; w < layerWidth; ++w) {
       real centerX = (w + 0.5) * stepW;
       real centerY = (h + 0.5) * stepH;
-      int minSize = 0;
+      real minSize = 0;
       for (size_t s = 0; s < minSize_.size(); s++) {
         // first prior.
        minSize = minSize_[s];
-        int boxWidth = minSize;
-        int boxHeight = minSize;
+        real boxWidth = minSize;
+        real boxHeight = minSize;
         // xmin, ymin, xmax, ymax.
         tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
         tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
@@ -112,7 +117,7 @@ void PriorBoxLayer::forward(PassType passType) {
         CHECK_EQ(minSize_.size(), maxSize_.size());
         // second prior.
         for (size_t s = 0; s < maxSize_.size(); s++) {
-          int maxSize = maxSize_[s];
+          real maxSize = maxSize_[s];
           boxWidth = boxHeight = sqrt(minSize * maxSize);
           tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
           tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
@@ -145,6 +150,5 @@ void PriorBoxLayer::forward(PassType passType) {
   MatrixPtr outV = getOutputValue();
   outV->copyFrom(buffer_->data_, dim * 2);
 }
-REGISTER_LAYER(priorbox, PriorBoxLayer);
 
 }  // namespace paddle
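The init() change above reorders how aspect ratios are expanded: ratio 1 now always comes first, and every configured ratio is immediately followed by its reciprocal. A small Python sketch of the resulting layout and prior count (helper name is illustrative, not part of the patch):

    def expand_aspect_ratios(configured_ratios, has_max_size):
        ratios = [1.0]                 # ratio 1 always comes first
        for r in configured_ratios:
            ratios.append(r)           # configured ratio ...
            ratios.append(1.0 / r)     # ... immediately followed by its flip
        num_priors = len(ratios) + (1 if has_max_size else 0)
        return ratios, num_priors

    # e.g. configured_ratios=[2.0], has_max_size=True -> ([1.0, 2.0, 0.5], 4)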
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index 14d9db52470b2828186eca04d303135910489266..a7d3eaeaf98cb017a4ca9e81e1f58bfd17335eb0 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -1623,6 +1623,20 @@ TEST(Layer, PadLayer) {
   }
 }
 
+TEST(Layer, NormalizeLayer) {
+  TestConfig config;
+  config.layerConfig.set_type("normalize");
+  config.layerConfig.set_size(100);
+  config.layerConfig.set_num_filters(10);
+
+  config.inputDefs.push_back({INPUT_DATA, "layer_0", 100, 10});
+  config.layerConfig.add_inputs();
+
+  for (auto useGpu : {false, true}) {
+    testLayerGrad(config, "normalize", 10, false, useGpu, false, 5);
+  }
+}
+
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
   initMain(argc, argv);
diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py
index da937152ee0ce788309690c7b718943bb21b5a76..c52039219575936414fb17a67f84bd1422035b98 100644
--- a/python/paddle/trainer/config_parser.py
+++ b/python/paddle/trainer/config_parser.py
@@ -1619,6 +1619,16 @@ class PriorBoxLayer(LayerBase):
         self.config.size = size
 
 
+@config_layer('normalize')
+class NormalizeLayer(LayerBase):
+    def __init__(self, name, inputs, size, num_filters, **xargs):
+        super(NormalizeLayer, self).__init__(name, 'normalize', 0, inputs,
+                                             **xargs)
+        self.config.size = size
+        self.config.num_filters = num_filters
+        self.create_input_parameter(0, num_filters, [num_filters, 1])
+
+
 @config_layer('data')
 class DataLayer(LayerBase):
     def __init__(self, name, size, height=None, width=None, device=None):
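The config entry above creates one trainable scale per channel (a [num_filters, 1] parameter), which the C++ layer applies over size / num_filters spatial positions. A small sketch of that shape relationship (helper name is illustrative; 100 and 10 mirror the gradient test above):

    def normalize_layer_shapes(size, num_filters):
        assert size % num_filters == 0, "size must equal channels * height * width"
        spatial_dim = size // num_filters
        scale_param_shape = [num_filters, 1]   # matches create_input_parameter above
        return spatial_dim, scale_param_shape

    # the gradient-test config: size=100, num_filters=10 -> (10, [10, 1])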
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index b94f8f9a783552519ca73e7cfc0937b302d3445b..1541b532d950a22a5d2e9928626d1b7a047c1fe1 100755
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -111,6 +111,7 @@ __all__ = [
     'out_prod_layer',
     'print_layer',
     'priorbox_layer',
+    'normalize_layer',
     'spp_layer',
     'pad_layer',
     'eos_layer',
@@ -184,6 +185,7 @@ class LayerType(object):
 
     PRINT_LAYER = "print"
     PRIORBOX_LAYER = "priorbox"
+    NORMALIZE_LAYER = "normalize"
 
     CTC_LAYER = "ctc"
     WARP_CTC_LAYER = "warp_ctc"
@@ -998,6 +1000,35 @@ def priorbox_layer(input,
         size=size)
 
 
+@wrap_name_default("normalize")
+def normalize_layer(input, name=None, param_attr=None):
+    """
+    Normalize the output of a layer. This layer is necessary for SSD.
+    It applies normalization across the channels of each sample of a
+    conv layer's output and scales the result by a group of trainable
+    factors whose dimension equals the number of channels.
+    :param name: The layer name.
+    :type name: basestring
+    :param input: The input layer.
+    :type input: LayerOutput
+    :param param_attr: The parameter attribute for the trainable scale factors.
+    :type param_attr: ParameterAttribute
+    :return: LayerOutput object.
+    """
+    Layer(
+        name=name,
+        type=LayerType.NORMALIZE_LAYER,
+        inputs=[Input(input.name, **param_attr.attr)],
+        size=input.size,
+        num_filters=input.num_filters)
+    return LayerOutput(
+        name,
+        LayerType.NORMALIZE_LAYER,
+        parents=input,
+        num_filters=input.num_filters,
+        size=input.size)
+
+
 @wrap_name_default("seq_pooling")
 @wrap_bias_attr_default(has_bias=False)
 @wrap_param_default(['pooling_type'], default_factory=lambda _: MaxPooling())
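For context, a hypothetical trainer-config snippet using the new wrapper after a convolution layer; layer names, sizes, and the initial scale value are illustrative, and only normalize_layer itself comes from this patch:

    from paddle.trainer_config_helpers import *

    img = data_layer(name="image", size=3 * 38 * 38)
    conv = img_conv_layer(input=img,
                          num_channels=3,
                          num_filters=512,
                          filter_size=3,
                          padding=1,
                          stride=1,
                          act=ReluActivation())
    # one trainable scale per channel; the SSD paper initializes these to 20
    norm = normalize_layer(input=conv,
                           param_attr=ParamAttr(initial_mean=20.0, initial_std=0.0))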