diff --git a/paddle/function/cross_map_normal_op.cpp b/paddle/function/cross_map_normal_op.cpp
index 0391a58d89f4afe878edbc6268cb1018f6582f89..a18c0bb750acfe0902bbd7fe0eeda69ffff78a75 100644
--- a/paddle/function/cross_map_normal_op.cpp
+++ b/paddle/function/cross_map_normal_op.cpp
@@ -17,7 +17,6 @@ limitations under the License. */
 
 namespace paddle {
 
-// NCHW
 template <>
 void CrossMapNormal(real* outputs,
                     real* denoms,
@@ -36,6 +35,10 @@ void CrossMapNormal(real* outputs,
   CpuVector inputsV(numSamples * oneSample, inputs);
   CpuVector denomsV(numSamples * oneSample, denoms);
 
+  // f(x) = x * ( 1 + scale * SUM((x)^2) )^(-pow)
+  // x represents inputs
+  // f(x) represents outputs
+  // denoms saves the intermediate result for the backward pass
   denomsV = denomsV.constant(1.0);
   const int start = -((int)size - 1) / 2;
   const int end = (int)size + start;
diff --git a/paddle/function/cross_map_normal_op.h b/paddle/function/cross_map_normal_op.h
index f065208084f1d46b2414246b7119ce7d2666e631..e935b26e125d3b9c014d4ae52bfb9474ed31c579 100644
--- a/paddle/function/cross_map_normal_op.h
+++ b/paddle/function/cross_map_normal_op.h
@@ -18,6 +18,22 @@ limitations under the License. */
 
 namespace paddle {
 
+/**
+ * \brief Cross map response normalization forward.
+ *        The data structure of image data is NCHW.
+ *
+ * \param[out] outputs output data.
+ * \param[out] denoms denoms buffer, the intermediate result kept for backward.
+ * \param[in] inputs input data.
+ * \param[in] numSamples batch size of input image.
+ * \param[in] channels number of channels.
+ * \param[in] height image height.
+ * \param[in] width image width.
+ * \param[in] size the size of the normalization window (across channels).
+ * \param[in] scale the scale coefficient of the normalization.
+ * \param[in] pow the exponent of the normalization.
+ *
+ */
 template
 void CrossMapNormal(real* outputs,
                     real* denoms,
@@ -30,6 +46,24 @@ void CrossMapNormal(real* outputs,
                     real scale,
                     real pow);
 
+/**
+ * \brief Cross map response normalization backward.
+ *        The data structure of image data is NCHW.
+ *
+ * \param[out] inputsGrad input grad.
+ * \param[in] inputsValue input value.
+ * \param[in] outputsValue output value.
+ * \param[in] outputsGrad output grad.
+ * \param[in] denoms denoms buffer computed in the forward pass.
+ * \param[in] numSamples batch size of input image.
+ * \param[in] channels number of channels.
+ * \param[in] height image height.
+ * \param[in] width image width.
+ * \param[in] size the size of the normalization window (across channels).
+ * \param[in] scale the scale coefficient of the normalization.
+ * \param[in] pow the exponent of the normalization.
+ *
+ */
 template
 void CrossMapNormalGrad(real* inputsGrad,
                         real* inputsValue,
diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h
index 172e558b82945296ef8a50d464c03efbfd597e0d..16f66a2205f4956291c8aef87b0100c3b685a9f2 100644
--- a/paddle/gserver/layers/Layer.h
+++ b/paddle/gserver/layers/Layer.h
@@ -18,6 +18,7 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
+#include "paddle/function/Function.h"
 #include "paddle/math/CpuSparseMatrix.h"
 #include "paddle/parameter/Parameter.h"
 #include "paddle/utils/ClassRegistrar.h"
@@ -100,6 +101,11 @@ protected:
   /// Mark input grad in(true) or out(false) of backward function.
   std::vector markInBackward_;
 
+  /// Layer forward function
+  FunctionBase* forward_;
+  /// Layer backward function
+  FunctionBase* backward_;
+
 public:
   /**
    * Wait until all input value ready.
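
To make the new doc comments concrete, the sketch below is a minimal, self-contained CPU reference of the forward computation they describe: f(x) = x * (1 + scale * SUM(x^2))^(-pow) over NCHW data, with `denoms` holding the intermediate (1 + scale * SUM(x^2)) term for the backward pass. The function name `crossMapNormalRef` and the use of `std::vector<float>` are illustrative only; the actual paddle kernel works on raw `real*` buffers with `CpuVector` expressions (and a GPU counterpart), as shown in the diff.

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Naive reference for cross map response normalization (forward), NCHW layout.
// Not the paddle implementation; a readable sketch of the documented formula.
void crossMapNormalRef(std::vector<float>& outputs,
                       std::vector<float>& denoms,
                       const std::vector<float>& inputs,
                       size_t numSamples,
                       size_t channels,
                       size_t height,
                       size_t width,
                       size_t size,
                       float scale,
                       float pow) {
  // Same window bounds as the paddle code: size channels roughly centered at c.
  const int start = -(static_cast<int>(size) - 1) / 2;
  const int end = static_cast<int>(size) + start;
  const size_t spatial = height * width;
  for (size_t n = 0; n < numSamples; ++n) {
    for (size_t c = 0; c < channels; ++c) {
      for (size_t hw = 0; hw < spatial; ++hw) {
        // Sum of squares over the channel window [c + start, c + end).
        float sum = 0.0f;
        for (int offset = start; offset < end; ++offset) {
          int cc = static_cast<int>(c) + offset;
          if (cc < 0 || cc >= static_cast<int>(channels)) continue;
          float v = inputs[(n * channels + cc) * spatial + hw];
          sum += v * v;
        }
        size_t idx = (n * channels + c) * spatial + hw;
        float denom = 1.0f + scale * sum;  // intermediate result kept for backward
        denoms[idx] = denom;
        outputs[idx] = inputs[idx] * std::pow(denom, -pow);
      }
    }
  }
}
```

A caller would size all three buffers to numSamples * channels * height * width before invoking the reference.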
diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/gserver/layers/NormProjectionLayer.cpp
index 4ff3b805fbb061cbb2630330c1b9b4f6c2b354d6..0f6f9b91d057852d665960411c1d73942925033c 100644
--- a/paddle/gserver/layers/NormProjectionLayer.cpp
+++ b/paddle/gserver/layers/NormProjectionLayer.cpp
@@ -48,20 +48,17 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
   if (useGpu_) {
     forward_ = FunctionBase::funcRegistrar_.createByType(
         FUNC_NAME(CrossMapNormal, GPU));
+    backward_ = FunctionBase::funcRegistrar_.createByType(
+        FUNC_NAME(CrossMapNormalGrad, GPU));
   } else {
     forward_ = FunctionBase::funcRegistrar_.createByType(
         FUNC_NAME(CrossMapNormal, CPU));
+    backward_ = FunctionBase::funcRegistrar_.createByType(
+        FUNC_NAME(CrossMapNormalGrad, CPU));
   }
   forward_->init(
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
 
-  if (useGpu_) {
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, GPU));
-  } else {
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, CPU));
-  }
   backward_->init(
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
 
@@ -74,7 +71,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   /* malloc memory for the output_ if necessary */
   /* note: one sample correspond to one row */
   MatrixPtr input = inputLayers_[0]->getOutputValue();
-  int batchSize = input->getHeight();
+  size_t batchSize = input->getHeight();
   int size = getSize();
   resetOutput(batchSize, size);
 
@@ -82,10 +79,7 @@
 
   Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_);
 
-  dims_ = {(size_t)batchSize,
-           (size_t)channels_,
-           (size_t)imgSizeH_,
-           (size_t)imgSizeW_};
+  dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_};
   forward_->calc(
       {Tensor(input->getData(), dims_)},
       {Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)},
diff --git a/paddle/gserver/layers/NormProjectionLayer.h b/paddle/gserver/layers/NormProjectionLayer.h
index 3c4876ece609e70393e92b14d33a9bc6f13136c9..6b2c5dde0d74db4b292d5006d19ce54d3194017e 100644
--- a/paddle/gserver/layers/NormProjectionLayer.h
+++ b/paddle/gserver/layers/NormProjectionLayer.h
@@ -16,7 +16,6 @@ limitations under the License. */
 
 #include
 #include "NormLayer.h"
-#include "paddle/function/Function.h"
 #include "paddle/math/Matrix.h"
 
 namespace paddle {
@@ -43,7 +42,5 @@ public:
 
 protected:
   Dims dims_;
-  FunctionBase* forward_;
-  FunctionBase* backward_;
 };
 }  // namespace paddle
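
For readers unfamiliar with the Function registrar that init() now uses for both forward_ and backward_, the sketch below shows the create-by-type-name idea in isolation: a string-keyed factory returns a base-class pointer, which is why Layer.h can hold plain FunctionBase* members and pick the CPU or GPU variant at init time. Everything in the sketch (DemoFunction, SimpleRegistrar, the "-CPU"/"-GPU" strings) is a simplified stand-in invented for illustration; the real mechanism lives in paddle/function/Function.h and is reached through FunctionBase::funcRegistrar_.createByType(FUNC_NAME(...)) exactly as in the diff above.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Stand-in for FunctionBase: a device-agnostic interface the layer holds.
struct DemoFunction {
  virtual ~DemoFunction() = default;
  virtual void calc() = 0;  // real paddle functions take Tensor arguments
};

struct DemoCrossMapNormalCpu : DemoFunction {
  void calc() override { std::cout << "CrossMapNormal-CPU\n"; }
};
struct DemoCrossMapNormalGpu : DemoFunction {
  void calc() override { std::cout << "CrossMapNormal-GPU\n"; }
};

// Stand-in for the registrar: a string-keyed factory of function objects.
class SimpleRegistrar {
public:
  using Creator = std::function<std::unique_ptr<DemoFunction>()>;
  void registerType(const std::string& name, Creator c) {
    creators_[name] = std::move(c);
  }
  std::unique_ptr<DemoFunction> createByType(const std::string& name) {
    return creators_.at(name)();
  }

private:
  std::map<std::string, Creator> creators_;
};

int main() {
  SimpleRegistrar registrar;
  registrar.registerType("CrossMapNormal-CPU", []() -> std::unique_ptr<DemoFunction> {
    return std::make_unique<DemoCrossMapNormalCpu>();
  });
  registrar.registerType("CrossMapNormal-GPU", []() -> std::unique_ptr<DemoFunction> {
    return std::make_unique<DemoCrossMapNormalGpu>();
  });

  bool useGpu = false;  // mirrors the useGpu_ branch in init()
  auto forward = registrar.createByType(useGpu ? "CrossMapNormal-GPU"
                                               : "CrossMapNormal-CPU");
  forward->calc();  // dispatches to the device-specific implementation
  return 0;
}
```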