提交 cee93468 编写于 作者: H hedaoyuan

add some comments

上级 f13aeb52
...@@ -17,7 +17,6 @@ limitations under the License. */ ...@@ -17,7 +17,6 @@ limitations under the License. */
namespace paddle { namespace paddle {
// NCHW
template <> template <>
void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs, void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs,
real* denoms, real* denoms,
...@@ -36,6 +35,10 @@ void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs, ...@@ -36,6 +35,10 @@ void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs,
CpuVector inputsV(numSamples * oneSample, inputs); CpuVector inputsV(numSamples * oneSample, inputs);
CpuVector denomsV(numSamples * oneSample, denoms); CpuVector denomsV(numSamples * oneSample, denoms);
// f(x) = x * ( 1 + scale * SUM((x)^2) )^(-pow)
// x represents inputs
// f(x) represents outputs
// denoms saves the intermediate result ( 1 + scale * SUM((x)^2) )
// so that the backward pass can reuse it
denomsV = denomsV.constant(1.0); denomsV = denomsV.constant(1.0);
const int start = -((int)size - 1) / 2; const int start = -((int)size - 1) / 2;
const int end = (int)size + start; const int end = (int)size + start;
......
...@@ -18,6 +18,22 @@ limitations under the License. */ ...@@ -18,6 +18,22 @@ limitations under the License. */
namespace paddle { namespace paddle {
/**
 * \brief Cross map response normalize forward.
 * The data structure of image data is NCHW.
 *
 * \param[out] outputs output data.
 * \param[out] denoms buffer storing the intermediate normalization
 *                    term, reused by the backward pass.
 * \param[in] inputs input data.
 * \param[in] numSamples batch size of input image.
 * \param[in] channels number of channels.
 * \param[in] height image height.
 * \param[in] width image width.
 * \param[in] size size of the normalization window (number of nearby
 *                 channels summed over).
 * \param[in] scale scale factor applied to the squared sum.
 * \param[in] pow exponent of the normalization term.
 *
 */
template <DeviceType Device> template <DeviceType Device>
void CrossMapNormal(real* outputs, void CrossMapNormal(real* outputs,
real* denoms, real* denoms,
...@@ -30,6 +46,24 @@ void CrossMapNormal(real* outputs, ...@@ -30,6 +46,24 @@ void CrossMapNormal(real* outputs,
real scale, real scale,
real pow); real pow);
/**
 * \brief Cross map response normalize backward.
 * The data structure of image data is NCHW.
 *
 * \param[out] inputsGrad input grad.
 * \param[in] inputsValue input value.
 * \param[in] outputsValue output value.
 * \param[in] outputsGrad output grad.
 * \param[in] denoms buffer computed by the forward pass.
 * \param[in] numSamples batch size of input image.
 * \param[in] channels number of channels.
 * \param[in] height image height.
 * \param[in] width image width.
 * \param[in] size size of the normalization window (number of nearby
 *                 channels summed over).
 * \param[in] scale scale factor applied to the squared sum.
 * \param[in] pow exponent of the normalization term.
 *
 */
template <DeviceType Device> template <DeviceType Device>
void CrossMapNormalGrad(real* inputsGrad, void CrossMapNormalGrad(real* inputsGrad,
real* inputsValue, real* inputsValue,
......
...@@ -18,6 +18,7 @@ limitations under the License. */ ...@@ -18,6 +18,7 @@ limitations under the License. */
#include <functional> #include <functional>
#include <memory> #include <memory>
#include "ModelConfig.pb.h" #include "ModelConfig.pb.h"
#include "paddle/function/Function.h"
#include "paddle/math/CpuSparseMatrix.h" #include "paddle/math/CpuSparseMatrix.h"
#include "paddle/parameter/Parameter.h" #include "paddle/parameter/Parameter.h"
#include "paddle/utils/ClassRegistrar.h" #include "paddle/utils/ClassRegistrar.h"
...@@ -100,6 +101,11 @@ protected: ...@@ -100,6 +101,11 @@ protected:
/// Mark input grad in(true) or out(false) of backward function. /// Mark input grad in(true) or out(false) of backward function.
std::vector<bool> markInBackward_; std::vector<bool> markInBackward_;
/// Layer forward function
FunctionBase* forward_;
/// Layer backward function
FunctionBase* backward_;
public: public:
/** /**
* Wait until all input value ready. * Wait until all input value ready.
......
...@@ -48,20 +48,17 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap, ...@@ -48,20 +48,17 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
if (useGpu_) { if (useGpu_) {
forward_ = FunctionBase::funcRegistrar_.createByType( forward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormal, GPU)); FUNC_NAME(CrossMapNormal, GPU));
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, GPU));
} else { } else {
forward_ = FunctionBase::funcRegistrar_.createByType( forward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormal, CPU)); FUNC_NAME(CrossMapNormal, CPU));
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, CPU));
} }
forward_->init( forward_->init(
FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_)); FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
if (useGpu_) {
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, GPU));
} else {
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, CPU));
}
backward_->init( backward_->init(
FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_)); FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
...@@ -74,7 +71,7 @@ void CMRProjectionNormLayer::forward(PassType passType) { ...@@ -74,7 +71,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
/* malloc memory for the output_ if necessary */ /* malloc memory for the output_ if necessary */
/* note: one sample correspond to one row */ /* note: one sample correspond to one row */
MatrixPtr input = inputLayers_[0]->getOutputValue(); MatrixPtr input = inputLayers_[0]->getOutputValue();
int batchSize = input->getHeight(); size_t batchSize = input->getHeight();
int size = getSize(); int size = getSize();
resetOutput(batchSize, size); resetOutput(batchSize, size);
...@@ -82,10 +79,7 @@ void CMRProjectionNormLayer::forward(PassType passType) { ...@@ -82,10 +79,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_); Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_);
dims_ = {(size_t)batchSize, dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_};
(size_t)channels_,
(size_t)imgSizeH_,
(size_t)imgSizeW_};
forward_->calc( forward_->calc(
{Tensor(input->getData(), dims_)}, {Tensor(input->getData(), dims_)},
{Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)}, {Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)},
......
...@@ -16,7 +16,6 @@ limitations under the License. */ ...@@ -16,7 +16,6 @@ limitations under the License. */
#include <vector> #include <vector>
#include "NormLayer.h" #include "NormLayer.h"
#include "paddle/function/Function.h"
#include "paddle/math/Matrix.h" #include "paddle/math/Matrix.h"
namespace paddle { namespace paddle {
...@@ -43,7 +42,5 @@ public: ...@@ -43,7 +42,5 @@ public:
protected: protected:
Dims dims_; Dims dims_;
FunctionBase* forward_;
FunctionBase* backward_;
}; };
} // namespace paddle } // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册