提交 cee93468 编写于 作者: H hedaoyuan

add some comments

上级 f13aeb52
......@@ -17,7 +17,6 @@ limitations under the License. */
namespace paddle {
// NCHW
template <>
void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs,
real* denoms,
......@@ -36,6 +35,10 @@ void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs,
CpuVector inputsV(numSamples * oneSample, inputs);
CpuVector denomsV(numSamples * oneSample, denoms);
// f(x) = x * ( 1 + scale * SUM((x)^2) )^(-pow)
// x represents the inputs
// f(x) represents the outputs
// denoms saves the intermediate results reused by the backward pass
denomsV = denomsV.constant(1.0);
const int start = -((int)size - 1) / 2;
const int end = (int)size + start;
......
......@@ -18,6 +18,22 @@ limitations under the License. */
namespace paddle {
/**
 * \brief Cross map response normalize forward.
 *        The data structure of image data is NCHW.
 *
 * \param[out] outputs     output data.
 * \param[out] denoms      denoms buffer, filled with intermediate results
 *                         that the backward pass reuses.
 * \param[in]  inputs      input data.
 * \param[in]  numSamples  batch size of input image.
 * \param[in]  channels    number of channels.
 * \param[in]  height      image height.
 * \param[in]  width       image width.
 * \param[in]  size        size of the normalization window.
 * \param[in]  scale       scale factor of the normalization.
 * \param[in]  pow         exponent of the normalization.
 *
 */
template <DeviceType Device>
void CrossMapNormal(real* outputs,
real* denoms,
......@@ -30,6 +46,24 @@ void CrossMapNormal(real* outputs,
real scale,
real pow);
/**
 * \brief Cross map response normalize backward.
 *        The data structure of image data is NCHW.
 *
 * \param[out] inputsGrad    input grad.
 * \param[in]  inputsValue   input value.
 * \param[in]  outputsValue  output value.
 * \param[in]  outputsGrad   output grad.
 * \param[in]  denoms        denoms buffer computed by the forward pass.
 * \param[in]  numSamples    batch size of input image.
 * \param[in]  channels      number of channels.
 * \param[in]  height        image height.
 * \param[in]  width         image width.
 * \param[in]  size          size of the normalization window.
 * \param[in]  scale         scale factor of the normalization.
 * \param[in]  pow           exponent of the normalization.
 *
 */
template <DeviceType Device>
void CrossMapNormalGrad(real* inputsGrad,
real* inputsValue,
......
......@@ -18,6 +18,7 @@ limitations under the License. */
#include <functional>
#include <memory>
#include "ModelConfig.pb.h"
#include "paddle/function/Function.h"
#include "paddle/math/CpuSparseMatrix.h"
#include "paddle/parameter/Parameter.h"
#include "paddle/utils/ClassRegistrar.h"
......@@ -100,6 +101,11 @@ protected:
/// Mark input grad in(true) or out(false) of backward function.
std::vector<bool> markInBackward_;
/// Layer forward function
FunctionBase* forward_;
/// Layer backward function
FunctionBase* backward_;
public:
/**
* Wait until all input value ready.
......
......@@ -48,20 +48,17 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
if (useGpu_) {
forward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormal, GPU));
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, GPU));
} else {
forward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormal, CPU));
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, CPU));
}
forward_->init(
FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
if (useGpu_) {
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, GPU));
} else {
backward_ = FunctionBase::funcRegistrar_.createByType(
FUNC_NAME(CrossMapNormalGrad, CPU));
}
backward_->init(
FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
......@@ -74,7 +71,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
/* malloc memory for the output_ if necessary */
/* note: one sample correspond to one row */
MatrixPtr input = inputLayers_[0]->getOutputValue();
int batchSize = input->getHeight();
size_t batchSize = input->getHeight();
int size = getSize();
resetOutput(batchSize, size);
......@@ -82,10 +79,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_);
dims_ = {(size_t)batchSize,
(size_t)channels_,
(size_t)imgSizeH_,
(size_t)imgSizeW_};
dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_};
forward_->calc(
{Tensor(input->getData(), dims_)},
{Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)},
......
......@@ -16,7 +16,6 @@ limitations under the License. */
#include <vector>
#include "NormLayer.h"
#include "paddle/function/Function.h"
#include "paddle/math/Matrix.h"
namespace paddle {
......@@ -43,7 +42,5 @@ public:
protected:
Dims dims_;
FunctionBase* forward_;
FunctionBase* backward_;
};
} // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册