diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h
index 16f66a2205f4956291c8aef87b0100c3b685a9f2..6dfd48fb96618102b71e9f6de79a348dc7f62647 100644
--- a/paddle/gserver/layers/Layer.h
+++ b/paddle/gserver/layers/Layer.h
@@ -102,9 +102,9 @@ protected:
   std::vector<bool> markInBackward_;
 
   /// Layer forward function
-  FunctionBase* forward_;
+  std::vector<std::shared_ptr<FunctionBase>> forward_;
   /// Layer backward function
-  FunctionBase* backward_;
+  std::vector<std::shared_ptr<FunctionBase>> backward_;
 
 public:
   /**
@@ -132,6 +132,26 @@ public:
   virtual void markAllInputGrad();
 
 protected:
+  /**
+   * Create layer function. Function is called in forward or backward.
+   * \param function, Layer::forward_ or Layer::backward_
+   * \param name, function name
+   * \param config, initialization configuration for the function
+   */
+  void createFunction(std::vector<std::shared_ptr<FunctionBase>>& function,
+                      const std::string& name,
+                      const FuncConfig& config) {
+    if (useGpu_) {
+      function.emplace_back(
+          FunctionBase::funcRegistrar_.createByType(name + "-GPU"));
+    } else {
+      function.emplace_back(
+          FunctionBase::funcRegistrar_.createByType(name + "-CPU"));
+    }
+    auto& func = function.back();
+    func->init(config);
+  }
+
   /**
    * Notify specified layer the output grad ready.
    * Called in the backward function.
diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/gserver/layers/NormProjectionLayer.cpp
index 0f6f9b91d057852d665960411c1d73942925033c..262d757c67e105a8d65619eed91de65d34cfe35e 100644
--- a/paddle/gserver/layers/NormProjectionLayer.cpp
+++ b/paddle/gserver/layers/NormProjectionLayer.cpp
@@ -45,21 +45,13 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
   /* the size of inputs for norm-layer is 1 */
   CHECK_EQ(config_.inputs_size(), 1);
 
-  if (useGpu_) {
-    forward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormal, GPU));
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, GPU));
-  } else {
-    forward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormal, CPU));
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, CPU));
-  }
-  forward_->init(
+  createFunction(
+      forward_,
+      "CrossMapNormal",
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
-
-  backward_->init(
+  createFunction(
+      backward_,
+      "CrossMapNormalGrad",
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
 
   return true;
@@ -80,7 +72,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_);
 
   dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_};
-  forward_->calc(
+  forward_[0]->calc(
       {Tensor(input->getData(), dims_)},
       {Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)},
       {});
@@ -98,11 +90,11 @@ void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
   MatrixPtr localOutV = getOutputValue();
   MatrixPtr preOutV = inputLayers_[0]->getOutputValue();
 
-  backward_->calc({Tensor(preOutV->getData(), dims_),
-                   Tensor(localOutV->getData(), dims_),
-                   Tensor(localGrad->getData(), dims_),
-                   Tensor(denoms_->getData(), dims_)},
-                  {Tensor(preOutGrad->getData(), dims_)},
-                  {});
+  backward_[0]->calc({Tensor(preOutV->getData(), dims_),
+                      Tensor(localOutV->getData(), dims_),
+                      Tensor(localGrad->getData(), dims_),
+                      Tensor(denoms_->getData(), dims_)},
+                     {Tensor(preOutGrad->getData(), dims_)},
+                     {});
 }
 }  // namespace paddle
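
The diff relies on function implementations being registered in funcRegistrar_ under type names carrying a device suffix ("-CPU" / "-GPU"), so createFunction can select the right variant at layer-init time from useGpu_. Below is a minimal self-contained sketch of that registrar pattern, not the PaddlePaddle sources: FuncConfig, FunctionBase, registry(), and CrossMapNormalCpu are simplified, hypothetical stand-ins.

// Sketch of a type-name registry with CPU/GPU dispatch, modeled on the
// createFunction helper in the diff above. All names here are illustrative.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Stand-in for paddle's FuncConfig: a chainable key/value attribute set.
struct FuncConfig {
  std::map<std::string, double> attrs;
  FuncConfig& set(const std::string& key, double v) {
    attrs[key] = v;
    return *this;
  }
};

// Stand-in for paddle's FunctionBase with a simplified registrar.
struct FunctionBase {
  virtual ~FunctionBase() = default;
  virtual void init(const FuncConfig& config) {}
  virtual void calc() = 0;  // the real calc takes input/output Tensor lists

  using Factory = std::function<FunctionBase*()>;
  static std::map<std::string, Factory>& registry() {
    static std::map<std::string, Factory> r;  // type name -> factory
    return r;
  }
  static FunctionBase* createByType(const std::string& type) {
    return registry().at(type)();  // throws if the type was never registered
  }
};

// Hypothetical CPU implementation registered under "CrossMapNormal-CPU".
struct CrossMapNormalCpu : FunctionBase {
  double size = 0;
  void init(const FuncConfig& config) override { size = config.attrs.at("size"); }
  void calc() override { std::cout << "CPU cross-map normal, size=" << size << "\n"; }
};

// Minimal layer showing the createFunction helper from the diff: append the
// device-specific function to the vector, then init it with the config.
struct Layer {
  bool useGpu_ = false;
  std::vector<std::shared_ptr<FunctionBase>> forward_;

  void createFunction(std::vector<std::shared_ptr<FunctionBase>>& function,
                      const std::string& name,
                      const FuncConfig& config) {
    const char* suffix = useGpu_ ? "-GPU" : "-CPU";
    function.emplace_back(FunctionBase::createByType(name + suffix));
    function.back()->init(config);
  }
};

int main() {
  FunctionBase::registry()["CrossMapNormal-CPU"] = [] { return new CrossMapNormalCpu; };
  Layer layer;
  layer.createFunction(layer.forward_, "CrossMapNormal", FuncConfig().set("size", 5));
  layer.forward_[0]->calc();  // layers index into forward_, as in the diff
}

Holding the functions in a vector (rather than a single pointer) lets one layer own several forward/backward functions; the diff's call sites use forward_[0] / backward_[0] because CMRProjectionNormLayer creates exactly one of each.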