From c397599dfd9cd667cc28ef2480365cfce5e5ef18 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 20 Nov 2017 14:04:48 +0800 Subject: [PATCH] remove weight and bias in MKLDNN reset function, since not all layers have weight and bias. and remove some comments. --- paddle/gserver/layers/MKLDNNAddtoLayer.cpp | 22 +++++++--------- paddle/gserver/layers/MKLDNNAddtoLayer.h | 16 ------------ .../gserver/layers/MKLDNNBatchNormLayer.cpp | 16 +++++------- paddle/gserver/layers/MKLDNNBatchNormLayer.h | 16 +----------- paddle/gserver/layers/MKLDNNConcatLayer.cpp | 4 --- paddle/gserver/layers/MKLDNNConcatLayer.h | 14 ---------- paddle/gserver/layers/MKLDNNConvLayer.cpp | 12 +++------ paddle/gserver/layers/MKLDNNConvLayer.h | 26 ------------------- paddle/gserver/layers/MKLDNNFcLayer.cpp | 16 +++++------- paddle/gserver/layers/MKLDNNFcLayer.h | 16 ------------ paddle/gserver/layers/MKLDNNLayer.cpp | 4 +-- paddle/gserver/layers/MKLDNNLayer.h | 6 ++--- paddle/gserver/layers/MKLDNNPoolLayer.cpp | 4 --- paddle/gserver/layers/MKLDNNPoolLayer.h | 15 ----------- 14 files changed, 31 insertions(+), 156 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp index bbde0683d3e..1ab3032316c 100644 --- a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp +++ b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp @@ -58,25 +58,21 @@ void MKLDNNAddtoLayer::reshape( void MKLDNNAddtoLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - resetFwdBuffers(inVals_, bias, out); + resetFwdBuffers(inVals_, biasVal_, out); in = inVals_[0]; std::shared_ptr fwdPD; std::shared_ptr biasPD; - resetFwdPD(fwdPD, biasPD, inVals_, bias, out); + resetFwdPD(fwdPD, biasPD, inVals_, biasVal_, out); - resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, bias, out); + resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, biasVal_, out); } void MKLDNNAddtoLayer::resetBwd(std::vector& pipeline, 
MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - resetBwdBuffers(inGrads_, bias, out); + resetBwdBuffers(inGrads_, biasGrad_, out); in = inGrads_[0]; // backward only need share output grad to input grad @@ -89,15 +85,17 @@ void MKLDNNAddtoLayer::resetBwd(std::vector& pipeline, // backward bias bwdBias_ = nullptr; - if (bias) { + if (biasGrad_) { std::vector scales(bs_, 1.0); - std::vector srcPDs(bs_, bias->getPrimitiveDesc()); - auto biasPD = sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs); + std::vector srcPDs(bs_, + biasGrad_->getPrimitiveDesc()); + auto biasPD = + sum::primitive_desc(biasGrad_->getMemoryDesc(), scales, srcPDs); std::vector srcs; for (size_t i = 0; i < grads_.size(); ++i) { srcs.push_back(*(grads_[i])); } - bwdBias_.reset(new sum(biasPD, srcs, *bias)); + bwdBias_.reset(new sum(biasPD, srcs, *biasGrad_)); pipeline.push_back(*bwdBias_); } } diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.h b/paddle/gserver/layers/MKLDNNAddtoLayer.h index 4b0b5cb9343..1406496a7ae 100644 --- a/paddle/gserver/layers/MKLDNNAddtoLayer.h +++ b/paddle/gserver/layers/MKLDNNAddtoLayer.h @@ -54,14 +54,10 @@ public: void resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void updateWeights(const UpdateCallback& callback) override; @@ -91,11 +87,6 @@ public: } protected: - /** - * Forward functions: reset buffers(inputs, output, bias), - * reset primitive descriptor, - * reset pipeline. 
- */ void resetFwdBuffers(std::vector& inputs, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); @@ -110,17 +101,10 @@ protected: std::vector& inputs, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - - /** - * Backward functions: reset buffers(inputs, output, bias) - */ void resetBwdBuffers(std::vector& inputs, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - /** - * prepare for bias - */ void prepareBias(MKLDNNMatrixPtr& bias, const MatrixPtr& biasMat, const MKLDNNMatrixPtr& out, diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp index 9077e13136e..96e5a99f337 100644 --- a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp +++ b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp @@ -129,8 +129,6 @@ void MKLDNNBatchNormLayer::reshape( void MKLDNNBatchNormLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { // In training phase, it will always calculate mean and var, // so useGlobalStats must be false. 
@@ -140,25 +138,23 @@ void MKLDNNBatchNormLayer::resetFwd(std::vector& pipeline, useGlobalStats_ = false; } - resetFwdBuffers(in, wgt, out); + resetFwdBuffers(in, wgtVal_, out); - resetFwdPD(fwdPD_, in, wgt, out); + resetFwdPD(fwdPD_, in, wgtVal_, out); - resetFwdPipeline(pipeline, fwdPD_, in, wgt, out); + resetFwdPipeline(pipeline, fwdPD_, in, wgtVal_, out); } void MKLDNNBatchNormLayer::resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { std::shared_ptr pd; - resetBwdBuffers(in, wgt, out); + resetBwdBuffers(in, wgtGrad_, out); - resetBwdPD(pd, in, wgt, out); + resetBwdPD(pd, in, wgtGrad_, out); - resetBwdPipeline(pipeline, pd, in, wgt, out); + resetBwdPipeline(pipeline, pd, in, wgtGrad_, out); } void MKLDNNBatchNormLayer::forward(PassType passType) { diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.h b/paddle/gserver/layers/MKLDNNBatchNormLayer.h index 8f58efa39e9..a9a425ee33b 100644 --- a/paddle/gserver/layers/MKLDNNBatchNormLayer.h +++ b/paddle/gserver/layers/MKLDNNBatchNormLayer.h @@ -77,14 +77,10 @@ public: void resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void updateWeights(const UpdateCallback& callback) override; @@ -98,11 +94,7 @@ protected: * moving = moving * AvgFraction + local * (1 - AvgFraction) */ void calMovingMeanAndVar(); - /** - * Forward functions: reset buffers(input, weight, output), - * reset primitive descriptor, - * reset pipeline. 
- */ + void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& out); @@ -115,12 +107,6 @@ protected: MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& out); - - /** - * Backward functions: reset buffers(input, weight, output), - * reset primitive descriptor, - * reset pipeline. - */ void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& out); diff --git a/paddle/gserver/layers/MKLDNNConcatLayer.cpp b/paddle/gserver/layers/MKLDNNConcatLayer.cpp index 8e8ccc92413..7906e180855 100644 --- a/paddle/gserver/layers/MKLDNNConcatLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConcatLayer.cpp @@ -60,8 +60,6 @@ void MKLDNNConcatLayer::reshape( void MKLDNNConcatLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { resetFwdBuffers(inVals_, out); in = inVals_[0]; @@ -74,8 +72,6 @@ void MKLDNNConcatLayer::resetFwd(std::vector& pipeline, void MKLDNNConcatLayer::resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { resetBwdBuffers(inGrads_, out); in = inGrads_[0]; diff --git a/paddle/gserver/layers/MKLDNNConcatLayer.h b/paddle/gserver/layers/MKLDNNConcatLayer.h index 8a684189718..2750a6ed294 100644 --- a/paddle/gserver/layers/MKLDNNConcatLayer.h +++ b/paddle/gserver/layers/MKLDNNConcatLayer.h @@ -51,14 +51,10 @@ public: void resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void printSizeInfo() override { @@ -99,11 +95,6 @@ public: } protected: - /** - * Forward functions: reset buffers(inputs, output, bias), - * reset primitive descriptor, - * reset pipeline. 
- */ void resetFwdBuffers(std::vector& inputs, MKLDNNMatrixPtr& out); void resetFwdPD(std::shared_ptr& pd, @@ -113,11 +104,6 @@ protected: std::shared_ptr& pd, std::vector& inputs, MKLDNNMatrixPtr& out); - - /** - * Backward functions: reset buffers(inputs, output, bias) - * reset primitives and pipeline - */ void resetBwdBuffers(std::vector& inputs, MKLDNNMatrixPtr& out); void resetBwdPipeline(std::vector& pipeline, diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp index 4610c8ce8e0..5d89f230d28 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp @@ -106,20 +106,16 @@ void MKLDNNConvLayer::reshape( void MKLDNNConvLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { resetFwdPD(fwdPD_); - resetFwdBuffers(fwdPD_, in, wgt, bias, out); + resetFwdBuffers(fwdPD_, in, wgtVal_, biasVal_, out); - resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out); + resetFwdPipeline(pipeline, fwdPD_, in, wgtVal_, biasVal_, out); } void MKLDNNConvLayer::resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { std::shared_ptr bwdWgtPD; std::shared_ptr bwdDataPD; @@ -128,9 +124,9 @@ void MKLDNNConvLayer::resetBwd(std::vector& pipeline, resetBwdDataPD(bwdDataPD); - resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgt, bias, out); + resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgtGrad_, biasGrad_, out); - resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out); + resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgtGrad_, biasGrad_, out); } void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) { diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h index a9bc4b5b569..900f42af847 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.h +++ b/paddle/gserver/layers/MKLDNNConvLayer.h @@ -73,14 
+73,10 @@ public: void resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void updateWeights(const UpdateCallback& callback) override; @@ -107,48 +103,26 @@ protected: mkldnn::memory::dims& padL, mkldnn::memory::dims& padR); - /** - * reset the forward primitive descriptor. - */ void resetFwdPD(std::shared_ptr& pd); - /** - * reset the MKLDNNMatrix buffers used in forward. - */ void resetFwdBuffers(std::shared_ptr& pd, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - /** - * reset the forward pipeline. - */ void resetFwdPipeline(std::vector& pipeline, std::shared_ptr& pd, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - - /** - * reset the backward weight primitive descriptor. - */ void resetBwdWgtPD(std::shared_ptr& pd); - /** - * reset the backward data primitive descriptor. - */ void resetBwdDataPD(std::shared_ptr& pd); - /** - * reset the MKLDNNMatrix buffers used in backward. - */ void resetBwdBuffers(std::shared_ptr& wgtPD, std::shared_ptr& dataPD, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - /** - * reset the backward pipeline. 
- */ void resetBwdPipeline(std::vector& pipeline, std::shared_ptr& wgtPD, std::shared_ptr& dataPD, diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index c3ce9c6a4b9..ccf11e04a37 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -88,31 +88,27 @@ void MKLDNNFcLayer::reshape( void MKLDNNFcLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - resetFwdBuffers(in, wgt, bias, out); + resetFwdBuffers(in, wgtVal_, biasVal_, out); - resetFwdPD(fwdPD_, in, wgt, bias, out); + resetFwdPD(fwdPD_, in, wgtVal_, biasVal_, out); - resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out); + resetFwdPipeline(pipeline, fwdPD_, in, wgtVal_, biasVal_, out); } void MKLDNNFcLayer::resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { std::shared_ptr bwdWgtPD; std::shared_ptr bwdDataPD; - resetBwdBuffers(in, wgt, bias, out); + resetBwdBuffers(in, wgtGrad_, biasGrad_, out); - resetBwdWgtPD(bwdWgtPD, wgt, bias, out); + resetBwdWgtPD(bwdWgtPD, wgtGrad_, biasGrad_, out); resetBwdDataPD(bwdDataPD, in, out); - resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out); + resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgtGrad_, biasGrad_, out); } void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) { diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index 20012e5ba26..a9c916ea132 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -56,14 +56,10 @@ public: void resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void 
updateWeights(const UpdateCallback& callback) override; @@ -73,11 +69,6 @@ public: void convertWeightsToPaddle() override; protected: - /** - * Forward functions: reset buffers(input, output, weight and bias), - * reset primitive descriptor, - * reset pipeline. - */ void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, @@ -93,13 +84,6 @@ protected: MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - - /** - * Backward functions: reset buffers(input, output, weight and bias), - * reset primitive descriptor for backward weight, - * reset primitive descriptor for backward data, - * reset pipeline. - */ void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp b/paddle/gserver/layers/MKLDNNLayer.cpp index fe6ec7d4b72..e2234535499 100644 --- a/paddle/gserver/layers/MKLDNNLayer.cpp +++ b/paddle/gserver/layers/MKLDNNLayer.cpp @@ -58,7 +58,7 @@ void MKLDNNLayer::forward(PassType passType) { printSizeInfo(); // all cpu device output grad or value share output's shareCPUDevice(); - resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_); + resetFwd(pipelineFwd_, inVal_, outVal_); // MKLDNNLayer output value should be MKLDNNMatrix // so external output value is necessary. // Then external input value is not necessary, @@ -101,7 +101,7 @@ void MKLDNNLayer::backward(const UpdateCallback& callback) { pipelineBwd_.clear(); pipelineMergeGrad_.clear(); mergeGrad_ = nullptr; - resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_); + resetBwd(pipelineBwd_, inGrad_, outGrad_); // external output grad is not necessary // since output may be mkldnn internal buffer or merge them directly. 
CHECK(outGrad_) << "internal output grad is necessary"; diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index 2516433139f..d9542bfca2c 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -134,21 +134,19 @@ public: /** * reset the mkldnn forward primitve and memories * only would be called when input size changes + * weight and bias buffers should be covered by child class itself */ virtual void resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) = 0; /** * reset the mkldnn backward primitve and memories * only would be called when needed + * weight and bias buffers should be covered by child class itself */ virtual void resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) = 0; /** diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/gserver/layers/MKLDNNPoolLayer.cpp index 9594c5ec4ff..79102aba00b 100644 --- a/paddle/gserver/layers/MKLDNNPoolLayer.cpp +++ b/paddle/gserver/layers/MKLDNNPoolLayer.cpp @@ -75,8 +75,6 @@ void MKLDNNPoolLayer::reshape( void MKLDNNPoolLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { resetFwdBuffers(in, out); @@ -87,8 +85,6 @@ void MKLDNNPoolLayer::resetFwd(std::vector& pipeline, void MKLDNNPoolLayer::resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { std::shared_ptr pd; diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.h b/paddle/gserver/layers/MKLDNNPoolLayer.h index 511a2a91a95..972419c5af0 100644 --- a/paddle/gserver/layers/MKLDNNPoolLayer.h +++ b/paddle/gserver/layers/MKLDNNPoolLayer.h @@ -57,14 +57,10 @@ public: void resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; 
void resetBwd(std::vector& pipeline, MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; void printSizeInfo() override { @@ -75,11 +71,6 @@ public: } protected: - /** - * Forward functions: reset buffers(input, output), - * reset primitive descriptor, - * reset pipeline. - */ void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); void resetFwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr in, @@ -88,12 +79,6 @@ protected: std::shared_ptr& pd, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); - - /** - * Backward functions: reset buffers(input, output), - * reset primitive descriptor, - * reset pipeline. - */ void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); void resetBwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr& in, -- GitLab