From 49d4b39f28458fbc6a071affd66e88f86c08aee8 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Tue, 19 Sep 2017 11:07:18 +0800
Subject: [PATCH] fix typo and remove some unused code

---
 paddle/gserver/layers/MKLDNNConvLayer.cpp | 5 +----
 paddle/gserver/layers/MKLDNNFcLayer.cpp   | 4 +---
 paddle/gserver/layers/MKLDNNLayer.h       | 4 ++++
 paddle/gserver/layers/MKLDNNPoolLayer.cpp | 1 -
 paddle/gserver/tests/test_MKLDNN.cpp      | 1 -
 5 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp
index 9088744be..2647cb600 100644
--- a/paddle/gserver/layers/MKLDNNConvLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp
@@ -294,12 +294,9 @@ void MKLDNNConvLayer::resetOutValue(
     std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& out) {
   out = MKLDNNMatrix::create(output_.value, pd->dst_primitive_desc());
 
-  // change original output value from cpu matrix to mkldnn matrix
-  output_.value = std::dynamic_pointer_cast<Matrix>(out);
-
   // create reorder if output value has cpu device and pd do not match
   cpuOutVal_ = nullptr;
-  cpuOutVal_ = nullptr;
+  cvtOutVal_ = nullptr;
   if (!outputIsOnlyMKLDNN()) {
     const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
     memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp
index f60e221a6..66b358bce 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -172,12 +172,10 @@ void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt,
 
 void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) {
   out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_);
-  // change original output value to mkldnn output value
-  output_.value = std::dynamic_pointer_cast<Matrix>(out);
   if (!outputIsOnlyMKLDNN()) {
     // fc cpu output value do not need create convert
     // just share point
-    getOutput(CPU_DEVICE).value->setData(output_.value->getData());
+    getOutput(CPU_DEVICE).value->setData(out->getData());
   }
 }
 
diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h
index 169679c82..c4e4a6874 100644
--- a/paddle/gserver/layers/MKLDNNLayer.h
+++ b/paddle/gserver/layers/MKLDNNLayer.h
@@ -119,6 +119,10 @@ public:
       inputElemenCnt_ = elemenCnt;
       reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_);
       resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_);
+      if (outVal_) {
+        // change original output value to mkldnn output value
+        output_.value = std::dynamic_pointer_cast<Matrix>(outVal_);
+      }
       convertWeightsFromPaddle();
       needResetBwd_ = true;
     }
diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
index 48b2f5a4c..b62dfb7c5 100644
--- a/paddle/gserver/layers/MKLDNNPoolLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
@@ -134,7 +134,6 @@ void MKLDNNPoolLayer::resetOutValue(MKLDNNMatrixPtr& out) {
   memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
   out = MKLDNNMatrix::create(
       output_.value, outDims, inVal_->getFormat(), engine_);
-  output_.value = std::dynamic_pointer_cast<Matrix>(out);
 
   // create reorder if output value has cpu device and pd do not match
   cpuOutVal_ = nullptr;
diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp
index b593f65fe..7620365ef 100644
--- a/paddle/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/gserver/tests/test_MKLDNN.cpp
@@ -162,7 +162,6 @@ void testPoolLayer(const testPoolDesc& pm) {
                      0});
   LayerInputConfig* input = cfg.layerConfig.add_inputs();
   PoolConfig* pool = input->mutable_pool_conf();
-  // pool->set_pool_type(poolType);
   pool->set_channels(pm.ch);
   pool->set_img_size(pm.iw);
   pool->set_img_size_y(pm.ih);
-- 
GitLab