From 3fb6451c3a387854d10f59a75cd4106e84f007de Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Thu, 2 Nov 2017 22:00:03 +0800
Subject: [PATCH] add mkldnn_addto unit test and make it pass

---
 paddle/gserver/layers/MKLDNNLayer.cpp |  2 +-
 paddle/gserver/tests/MKLDNNTester.cpp |  6 ++--
 paddle/gserver/tests/test_MKLDNN.cpp  | 43 +++++++++++++++++++++++----
 3 files changed, 42 insertions(+), 9 deletions(-)

diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp b/paddle/gserver/layers/MKLDNNLayer.cpp
index 4347ab821d..5fd62f4f73 100644
--- a/paddle/gserver/layers/MKLDNNLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNLayer.cpp
@@ -77,7 +77,7 @@ void MKLDNNLayer::forward(PassType passType) {
     needResetBwd_ = true;
   }
 
-  if (inputLayers_[0]->getType() == "data") {
+  if (inputLayers_[0]->getType() == "data" && inputLayers_.size() == 1) {
     // Update input value data when input layer is "data" type,
     // since the input value data address might be changed.
     CHECK(extInVal_);
diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp
index 7670cb88fb..afe1608eab 100644
--- a/paddle/gserver/tests/MKLDNNTester.cpp
+++ b/paddle/gserver/tests/MKLDNNTester.cpp
@@ -132,7 +132,7 @@ void MKLDNNTester::checkForward() {
   VLOG(MKLDNN_TESTS) << "Check Forward";
   printTopDatas();
   double delta =
-      compareMatrix(dnnLayer_->getOutputValue(), refLayer_->getOutputValue());
+      compareMatrix(refLayer_->getOutputValue(), dnnLayer_->getOutputValue());
   EXPECT_LE(fabs(delta), eps_);
 }
 
@@ -147,7 +147,7 @@ void MKLDNNTester::checkBackwardData() {
     VLOG(MKLDNN_ALL) << "Reference Backward Result: InputGrad " << i;
     printMatrix(refDiff);
 
-    double delta = compareMatrix(dnnDiff, refDiff);
+    double delta = compareMatrix(refDiff, dnnDiff);
     EXPECT_LE(fabs(delta), eps_);
     if (isBN) {
       // the other two inputs in batch norm are for moving mean and var
@@ -177,7 +177,7 @@ void MKLDNNTester::checkBackwardWgts() {
                        << parameters_[REF][i]->getName();
     printVector(ref);
 
-    double delta = compareVector(dnn, ref);
+    double delta = compareVector(ref, dnn);
     EXPECT_LE(fabs(delta), eps_);
   }
 
diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp
index d60b0f04a1..2e8d9f3333 100644
--- a/paddle/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/gserver/tests/test_MKLDNN.cpp
@@ -271,20 +271,53 @@ TEST(MKLDNNLayer, BatchNormLayer) {
   testBatchNormLayer({16, 32, 16, 16});
 }
 
-struct testActDesc {
+struct testImageDesc {
   int bs, ic, ih, iw;
 };
 
-static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) {
+static void getAddtoConfig(TestConfig& cfg,
+                           const testImageDesc& pm,
+                           const size_t nInputs = 1) {
   cfg.biasSize = 0;
   cfg.layerConfig.set_type("addto");
   size_t layerSize = pm.ic * pm.ih * pm.iw;
   cfg.layerConfig.set_size(layerSize);
-  cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0});
-  cfg.layerConfig.add_inputs();
+  cfg.layerConfig.set_active_type("relu");
+  for (size_t i = 0; i < nInputs; ++i) {
+    std::stringstream ss;
+    ss << "layer_" << i;
+    cfg.inputDefs.push_back({INPUT_DATA, ss.str(), layerSize, 0});
+    LayerInputConfig* input = cfg.layerConfig.add_inputs();
+    ImageConfig* img_conf = input->mutable_image_conf();
+    img_conf->set_channels(pm.ic);
+    img_conf->set_img_size_y(pm.ih);
+    img_conf->set_img_size(pm.iw);
+  }
+}
+
+void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) {
+  CHECK_GE(nInputs, 1);
+  TestConfig dnnConfig;
+  getAddtoConfig(dnnConfig, pm, nInputs);
+  dnnConfig.layerConfig.set_type("mkldnn_addto");
+  // TODO(TJ): test with bias
+  for (auto withBias : {false}) {
+    if (withBias) {
+      dnnConfig.biasSize = pm.ic * pm.ih * pm.iw;
+    } else {
+      dnnConfig.biasSize = 0;
+    }
+    RUN_MKLDNN_TEST_LAYER(dnnConfig, "addto", pm)
+  }
+}
+
+TEST(MKLDNNLayer, AddtoLayer) {
+  testAddtoLayer({16, 5, 14, 14}, 1);
+  testAddtoLayer({8, 10, 8, 8}, 2);
+  testAddtoLayer({4, 12, 1, 1}, 3);
 }
 
-void testActivation(std::string actType, const testActDesc& pm) {
+void testActivation(std::string actType, const testImageDesc& pm) {
   // TODO(TJ): remove me when paddle support elu activation
   if (actType == "mkldnn_elu") {
     return;
-- 
GitLab
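
For context on what the new test checks: the reference "addto" layer that mkldnn_addto is compared against sums its inputs elementwise, optionally adds a bias, and then applies the configured activation (relu here; only the withBias = false case is exercised, per the TODO). Below is a minimal self-contained sketch of that reference semantics, assuming plain std::vector<float> buffers as a stand-in for Paddle's Matrix type and test harness; it is an illustration, not the layer's actual implementation.

#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>

// Reference semantics of "addto" with relu and no bias:
// out[j] = max(0, sum over inputs i of in[i][j]) for every element j.
std::vector<float> addtoRelu(const std::vector<std::vector<float>>& inputs) {
  assert(!inputs.empty());
  std::vector<float> out(inputs[0].size(), 0.0f);
  for (const auto& in : inputs) {
    assert(in.size() == out.size());  // every input has layerSize elements
    for (size_t j = 0; j < out.size(); ++j) {
      out[j] += in[j];
    }
  }
  for (auto& v : out) {
    v = std::max(v, 0.0f);  // the activation set by set_active_type("relu")
  }
  return out;
}

int main() {
  // Two inputs, mirroring the nInputs = 2 case in TEST(MKLDNNLayer, AddtoLayer).
  std::vector<std::vector<float>> inputs = {{1.0f, -2.0f, 3.0f, -4.0f},
                                            {0.5f, 1.0f, -5.0f, 4.5f}};
  for (float v : addtoRelu(inputs)) {
    std::cout << v << " ";  // prints: 1.5 0 0 0.5
  }
  std::cout << "\n";
  return 0;
}

Each {bs, ic, ih, iw} descriptor in TEST(MKLDNNLayer, AddtoLayer) sizes every input as ic * ih * iw elements per sample, so {8, 10, 8, 8} with nInputs = 2 sums two 640-element vectors for each of the 8 batch rows.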