Commit 2dc6cea6 authored by tensor-tang

add unit tests for mkldnn branches, and fix typo

Parent 1199aa68
...@@ -26,7 +26,10 @@ if(WITH_MKLDNN)
      test_MKLDNN.cpp
      MKLDNNTester.cpp
      LayerGradUtil.cpp)
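  # The branch tests load Python layer configs at runtime, so the binary is
  # run through .set_python_path.sh to get PYTHONPATH and the working
  # directory right.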
-  add_test(NAME test_MKLDNN COMMAND test_MKLDNN)
+  add_test(NAME test_MKLDNN
+    COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python
+            ${CMAKE_CURRENT_BINARY_DIR}/test_MKLDNN
+    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
endif()
################ test_CRFLayerGrad ####################
......
...@@ -15,6 +15,7 @@ limitations under the License. */
#include "MKLDNNTester.h"
#include "paddle/gserver/layers/MKLDNNBase.h"
#include "paddle/gserver/layers/MKLDNNLayer.h"
#include "paddle/trainer/Trainer.h"
namespace paddle {
...@@ -315,6 +316,7 @@ void MKLDNNTester::runOnce() {
    auto& value = para->getBuf(PARAMETER_VALUE);
    real lr = 1e-3;
    value->add(*grad, lr);
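    // zero the gradient once it has been applied, so the next iteration
    // starts from a clean buffer instead of accumulating stale gradients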
    grad->zeroMem();
  };
  randomTopDiffs();
  dnnLayer_->backward(updateCallback);
...@@ -411,4 +413,143 @@ void MKLDNNTester::run(const TestConfig& dnn,
  }
}
void MKLDNNTester::initArgument(DataIn& data,
                                const std::string& configPath,
                                const size_t iter) {
  TrainerConfigHelper config(configPath);
  size_t batchSize = config.getOptConfig().batch_size();
  data.inArgs.resize(iter);
  data.outGrads.resize(iter);
  data.paraValues.clear();
  for (const auto& layer_name : config.getModelConfig().input_layer_names()) {
    auto layer_config = std::find_if(config.getModelConfig().layers().begin(),
                                     config.getModelConfig().layers().end(),
                                     [=](const LayerConfig& layer_config) {
                                       return layer_config.name() == layer_name;
                                     });
    CHECK(layer_config != config.getModelConfig().layers().end());

    size_t layerSize = layer_config->size();
    for (size_t i = 0; i < iter; ++i) {
      Argument arg;
      arg.value = Matrix::create(batchSize, layerSize, false, false);
      arg.grad = Matrix::create(batchSize, layerSize, false, false);
      arg.value->randomizeUniform();
      arg.value->add(-0.5);
      arg.value->sigmoid(*arg.value);
      arg.grad->zeroMem();
      arg.ids = VectorT<int>::create(batchSize, false);
      arg.ids->rand(layerSize);
      generateSequenceStartPositions(batchSize, arg.sequenceStartPositions);
      data.inArgs[i].push_back(arg);
    }
  }

  for (const auto& layer_name : config.getModelConfig().output_layer_names()) {
    auto layer_config = std::find_if(config.getModelConfig().layers().begin(),
                                     config.getModelConfig().layers().end(),
                                     [=](const LayerConfig& layer_config) {
                                       return layer_config.name() == layer_name;
                                     });
    CHECK(layer_config != config.getModelConfig().layers().end());

    size_t layerSize = layer_config->size();
    for (size_t i = 0; i < iter; ++i) {
      MatrixPtr grad = Matrix::create(batchSize, layerSize, false, false);
      grad->randomizeUniform();
      data.outGrads[i].push_back(grad);
    }
  }

  for (const auto& para_config : config.getModelConfig().parameters()) {
    VectorPtr value = Vector::create(para_config.size(), false);
    value->randnorm(0, 2);
    data.paraValues.push_back(value);
  }
}
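A note on the data recipe above: input values are uniform noise, shifted to be zero-centered, then squashed through a sigmoid so activations stay in a smooth, bounded range; top-level gradients stay plain uniform noise, and parameters draw from randnorm(0, 2). A minimal numpy sketch of the input recipe (assuming randomizeUniform() fills [0, 1); names here are illustrative):

```python
import numpy as np

# Sketch of the per-input recipe in initArgument(): uniform noise,
# zero-centered, squashed through a sigmoid.
def make_input(batch_size, layer_size, seed=1):
    rng = np.random.default_rng(seed)
    x = rng.uniform(size=(batch_size, layer_size)) - 0.5
    return 1.0 / (1.0 + np.exp(-x))  # values land in roughly (0.38, 0.62)
```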
void MKLDNNTester::getOutResult(const std::string& configPath,
                                DataIn& in,
                                DataOut& out,
                                bool use_mkldnn,
                                size_t iter) {
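  // Pin execution to the CPU and fix every random seed before building the
  // trainer, so the reference run and the MKL-DNN run consume identical
  // randomness and remain directly comparable.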
  FLAGS_use_gpu = false;
  FLAGS_use_mkldnn = use_mkldnn;
  *ThreadLocalRand::getSeed() = 1;
  srand(1);

  Trainer trainer;
  auto config = std::make_shared<TrainerConfigHelper>(configPath);
  trainer.init(config, false);
  auto gradientMachine = trainer.getGradientMachine();
  std::vector<ParameterPtr> parameters = gradientMachine->getParameters();
  for (size_t i = 0; i < in.paraValues.size(); i++) {
    parameters[i]->getBuf(PARAMETER_VALUE)->copyFrom(*in.paraValues[i]);
  }
  UpdateCallback simpleUpdate = [](Parameter* para) {
    auto& grad = para->getBuf(PARAMETER_GRADIENT);
    auto& value = para->getBuf(PARAMETER_VALUE);
    real lr = 1e-2;
    value->add(*grad, lr);
    grad->zeroMem();
  };

  vector<Argument> outArgs;
  gradientMachine->start();
  out.outValues.clear();
  out.paraValues.clear();
  for (size_t i = 0; i < iter; ++i) {
    VLOG(MKLDNN_TESTS) << "running iteration " << i;
    gradientMachine->forward(in.inArgs[i], &outArgs, PASS_TRAIN);
    // save forward result
    for (size_t k = 0; k < outArgs.size(); k++) {
      MatrixPtr value = Matrix::create(outArgs[k].value->getHeight(),
                                       outArgs[k].value->getWidth(),
                                       false,
                                       false);
      value->copyFrom(*outArgs[k].value);
      out.outValues.push_back(value);
    }
    // random backward input
    for (size_t k = 0; k < outArgs.size(); k++) {
      outArgs[k].grad->copyFrom(*in.outGrads[i][k]);
    }
    gradientMachine->backward(simpleUpdate);
  }
  gradientMachine->finish();

  // save param value
  for (size_t i = 0; i < in.paraValues.size(); i++) {
    VectorPtr val = Vector::create(
        parameters[i]->getBuf(PARAMETER_VALUE)->getSize(), false);
    val->copyFrom(*parameters[i]->getBuf(PARAMETER_VALUE));
    out.paraValues.push_back(val);
  }
}
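Note that out.outValues is flattened across iterations: every forward pass appends one matrix per output layer, and compareResult() pairs the CPU and MKL-DNN entries positionally, so both runs must use the same iteration count and see outputs in the same order.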
void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
  CHECK_EQ(ref.outValues.size(), dnn.outValues.size());
  CHECK_EQ(ref.paraValues.size(), dnn.paraValues.size());
  for (size_t i = 0; i < ref.outValues.size(); i++) {
    EXPECT_LE(fabs(compareMatrix(ref.outValues[i], dnn.outValues[i])), eps);
  }
  for (size_t i = 0; i < ref.paraValues.size(); i++) {
    EXPECT_LE(fabs(compareVector(ref.paraValues[i], dnn.paraValues[i])), eps);
  }
}
void MKLDNNTester::runBranchesTest(const std::string& configPath,
                                   size_t iter,
                                   float eps) {
  DataIn in;
  initArgument(in, configPath, iter);

  DataOut outCpu, outDnn;
  getOutResult(configPath, in, outCpu, false, iter);
  getOutResult(configPath, in, outDnn, true, iter);

  compareResult(outCpu, outDnn, eps);
}
}  // namespace paddle
...@@ -33,6 +33,17 @@ class MKLDNNTester {
    NUM = 2,  // Number of total
  };
  struct DataIn {
    std::vector<std::vector<Argument>> inArgs;
    std::vector<std::vector<MatrixPtr>> outGrads;
    std::vector<VectorPtr> paraValues;
  };

  struct DataOut {
    std::vector<MatrixPtr> outValues;
    std::vector<VectorPtr> paraValues;
  };
protected:
  std::vector<TestConfig> configs_;
  vector<string> layerNames_;
...@@ -74,7 +85,17 @@ public:
                 float epsilon = 1e-4,
                 bool log = false,
                 int level = MKLDNN_ALL);
-  void setLogLevel(int lvl) { lvl_ = lvl; }
+  static void runBranchesTest(const std::string& configPath,
+                              size_t iter = 3,
+                              float eps = 1e-4);
+  static void initArgument(DataIn& data,
+                           const std::string& configPath,
+                           size_t iter = 3);
+  static void getOutResult(const std::string& configPath,
+                           DataIn& in,
+                           DataOut& out,
+                           bool use_mkldnn,
+                           size_t iter = 3);
private:
  void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize);
...@@ -101,8 +122,9 @@ private:
  void saveWgt(const vector<ParameterPtr>& from, vector<VectorPtr>& to);
  void restoreWgt(const vector<VectorPtr>& from, vector<ParameterPtr>& to);
-  double compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2);
-  double compareVector(const VectorPtr& v1, const VectorPtr& v2);
+  static double compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2);
+  static double compareVector(const VectorPtr& v1, const VectorPtr& v2);
+  static void compareResult(DataOut& ref, DataOut& dnn, float eps = 1e-4);
  /**
   * Get delta percent
...@@ -111,7 +133,7 @@
   * else return sum(abs(a-b)) / sum(abs(b))
   * The return value should be smaller than eps when passing.
   */
-  double getDelta(const real* d1,
+  static double getDelta(const real* d1,
                         const real* d2,
                         size_t len,
                         const float failRate = 1e-3,
......
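The doc comment above getDelta() pins down the comparison metric that compareMatrix() and compareVector() reduce to. A standalone numpy sketch of that metric, with illustrative names and defaults (not the committed implementation):

```python
import numpy as np

# Sketch of the delta metric described above getDelta(): if more than
# fail_rate of the elements are "wrong" (|a - b| > eps), report the worst
# per-element relative error; otherwise report sum(|a-b|) / sum(|b|).
def delta_percent(a, b, fail_rate=1e-3, eps=1e-5):
    diff = np.abs(a - b)
    ref = np.abs(b)
    failed = diff > eps
    if failed.mean() > fail_rate:
        ratio = np.where(ref > 0, diff / ref, diff)
        return ratio[failed].max()
    return diff.sum() / max(ref.sum(), 1e-12)
```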
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

settings(batch_size=2)

data = data_layer(name="input", size=3*4*4)

conv = img_conv_layer(input=data,
                      num_channels=1,
                      filter_size=3,
                      num_filters=2,
                      padding=1,
                      shared_biases=True,
                      act=ReluActivation())

b1 = img_conv_layer(input=conv,
                    filter_size=1,
                    num_filters=2,
                    padding=0,
                    shared_biases=True,
                    act=ReluActivation())

b2 = img_conv_layer(input=conv,
                    filter_size=3,
                    num_filters=2,
                    padding=1,
                    shared_biases=True,
                    act=ReluActivation())

concat = addto_layer(input=[b1, b2])
outputs(concat)
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

settings(batch_size=16)

data = data_layer(name="input", size=32*16*16)

conv = img_conv_layer(input=data,
                      num_channels=32,
                      filter_size=3,
                      num_filters=32,
                      padding=1,
                      shared_biases=True,
                      act=ReluActivation())

b1 = img_conv_layer(input=conv,
                    filter_size=1,
                    num_filters=32,
                    padding=0,
                    shared_biases=True,
                    act=ReluActivation())

b2 = img_conv_layer(input=conv,
                    filter_size=3,
                    num_filters=32,
                    padding=1,
                    shared_biases=True,
                    act=ReluActivation())

concat = addto_layer(input=[b1, b2])
outputs(concat)
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

settings(batch_size=2)

data = data_layer(name="input", size=3*4*4)

conv = img_conv_layer(input=data,
                      num_channels=1,
                      filter_size=3,
                      num_filters=2,
                      padding=1,
                      shared_biases=True,
                      act=ReluActivation())

b1 = img_conv_layer(input=conv,
                    filter_size=1,
                    num_filters=2,
                    padding=0,
                    shared_biases=True,
                    act=ReluActivation())

b2 = img_conv_layer(input=conv,
                    filter_size=3,
                    num_filters=2,
                    padding=1,
                    shared_biases=True,
                    act=ReluActivation())

concat = concat_layer(input=[b1, b2])
outputs(concat)
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

settings(batch_size=16)

data = data_layer(name="input", size=32*16*16)

conv = img_conv_layer(input=data,
                      num_channels=32,
                      filter_size=3,
                      num_filters=32,
                      padding=1,
                      shared_biases=True,
                      act=ReluActivation())

b1 = img_conv_layer(input=conv,
                    filter_size=1,
                    num_filters=32,
                    padding=0,
                    shared_biases=True,
                    act=ReluActivation())

b2 = img_conv_layer(input=conv,
                    filter_size=3,
                    num_filters=32,
                    padding=1,
                    shared_biases=True,
                    act=ReluActivation())

concat = concat_layer(input=[b1, b2])
outputs(concat)
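The four configs above pair the two merge styles (addto_layer, concat_layer) with two sizes (2 and 32 filters); each builds one stem convolution that fans out into a 1x1 branch and a 3x3 branch before merging. A new case only needs another file in the same shape; a hypothetical example (not part of this commit) that adds a third 5x5 branch, built from the same helpers:

```python
from paddle.trainer_config_helpers import *

# Hypothetical extra branch case: three parallel convolutions merged by concat.
settings(batch_size=2)
data = data_layer(name="input", size=3*4*4)
conv = img_conv_layer(input=data, num_channels=1, filter_size=3, num_filters=2,
                      padding=1, shared_biases=True, act=ReluActivation())
b1 = img_conv_layer(input=conv, filter_size=1, num_filters=2, padding=0,
                    shared_biases=True, act=ReluActivation())
b2 = img_conv_layer(input=conv, filter_size=3, num_filters=2, padding=1,
                    shared_biases=True, act=ReluActivation())
b3 = img_conv_layer(input=conv, filter_size=5, num_filters=2, padding=2,
                    shared_biases=True, act=ReluActivation())
outputs(concat_layer(input=[b1, b2, b3]))
```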
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <paddle/utils/PythonUtil.h>
#include <string>
#include <vector>
#include "MKLDNNTester.h"
...@@ -40,13 +41,13 @@ DECLARE_bool(use_mkldnn);
struct testFcDesc {
  int bs;
  int ic;
-  int oc;
  int ih, iw;  // oh == ow == 1
+  int oc;
};
static void getMKLDNNFcConfig(TestConfig& cfg, const testFcDesc& pm) {
  cfg.layerConfig.set_type("mkldnn_fc");
-  cfg.layerConfig.set_active_type("sigmoid");
+  cfg.layerConfig.set_active_type("relu");
  cfg.layerConfig.set_size(pm.oc);
  cfg.inputDefs.push_back(
      {INPUT_DATA,
...@@ -247,13 +248,23 @@ TEST(MKLDNNActivation, Activations) {
  }
}
-// TODO(TJ): add branch test
+TEST(MKLDNNLayer, branches) {
+  std::vector<std::string> cases = {"conv_conv_concat",
+                                    "conv_conv_concat_32c",
+                                    "conv_conv_addto",
+                                    "conv_conv_addto_32c"};
+  for (auto name : cases) {
+    std::string config = "./gserver/tests/mkldnn_branches_" + name + ".conf";
+    MKLDNNTester::runBranchesTest(config);
+  }
+}
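Adding a branch case is just a matter of dropping a new mkldnn_branches_<name>.conf into gserver/tests and appending <name> to the cases vector above; iteration count and tolerance fall back to runBranchesTest's defaults (3 iterations, eps = 1e-4).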
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  FLAGS_use_gpu = false;
  FLAGS_use_mkldnn = true;
  initMain(argc, argv);
  initPython(argc, argv);
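  // initPython above is new in this commit: the branch tests parse Python
  // trainer configs, so the embedded interpreter must be up before the tests run.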
  FLAGS_thread_local_rand_use_global_seed = true;
  srand(1);
  return RUN_ALL_TESTS();
......