Commit ec9009f3 authored by T tensor-tang

add mkldnn tester

Parent 1203ebc4
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "MkldnnFcLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {
...@@ -41,6 +42,7 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
// create weight
weight_ =
std::unique_ptr<Weight>(new Weight(oc_, iLayerSize_, parameters_[0], 0));
initWgt();
// create biases
if (biasParameter_.get() != NULL) {
...@@ -49,6 +51,22 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
return true;
}
void MkldnnFcLayer::initWgt() {
// The weight_ is transposed from the initial paddle weight
MatrixPtr paddleWgt = Matrix::create(
weight_->getW()->getData(), iLayerSize_, oc_, false, false);
std::ostringstream ostr;
paddleWgt->print(ostr);
VLOG(DNN_BASE) << ostr.str();
// First, in mkldnn the matrix is the transpose of the initial paddle weight
MatrixPtr paddleWgtT;
paddleWgt->transpose(paddleWgtT, true);
weight_->getW()->copyFrom(*paddleWgtT);
}
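As an aside, the layout change initWgt() performs can be pictured with a minimal standalone sketch (plain C++ with illustrative names, independent of the paddle Matrix API): a row-major [iLayerSize x oc] paddle weight is rewritten as its [oc x iLayerSize] transpose, which is the order this mkldnn FC layer stores it in.

#include <cstddef>
#include <vector>

// Transpose a row-major (rows x cols) matrix into a row-major (cols x rows) one.
std::vector<float> transposeRowMajor(const std::vector<float>& src,
                                     size_t rows,
                                     size_t cols) {
  std::vector<float> dst(src.size());
  for (size_t r = 0; r < rows; ++r) {
    for (size_t c = 0; c < cols; ++c) {
      // element (r, c) of the source lands at (c, r) in the result
      dst[c * rows + r] = src[r * cols + c];
    }
  }
  return dst;
}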
void MkldnnFcLayer::reshape() {
const Argument& input = getInput(0);
int batchSize = input.getBatchSize();
......
...@@ -41,6 +41,8 @@ public:
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void initWgt();
void reshape();
void forward(PassType passType) override;
......
...@@ -26,7 +26,8 @@ namespace paddle {
bool MkldnnLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn. "
<< "Please set WITH_MKLDNN=ON "
<< "and set use_mkldnn=True";
// TODO(TJ): deviceId
return Layer::init(layerMap, parameterMap);
}
......
...@@ -18,6 +18,15 @@ add_unittest_without_exec(test_LayerGrad
add_test(NAME test_LayerGrad
COMMAND test_LayerGrad)
########## test_Mkldnn layers and activations ##########
if(WITH_MKLDNN)
add_unittest_without_exec(test_Mkldnn
test_Mkldnn.cpp
MkldnnTester.cpp
LayerGradUtil.cpp)
add_test(NAME test_Mkldnn COMMAND test_Mkldnn)
endif()
################ test_CRFLayerGrad ####################
add_unittest_without_exec(test_CRFLayerGrad
test_CRFLayerGrad.cpp
......
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MkldnnTester.h"
#include "paddle/gserver/layers/MkldnnBase.h"
namespace paddle {
// init the data layer and test layer of both dnn and reference
void MkldnnTester::reset(const TestConfig& dnn,
const TestConfig& ref,
size_t batchSize) {
const bool trans = false;
const bool useGpu = false;
// clear
configs_.clear();
layerNames_.clear();
dataLayers_.clear();
datas_.clear();
layerMaps_.clear();
parameters_.clear();
testLayers_.clear();
// resize
configs_.resize(NUM);
layerNames_.resize(NUM);
dataLayers_.resize(NUM);
datas_.resize(NUM);
layerMaps_.resize(NUM);
parameters_.resize(NUM);
testLayers_.resize(NUM);
// reset configs and layer names
configs_[DNN] = dnn;
configs_[REF] = ref;
layerNames_[DNN] = "mkldnn"; // the first is the mkldnn layer
layerNames_[REF] = "reference"; // the second is the reference layer
// reset others
for (size_t i = 0; i < NUM; ++i) {
configs_[i].layerConfig.set_name(layerNames_[i]);
initDataLayer(configs_[i],
&(dataLayers_[i]),
&(datas_[i]),
&(layerMaps_[i]),
layerNames_[i],
batchSize,
trans,
useGpu);
initTestLayer(
configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
}
dnnLayer_ = testLayers_[DNN];
refLayer_ = testLayers_[REF];
EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
setInputImgSize();
}
void MkldnnTester::setInputImgSize() {
for (size_t n = 0; n < dataLayers_.size(); ++n) {
for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
// TODO(TJ): fix me when concat and elewise ready
dataLayers_[n][i]->getOutput().setFrameHeight(ih_);
dataLayers_[n][i]->getOutput().setFrameWidth(iw_);
}
}
}
// init random parameters of ref, and copy them to mkldnn
void MkldnnTester::randomWgtDatas() {
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
for (size_t i = 0; i < parameters_[REF].size(); ++i) {
const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& refValue = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
parameters_[REF][i]->randomize();
dnnValue->copyFrom(*refValue);
VLOG(lvl_) << "Random weight data " << parameters_[DNN][i]->getName();
printVector(dnnValue);
}
}
// randomize the bot data of the ref layer and copy the same to mkldnn
void MkldnnTester::randomBotDatas() {
CHECK_EQ(dataLayers_.size(), NUM);
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
dataLayers_[REF][i]->getOutputValue()->randomizeUniform();
dataLayers_[DNN][i]->getOutputValue()->copyFrom(
*(dataLayers_[REF][i]->getOutputValue()));
VLOG(lvl_) << "Input " << i << " data:";
printMatrix(dataLayers_[REF][i]->getOutputValue());
}
}
void MkldnnTester::randomTopDiffs() {
refLayer_->getOutputGrad()->randomizeUniform();
dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
printMatrix(refLayer_->getOutputGrad());
}
void MkldnnTester::checkForward() {
printTopDatas();
double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
testLayers_[REF]->getOutputValue());
VLOG(DNN_TESTS_DETAILS) << "Check Forward";
EXPECT_LE(fabs(delta), eps_);
}
void MkldnnTester::checkBackwardData() {
const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();
const MatrixPtr& refDiff = dataLayers_[REF][i]->getOutputGrad();
VLOG(lvl_) << "Mkldnn Backward Output BotDiff " << i;
printMatrix(dnnDiff);
VLOG(lvl_) << "Reference Backward Output BotDiff " << i;
printMatrix(refDiff);
double delta = compareMatrix(dnnDiff, refDiff);
EXPECT_LE(fabs(delta), eps_);
if (isBN) {
// the other two inputs in batch norm are for moving mean and var
break;
}
}
}
void MkldnnTester::checkBackwardWgts() {
CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
vector<VectorPtr> dnnWgts; // used to temporarily save mkldnn weights
saveWgt(parameters_[DNN], dnnWgts);
// TODO(TJ): cvtWgtToPaddle
for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
VLOG(lvl_) << "Mkldnn Output weight " << parameters_[DNN][i]->getName();
printVector(dnn);
VLOG(lvl_) << "Reference Output weight " << parameters_[REF][i]->getName();
printVector(ref);
double delta = compareVector(dnn, ref);
EXPECT_LE(fabs(delta), eps_);
}
VLOG(DNN_TESTS_DETAILS) << "Restore dnn weights before comapre";
restoreWgt(dnnWgts, parameters_[DNN]);
}
void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
vector<VectorPtr>& to) {
const bool useGpu = false;
to.resize(from.size());
for (size_t i = 0; i < to.size(); ++i) {
const VectorPtr& wgt = from[i]->getBuf(PARAMETER_VALUE);
to[i] = Vector::create(wgt->getSize(), useGpu);
to[i]->copyFrom(*wgt);
}
}
void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
vector<ParameterPtr>& to) {
CHECK_EQ(from.size(), to.size());
for (size_t i = 0; i < from.size(); ++i) {
const VectorPtr& wgt = to[i]->getBuf(PARAMETER_VALUE);
wgt->copyFrom(*from[i]);
}
}
// clear parameters grad
void MkldnnTester::clearWgtDiffs() {
for (size_t n = 0; n < parameters_.size(); ++n) {
for (size_t i = 0; i < parameters_[n].size(); ++i) {
const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
if (grad) {
grad->zeroMem();
}
}
}
}
void MkldnnTester::clearBotDiffs() {
// dnn and ref
for (size_t n = 0; n < dataLayers_.size(); ++n) {
// all inputs layers
for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
dataLayers_[n][i]->getOutputGrad()->zeroMem();
}
}
}
void MkldnnTester::clearBotDiffs(int n) {
CHECK_LT(n, NUM);
// all inputs layers
for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
dataLayers_[n][i]->getOutputGrad()->zeroMem();
}
}
void MkldnnTester::clearTopDatas() {
for (size_t i = 0; i < testLayers_.size(); ++i) {
testLayers_[i]->getOutputValue()->zeroMem();
}
}
void MkldnnTester::printTopDatas() {
if (!log_) {
return;
}
for (int n = 0; n < NUM; ++n) {
VLOG(lvl_) << testLayers_[n]->getType() << " forward output TopData: ";
printMatrix(testLayers_[n]->getOutputValue());
}
}
void MkldnnTester::printMatrix(const MatrixPtr& m) {
if (!log_) {
return;
}
#ifdef _DEBUG
std::ostringstream str;
m->print(str);
VLOG(lvl_) << str.str();
#endif
}
void MkldnnTester::printVector(const VectorPtr& v) {
if (!log_) {
return;
}
CHECK(v);
CHECK(v->getData());
const real* pd = v->getData();
const size_t sz = v->getSize();
std::stringstream row;
for (size_t i = 0; i < sz; ++i) {
row << pd[i] << ", ";
}
VLOG(lvl_) << row.str();
}
double MkldnnTester::getDelta(const real* d1,
const real* d2,
size_t len,
const float failRate,
const float thres) {
double delta = 0, sum = 0;
int failCnt = 0;
const double eps = 1e-5;
double maxOut = 0;
for (size_t i = 0; i < len; ++i) {
double ref = fabs(d2[i]);
double diff = fabs(d1[i] - d2[i]);
delta += diff;
sum += ref;
if (ref > eps && fabs(d1[i]) > eps && diff / ref > thres) {
maxOut = std::max(maxOut, diff / ref);
failCnt++;
}
}
EXPECT_TRUE(std::isnormal(sum));
EXPECT_FALSE(std::isinf(sum));
EXPECT_FALSE(std::isnan(delta));
VLOG(DNN_TESTS_MORE) << "reference avg data: " << sum / len
<< ", delta: " << delta / sum << ", failCnt:" << failCnt;
return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
}
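For intuition, here is a small self-contained example (hypothetical numbers, not part of the commit) of the value getDelta() returns in the common case, namely the relative L1 error sum(|d1 - d2|) / sum(|d2|):

#include <cmath>
#include <cstdio>

int main() {
  const float dnn[] = {1.00001f, 2.0f, 3.0f};  // pretend mkldnn output
  const float ref[] = {1.0f, 2.0f, 3.0f};      // pretend reference output
  double diff = 0, sum = 0;
  for (int i = 0; i < 3; ++i) {
    diff += std::fabs(dnn[i] - ref[i]);
    sum += std::fabs(ref[i]);
  }
  // diff / sum is about 1e-5 / 6, well below the default eps_ of 1e-4,
  // so a check like EXPECT_LE(fabs(delta), eps_) would pass.
  std::printf("relative delta: %g\n", diff / sum);
  return 0;
}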
double MkldnnTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
CHECK_EQ(m1->getElementCnt(), m2->getElementCnt());
return getDelta(m1->getData(), m2->getData(), m1->getElementCnt());
}
double MkldnnTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
CHECK_EQ(v1->getSize(), v2->getSize());
return getDelta(v1->getData(), v2->getData(), v1->getSize());
}
void MkldnnTester::runOnce() {
// test forward
randomBotDatas();
dnnLayer_->forward(PASS_TRAIN);
refLayer_->forward(PASS_TRAIN);
checkForward();
// test backward
randomTopDiffs();
dnnLayer_->backward(nullptr);
refLayer_->backward(nullptr);
checkBackwardData();
checkBackwardWgts();
// clear buffers
// the ref code will add to the diff, while the dnn code will write to it
clearBotDiffs(REF);
// the two below should be covered by the test layers
// clearTopDatas();
// clearWgtDiffs();
}
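The comment about clearing only the REF side is worth unpacking. Here is a contrived sketch (not from the commit) of the two gradient-writing styles it refers to: an accumulate-style backward keeps stale values across iterations unless the buffer is zeroed in between, while an overwrite-style backward does not.

// ref-style backward: accumulates into the gradient buffer, so it must be
// zeroed between iterations to avoid carrying over stale diffs.
void addTo(float* grad, const float* diff, int n) {
  for (int i = 0; i < n; ++i) grad[i] += diff[i];
}

// dnn-style backward: overwrites the gradient buffer, so no clearing is needed.
void writeTo(float* grad, const float* diff, int n) {
  for (int i = 0; i < n; ++i) grad[i] = diff[i];
}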
void MkldnnTester::run(const TestConfig& dnn,
const TestConfig& ref,
size_t batchSize,
size_t inputImgH,
size_t inputImgW,
size_t iter,
float epsilon,
bool log,
int level) {
VLOG(DNN_TESTS) << "Test MKLDNN functionality: " << dnn.layerConfig.type()
<< " vs " << ref.layerConfig.type();
ih_ = inputImgH;
iw_ = inputImgW;
iter_ = iter;
eps_ = epsilon;
log_ = log;
lvl_ = level;
// First, always set the flag to false to initialize from the paddle weight
TestConfig first = dnn;
// first.layerConfig.set_init_wgt_from_mkldnn(false);
// reset and run once
reset(first, ref, batchSize);
randomWgtDatas();
clearWgtDiffs();
clearBotDiffs();
VLOG(DNN_TESTS) << "Check Iteration 0";
runOnce();
// then get the flag
bool initWgtFromMkldnn = false;
// dnn.layerConfig.has_init_wgt_from_mkldnn() &&
// dnn.layerConfig.init_wgt_from_mkldnn();
if (initWgtFromMkldnn) {
// after running once, the mkldnn weight has been stored in the dnn layer,
// so save the weights and restart again
vector<VectorPtr> dnnWgts, refWgts;
CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
saveWgt(parameters_[DNN], dnnWgts);
saveWgt(parameters_[REF], refWgts);
// restart again with flag true
reset(dnn, ref, batchSize);
// restore wgt
restoreWgt(dnnWgts, parameters_[DNN]);
restoreWgt(refWgts, parameters_[REF]);
clearWgtDiffs();
clearBotDiffs();
// at least run once
runOnce();
}
for (size_t i = 1; i < iter_; ++i) {
VLOG(DNN_TESTS) << "Check Iteration " << i;
runOnce();
}
}
} // namespace paddle
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "LayerGradUtil.h"
#include "paddle/gserver/layers/MkldnnBase.h"
namespace paddle {
/**
 * @brief Test the functionality of Mkldnn layers
 * against the original paddle reference implementations.
 */
class MkldnnTester {
enum {
DNN = 0,
REF = 1,
NUM = 2,
};
protected:
std::vector<TestConfig> configs_;
vector<string> layerNames_;
vector<vector<DataLayerPtr>> dataLayers_;
vector<vector<Argument>> datas_;
vector<LayerMap> layerMaps_;
vector<vector<ParameterPtr>> parameters_;
vector<LayerPtr> testLayers_;
LayerPtr dnnLayer_, refLayer_;
/// number of iterations to run; all the results should pass
size_t iter_;
/// whether to print out the details
bool log_;
/// vlog level at which to print the matrix detail data
int lvl_;
/// epsilon
float eps_;
/// input image size, default 1
size_t ih_, iw_;
public:
explicit MkldnnTester(size_t iter = 3, float epsilon = 1e-4) {
iter_ = iter;
eps_ = epsilon;
log_ = false;
lvl_ = DNN_TESTS_MORE;
}
~MkldnnTester() {}
public:
void run(const TestConfig& dnn,
const TestConfig& ref,
size_t batchSize,
size_t inputImgH = 1,
size_t inputImgW = 1,
size_t iter = 3,
float epsilon = 1e-4,
bool log = false,
int level = DNN_TESTS_MORE);
void setLogLevel(int lvl) { lvl_ = lvl; }
private:
void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize);
void setInputImgSize();
void runOnce();
void randomWgtDatas();
void randomBotDatas();
void randomTopDiffs();
void checkForward();
void checkBackwardData();
void checkBackwardWgts();
void clearWgtDiffs();
void clearBotDiffs();
void clearBotDiffs(int n); // clear specific layer
void clearTopDatas();
void printTopDatas();
void printMatrix(const MatrixPtr& m);
void printVector(const VectorPtr& v);
void saveWgt(const vector<ParameterPtr>& from, vector<VectorPtr>& to);
void restoreWgt(const vector<VectorPtr>& from, vector<ParameterPtr>& to);
double compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2);
double compareVector(const VectorPtr& v1, const VectorPtr& v2);
/**
 * Get the relative delta.
 * If many (> failRate) points are wrong, i.e. abs(dnn-ref)/abs(ref) > thres,
 * return max(diff/ref);
 * otherwise return sum(abs(d1-d2)) / sum(abs(d2)), which should be smaller
 * than eps.
 */
double getDelta(const real* d1,
const real* d2,
size_t len,
const float failRate = 1e-3,
const float thres = 0.1);
};
} // namespace paddle
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include "MkldnnTester.h"
#include "ModelConfig.pb.h"
using namespace paddle; // NOLINT
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(use_gpu);
DECLARE_bool(use_mkldnn);
struct testFCDesc {
int bs;
int ic;
int oc;
int ih, iw; // oh == ow == 1
};
void testFcLayer(const testFCDesc& pm) {
const std::string compareTypes[] = {"mkldnn_fc", "fc"};
TestConfig cfg;
cfg.layerConfig.set_type(compareTypes[0]);
cfg.layerConfig.set_size(pm.oc);
cfg.inputDefs.push_back(
{INPUT_DATA,
"layer_0",
/* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
/* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
cfg.layerConfig.add_inputs();
MkldnnTester tester;
for (auto biasSize : {pm.oc, 0}) {
cfg.biasSize = biasSize;
TestConfig ref = cfg;
ref.layerConfig.set_type(compareTypes[1]);
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
}
}
TEST(MkldnnLayer, fcLayer) {
testFcLayer({2, 2, 3, 1, 1});
/*
testFcLayer({16, 32, 64, 1, 1});
testFcLayer({8, 16, 32, 13, 13});
testFcLayer({4, 12, 18, 13, 11});
testFcLayer({2, 64, 32, 16, 16});
testFcLayer({15, 3, 6, 16, 16});
*/
}
// TODO(TJ): add branch test
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
FLAGS_use_gpu = false;
FLAGS_use_mkldnn = true;
initMain(argc, argv);
FLAGS_thread_local_rand_use_global_seed = true;
srand(1);
return RUN_ALL_TESTS();
}