Commit ad6b5319 authored by: T tensor-tang

add unit test for mkldnn_batch_norm layer

Parent 64eaeba1
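This change extends MKLDNNTester with a PassType so that the mkldnn_batch_norm layer can be checked against the reference batch_norm layer in both train and test phases. A minimal sketch of the extended run() interface follows; the shape {bs=2, ic=8, ih=4, iw=4} is illustrative only, and the helpers are the ones added in this commit (the real cases are in testBatchNormLayer() below):

// illustrative only: the shape is hypothetical, the helpers are defined in this commit
TestConfig dnnCfg;
getMKLDNNBatchNormConfig(dnnCfg, {/*bs*/ 2, /*ic*/ 8, /*ih*/ 4, /*iw*/ 4});
TestConfig refCfg = dnnCfg;
refCfg.layerConfig.set_type("batch_norm");
MKLDNNTester tester;
// train phase: both forward and backward results are compared
tester.run(dnnCfg, refCfg, /*batchSize=*/2, /*inputImgH=*/4, /*inputImgW=*/4, PASS_TRAIN);
// test phase: runOnce() returns right after checkForward(), so only forward is compared
tester.run(dnnCfg, refCfg, /*batchSize=*/2, /*inputImgH=*/4, /*inputImgW=*/4, PASS_TEST);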
......@@ -91,10 +91,16 @@ void MKLDNNTester::setInputImgSize() {
// init random parameters of ref, and copy to mkldnn
void MKLDNNTester::randomWgtDatas() {
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
const bool isBN = refLayer_->getType() == "batch_norm";
for (size_t i = 0; i < parameters_[REF].size(); ++i) {
const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& refValue = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
parameters_[REF][i]->randomize();
if (isBN && i == 2) {
// this param is the moving average in batch norm, which must be larger than 0
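// shifting by |min| + 1 makes every element at least 1.0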
real offset = fabs(refValue->getMin()) + 1.0;
refValue->add(offset);
}
dnnValue->copyFrom(*refValue);
VLOG(MKLDNN_TESTS) << "Random weight " << parameters_[DNN][i]->getName();
......@@ -132,8 +138,7 @@ void MKLDNNTester::checkForward() {
void MKLDNNTester::checkBackwardData() {
VLOG(MKLDNN_TESTS) << "Check Backward Data";
// TODO(TJ): uncomment me when batch norm ready
// const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
const bool isBN = refLayer_->getType() == "batch_norm";
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();
const MatrixPtr& refDiff = dataLayers_[REF][i]->getOutputGrad();
......@@ -144,11 +149,11 @@ void MKLDNNTester::checkBackwardData() {
double delta = compareMatrix(dnnDiff, refDiff);
EXPECT_LE(fabs(delta), eps_);
// TODO(TJ): uncomment me when batch norm ready
// if (isBN) {
// // the other two inputs in batch norm are for moving mean and var
// break;
// }
if (isBN) {
// the other two inputs in batch norm are for moving mean and var
// do not have grad to compare
break;
}
}
}
......@@ -308,10 +313,14 @@ double MKLDNNTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
void MKLDNNTester::runOnce() {
// test forward
randomBotDatas();
dnnLayer_->forward(PASS_TRAIN);
refLayer_->forward(PASS_TRAIN);
dnnLayer_->forward(passType_);
refLayer_->forward(passType_);
checkForward();
if (passType_ == PASS_TEST) {
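// inference has no backward pass, so only the forward results are compared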
return;
}
// test backward
// simple updater
UpdateCallback updateCallback = [](Parameter* para) {
......@@ -343,6 +352,7 @@ void MKLDNNTester::run(const TestConfig& dnn,
size_t batchSize,
size_t inputImgH,
size_t inputImgW,
PassType passType,
bool printDetails,
size_t iter,
float epsilon) {
......@@ -361,6 +371,7 @@ void MKLDNNTester::run(const TestConfig& dnn,
ih_ = inputImgH;
iw_ = inputImgW;
passType_ = passType;
log_ = printDetails;
iter_ = iter;
eps_ = epsilon;
......
......@@ -62,12 +62,15 @@ protected:
float eps_;
/// input image size, default 1
size_t ih_, iw_;
/// pass type: PASS_TRAIN, PASS_TEST, or PASS_GC (gradient check pass)
PassType passType_;
public:
explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) {
iter_ = iter;
eps_ = epsilon;
log_ = false;
passType_ = PASS_TRAIN;
}
~MKLDNNTester() {}
......@@ -78,6 +81,7 @@ public:
size_t batchSize,
size_t inputImgH = 1,
size_t inputImgW = 1,
PassType passType = PASS_TRAIN,
bool printDetails = false,
size_t iter = 3,
float epsilon = 1e-4);
......
......@@ -212,6 +212,66 @@ TEST(MKLDNNLayer, PoolLayer) {
testPoolLayer({2, 8, 56, 56, 29, 29, 3, 3, 1, 1, 2, 2});
}
struct testBatchNormDesc {
int bs;
int ic;
int ih, iw;
};
static void getMKLDNNBatchNormConfig(TestConfig& cfg,
const testBatchNormDesc& pm) {
cfg.layerConfig.set_size(pm.ic * pm.ih * pm.iw);
cfg.layerConfig.set_type("mkldnn_batch_norm");
cfg.biasSize = pm.ic;
cfg.inputDefs.push_back(
{INPUT_DATA,
"layer_0",
/* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
/* size of weight= */ size_t(pm.ic)});
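// the next two inputs carry the moving mean and variance; they are static since they have no gradient to update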
cfg.inputDefs.push_back(
{INPUT_DATA, "layer_1_moving_mean", 1, size_t(pm.ic)});
cfg.inputDefs.back().isStatic = true;
cfg.inputDefs.push_back({INPUT_DATA, "layer_2_moving_var", 1, size_t(pm.ic)});
cfg.inputDefs.back().isStatic = true;
LayerInputConfig* input = cfg.layerConfig.add_inputs();
// TODO(TJ): uncomment me once comparing all-zero vectors is refined and supported
// cfg.layerConfig.set_active_type("relu");
cfg.layerConfig.add_inputs();
cfg.layerConfig.add_inputs();
ImageConfig* img_conf = input->mutable_image_conf();
img_conf->set_channels(pm.ic);
img_conf->set_img_size_y(pm.ih);
img_conf->set_img_size(pm.iw);
}
void testBatchNormLayer(const testBatchNormDesc& pm) {
TestConfig dnnConfig;
getMKLDNNBatchNormConfig(dnnConfig, pm);
TestConfig refConfig = dnnConfig;
refConfig.layerConfig.set_type("batch_norm");
// for PASS_TRAIN, use_global_stats should always be false, and batch size != 1
VLOG(MKLDNN_TESTS) << "check train phase";
dnnConfig.layerConfig.set_use_global_stats(false);
refConfig.layerConfig.set_use_global_stats(false);
MKLDNNTester tester;
tester.run(dnnConfig, refConfig, pm.bs, pm.ih, pm.iw, PASS_TRAIN);
// for PASS_TEST, check both use_global_stats true and false, and also batch size 1
VLOG(MKLDNN_TESTS) << "check test phase";
for (auto useGS : {false, true}) {
dnnConfig.layerConfig.set_use_global_stats(useGS);
refConfig.layerConfig.set_use_global_stats(useGS);
MKLDNNTester tester;
for (auto bs : {pm.bs, 1}) {
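// bs == 1 exercises the single-sample corner case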
tester.run(dnnConfig, refConfig, bs, pm.ih, pm.iw, PASS_TEST);
}
}
}
TEST(MKLDNNLayer, BatchNormLayer) {
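// arguments are {bs, ic, ih, iw}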
testBatchNormLayer({4, 10, 6, 6});
testBatchNormLayer({16, 32, 16, 16});
}
struct testActDesc {
int bs, ic, ih, iw;
};
......