Commit fa722385 authored by tensor-tang

refine test_MKLDNN and skip memory copy for relu

Parent d865b047
@@ -131,8 +131,9 @@ public:
     fwdPD_.reset(new eltwise_fwd::primitive_desc(fwdDesc, eng));
     // use inplace for forward but save input value before submit
     inVal_ = val_;
-    if (act.grad) {
-      // only copy when need do backward
+    copyInVal_ = nullptr;
+    if (act.grad && algo == mkldnn::algorithm::eltwise_tanh) {
+      // tanh need save src input for backward
       inVal_ = MKLDNNMatrix::create(nullptr, val_->getPrimitiveDesc());
       copyInVal_ = std::make_shared<mkldnn::reorder>(*val_, *inVal_);
       CHECK(copyInVal_) << "should not be emptry";
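The hunk above is in the MKLDNN eltwise activation code: copyInVal_ now stays null unless the algorithm is eltwise_tanh, so the extra reorder of the input is skipped for relu. A minimal standalone sketch of the reasoning, assuming the default zero negative slope for relu (the names below are invented for illustration and are not Paddle or MKL-DNN APIs):

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// relu with zero negative slope: y = max(x, 0), so dx = dy where y > 0 and 0
// elsewhere. The mask can be rebuilt from the output that the in-place
// forward leaves behind, so no copy of the original input is needed.
std::vector<float> reluBackwardFromOutput(const std::vector<float>& y,
                                          const std::vector<float>& dy) {
  assert(y.size() == dy.size());
  std::vector<float> dx(y.size());
  for (std::size_t i = 0; i < y.size(); ++i) {
    dx[i] = y[i] > 0.f ? dy[i] : 0.f;  // gradient mask recovered from the output
  }
  return dx;
}

// tanh: dx = dy * (1 - tanh(x)^2). Per the commit's comment, the tanh backward
// path still consumes the original src, which is why the input is reordered
// into inVal_ before the in-place forward overwrites it.
std::vector<float> tanhBackwardFromInput(const std::vector<float>& x,
                                         const std::vector<float>& dy) {
  assert(x.size() == dy.size());
  std::vector<float> dx(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    const float y = std::tanh(x[i]);
    dx[i] = dy[i] * (1.f - y * y);  // needs the saved source x
  }
  return dx;
}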
@@ -26,17 +26,26 @@ DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_bool(use_gpu);
 DECLARE_bool(use_mkldnn);
 
-struct testFCDesc {
+#define RUN_MKLDNN_TEST(DNN_CONFIG, REF_CONFIG, DESC)         \
+  MKLDNNTester tester;                                        \
+  for (auto bs : {DESC.bs, 1}) {                              \
+    tester.run(DNN_CONFIG, REF_CONFIG, bs, DESC.ih, DESC.iw); \
+  }
+
+#define RUN_MKLDNN_TEST_LAYER(DNN_CONFIG, REF_TYPE, DESC) \
+  TestConfig ref = DNN_CONFIG;                            \
+  ref.layerConfig.set_type(REF_TYPE);                     \
+  RUN_MKLDNN_TEST(DNN_CONFIG, ref, DESC)
+
+struct testFcDesc {
   int bs;
   int ic;
   int oc;
   int ih, iw;  // oh == ow == 1
 };
 
-void testFcLayer(const testFCDesc& pm) {
-  const std::string compareTypes[] = {"mkldnn_fc", "fc"};
-  TestConfig cfg;
-  cfg.layerConfig.set_type(compareTypes[0]);
+static void getMKLDNNFcConfig(TestConfig& cfg, const testFcDesc& pm) {
+  cfg.layerConfig.set_type("mkldnn_fc");
   cfg.layerConfig.set_size(pm.oc);
   cfg.inputDefs.push_back(
       {INPUT_DATA,
@@ -44,25 +53,25 @@ void testFcLayer(const testFcDesc& pm) {
        /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
        /* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
   cfg.layerConfig.add_inputs();
+}
 
-  MKLDNNTester tester;
+void testFcLayer(const testFcDesc& pm) {
+  TestConfig dnnConfig;
+  getMKLDNNFcConfig(dnnConfig, pm);
   for (auto biasSize : {pm.oc, 0}) {
-    cfg.biasSize = biasSize;
-    TestConfig ref = cfg;
-    ref.layerConfig.set_type(compareTypes[1]);
-    for (auto bs : {pm.bs, 1}) {
-      tester.run(cfg, ref, bs, pm.ih, pm.iw);
-    }
+    dnnConfig.biasSize = biasSize;
+    RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm)
   }
 }
 
 TEST(MKLDNNLayer, FcLayer) {
-  testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1});
-  testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1});
-  testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13});
-  testFcLayer({/*bs*/ 4, /*ic*/ 12, /*oc*/ 18, /*ih*/ 13, /*iw*/ 11});
-  testFcLayer({/*bs*/ 2, /*ic*/ 64, /*oc*/ 32, /*ih*/ 16, /*iw*/ 16});
-  testFcLayer({/*bs*/ 15, /*ic*/ 3, /*oc*/ 6, /*ih*/ 16, /*iw*/ 16});
+  /* bs, ic, ih, iw, oc */
+  testFcLayer({2, 2, 1, 1, 3});
+  testFcLayer({3, 7, 1, 1, 19});
+  testFcLayer({8, 16, 13, 13, 32});
+  testFcLayer({4, 12, 13, 13, 18});
+  testFcLayer({2, 64, 16, 16, 32});
+  testFcLayer({15, 3, 16, 16, 6});
 }
 
 struct testConvDesc {
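The remaining hunks refactor test_MKLDNN in the same way: each layer test builds its MKLDNN config in a small helper and then drives the MKLDNN-vs-reference comparison through the RUN_MKLDNN_TEST / RUN_MKLDNN_TEST_LAYER macros introduced above. Hand-expanding RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm) per those macro bodies gives roughly the following (shown only for illustration, not part of the commit):

// Manual expansion of RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm), following
// the macro definitions added in this commit (illustration only).
TestConfig ref = dnnConfig;        // copy the mkldnn_fc config ...
ref.layerConfig.set_type("fc");    // ... and point the copy at the CPU reference layer
MKLDNNTester tester;
for (auto bs : {pm.bs, 1}) {       // check both the configured batch size and bs == 1
  tester.run(dnnConfig, ref, bs, pm.ih, pm.iw);
}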
@@ -75,13 +84,10 @@ struct testConvDesc {
   int dh, dw;
 };
 
-void testConvLayer(const testConvDesc& pm) {
-  const std::string compareTypes[] = {"mkldnn_conv", "exconv"};
-  TestConfig cfg;
-  cfg.layerConfig.set_type(compareTypes[0]);
+static void getMKLDNNConvConfig(TestConfig& cfg, const testConvDesc& pm) {
+  cfg.layerConfig.set_type("mkldnn_conv");
   cfg.layerConfig.set_num_filters(pm.oc);
   cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
-  // cfg.layerConfig.set_partial_sum(1); // TODO: check it
   cfg.layerConfig.set_shared_biases(true);
   cfg.inputDefs.push_back(
       {INPUT_DATA,
@@ -115,15 +121,14 @@ void testConvLayer(const testConvDesc& pm) {
   int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
   CHECK_EQ(ow, pm.ow) << "output size check failed";
   CHECK_EQ(oh, pm.oh) << "output size check failed";
+}
 
-  MKLDNNTester tester;
+void testConvLayer(const testConvDesc& pm) {
+  TestConfig dnnConfig;
+  getMKLDNNConvConfig(dnnConfig, pm);
   for (auto biasSize : {pm.oc, 0}) {
-    cfg.biasSize = biasSize;
-    TestConfig ref = cfg;
-    ref.layerConfig.set_type(compareTypes[1]);
-    for (auto bs : {pm.bs, 1}) {
-      tester.run(cfg, ref, bs, pm.ih, pm.iw);
-    }
+    dnnConfig.biasSize = biasSize;
+    RUN_MKLDNN_TEST_LAYER(dnnConfig, "exconv", pm)
   }
 }
@@ -143,7 +148,7 @@ TEST(MKLDNNLayer, ConvLayer) {
 }
 
 struct testPoolDesc {
-  int bs, ch;  // input channel and output channel are the same
+  int bs, ic;  // input channel and output channel are the same
   int ih, iw;
   int oh, ow;
   int fh, fw;
@@ -151,19 +156,18 @@ struct testPoolDesc {
   int sh, sw;
 };
 
-void testPoolLayer(const testPoolDesc& pm) {
-  const std::string compareTypes[] = {"mkldnn_pool", "pool"};
-  TestConfig cfg;
-  cfg.layerConfig.set_type(compareTypes[0]);
-  cfg.layerConfig.set_size(pm.ch * pm.oh * pm.ow);
+static void getMKLDNNPoolConfig(TestConfig& cfg, const testPoolDesc& pm) {
+  cfg.layerConfig.set_type("mkldnn_pool");
+  cfg.layerConfig.set_size(pm.ic * pm.oh * pm.ow);
   cfg.inputDefs.push_back(
       {INPUT_DATA,
        "layer_0",
-       /* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
+       /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
        0});
   LayerInputConfig* input = cfg.layerConfig.add_inputs();
   PoolConfig* pool = input->mutable_pool_conf();
-  pool->set_channels(pm.ch);
+  pool->set_pool_type("avg-projection");
+  pool->set_channels(pm.ic);
   pool->set_img_size(pm.iw);
   pool->set_img_size_y(pm.ih);
   pool->set_output_x(pm.ow);
@@ -179,20 +183,21 @@ void testPoolLayer(const testPoolDesc& pm) {
   int ow = outputSize(pm.iw, pm.fw, pm.pw, pm.sw, false);
   CHECK_EQ(ow, pm.ow) << "output size check failed";
   CHECK_EQ(oh, pm.oh) << "output size check failed";
+}
 
-  MKLDNNTester tester;
+void testPoolLayer(const testPoolDesc& pm) {
+  TestConfig dnnConfig;
+  getMKLDNNPoolConfig(dnnConfig, pm);
+  LayerInputConfig* input = dnnConfig.layerConfig.mutable_inputs(0);
+  PoolConfig* pool = input->mutable_pool_conf();
   for (auto type : {"max-projection", "avg-projection"}) {
     pool->set_pool_type(type);
-    TestConfig ref = cfg;
-    ref.layerConfig.set_type(compareTypes[1]);
-    for (auto bs : {pm.bs, 1}) {
-      tester.run(cfg, ref, bs, pm.ih, pm.iw);
-    }
+    RUN_MKLDNN_TEST_LAYER(dnnConfig, "pool", pm)
   }
 }
 
 TEST(MKLDNNLayer, PoolLayer) {
-  /* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw*/
+  /* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw */
   testPoolLayer({2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2});
   testPoolLayer({10, 8, 16, 16, 8, 8, 2, 2, 0, 0, 2, 2});
   testPoolLayer({4, 2, 5, 5, 3, 3, 3, 3, 1, 1, 2, 2});
@@ -204,44 +209,36 @@ TEST(MKLDNNLayer, PoolLayer) {
 }
 
 struct testActDesc {
-  int bs, ch;
-  int ih, iw;
+  int bs, ic, ih, iw;
 };
 
 static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) {
   cfg.biasSize = 0;
   cfg.layerConfig.set_type("addto");
-  cfg.layerConfig.set_size(pm.ch * pm.ih * pm.iw);
-  cfg.inputDefs.push_back(
-      {INPUT_DATA,
-       "layer_0",
-       /* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
-       0});
+  size_t layerSize = pm.ih * pm.ih * pm.iw;
+  cfg.layerConfig.set_size(layerSize);
+  cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0});
   cfg.layerConfig.add_inputs();
 }
 
-void testActivation(std::string& type, const testActDesc& pm) {
-  const std::string compareTypes[] = {type, type.erase(0, 7)};
+void testActivation(std::string& actType, const testActDesc& pm) {
+  // TODO(TJ): mkldnn_softmax not implemented, paddle do not have elu activation
+  if (actType == "mkldnn_softmax" || actType == "mkldnn_elu") {
+    return;
+  }
+  const std::string compareTypes[] = {actType, actType.erase(0, 7)};
   TestConfig cfg;
   getAddtoConfig(cfg, pm);
   TestConfig ref = cfg;
   cfg.layerConfig.set_active_type(compareTypes[0]);
   ref.layerConfig.set_active_type(compareTypes[1]);
-  MKLDNNTester tester;
-  for (auto bs : {pm.bs, 1}) {
-    tester.run(cfg, ref, bs, pm.ih, pm.iw);
-  }
+  RUN_MKLDNN_TEST(cfg, ref, pm)
 }
 
 TEST(MKLDNNActivation, Activations) {
   auto types = MKLDNNActivation::getAllRegisteredTypes();
-  // TODO(TJ): mkldnn_softmax not implemented, paddle do not have elu activation
-  std::set<string> excluded{"mkldnn_softmax", "mkldnn_elu"};
   for (auto type : types) {
-    if (excluded.count(type)) {
-      continue;
-    }
+    /* bs, c, h, w*/
     testActivation(type, {16, 64, 32, 32});
   }
 }