Commit 6373291c authored by tensor-tang

add test case use_mkldnn_wgt

Parent 0c951176
@@ -23,8 +23,6 @@ typedef enum {
   DNN_TESTS = 1,
   DNN_SIZES,
   DNN_FMTS,
-  DNN_TESTS_DETAILS,
-  DNN_TESTS_MORE,
   DNN_ALL,
 } DNN_LOG_LEVEL;
......
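Note: the two fine-grained levels DNN_TESTS_DETAILS and DNN_TESTS_MORE are folded into DNN_ALL, so the verbosity ladder becomes DNN_TESTS < DNN_SIZES < DNN_FMTS < DNN_ALL. A minimal standalone sketch of how these enum values drive glog's VLOG filtering (assuming glog is linked; the messages and main() are illustrative, not part of the commit):

    #include <glog/logging.h>

    typedef enum {
      DNN_TESTS = 1,  // coarse test progress
      DNN_SIZES,      // tensor size dumps
      DNN_FMTS,       // mkldnn memory format dumps
      DNN_ALL,        // most verbose: per-check details
    } DNN_LOG_LEVEL;

    int main(int argc, char** argv) {
      google::InitGoogleLogging(argv[0]);
      FLAGS_logtostderr = true;
      FLAGS_v = DNN_ALL;  // equivalent to passing --v=4
      VLOG(DNN_TESTS) << "Check Iteration 0";  // emitted when v >= 1
      VLOG(DNN_ALL) << "Check Forward";        // emitted only when v >= 4
      return 0;
    }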
@@ -51,6 +51,10 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
 }

 void MkldnnFcLayer::cvtWgtFromPaddle() {
+  if (FLAGS_use_mkldnn_wgt) {
+    return;
+  }
+
   if (hasInitedWgt_) {
     return;
   }
......
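Note: the early return above makes cvtWgtFromPaddle() a no-op when use_mkldnn_wgt is set, because the parameter already holds weights in mkldnn layout. A self-contained sketch of that guard pattern (FLAGS_use_mkldnn_wgt and hasInitedWgt_ come from the diff; convertFromPaddle() is a hypothetical stand-in for the conversion body elided in the hunk):

    #include <gflags/gflags.h>

    DEFINE_bool(use_mkldnn_wgt, false, "Init weight from CPU weight");

    static bool hasInitedWgt_ = false;

    static void convertFromPaddle() {
      // hypothetical: reorder the paddle-layout weight into mkldnn layout
    }

    void cvtWgtFromPaddle() {
      if (FLAGS_use_mkldnn_wgt) {
        return;  // weights already stored in mkldnn layout, nothing to convert
      }
      if (hasInitedWgt_) {
        return;  // convert at most once
      }
      convertFromPaddle();
      hasInitedWgt_ = true;
    }

    int main() {
      FLAGS_use_mkldnn_wgt = true;
      cvtWgtFromPaddle();  // returns immediately
      return 0;
    }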
@@ -19,6 +19,9 @@ limitations under the License. */
 #include "MkldnnBase.h"
 #include "mkldnn.hpp"

+DECLARE_bool(use_mkldnn);
+DECLARE_bool(use_mkldnn_wgt);
+
 namespace paddle {

 class MkldnnLayer;
......
@@ -118,7 +118,7 @@ void MkldnnTester::checkForward() {
   printTopDatas();
   double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
                                testLayers_[REF]->getOutputValue());
-  VLOG(DNN_TESTS_DETAILS) << "Check Forward";
+  VLOG(DNN_ALL) << "Check Forward";
   EXPECT_LE(fabs(delta), eps_);
 }
@@ -162,7 +162,7 @@ void MkldnnTester::checkBackwardWgts() {
     EXPECT_LE(fabs(delta), eps_);
   }

-  VLOG(DNN_TESTS_DETAILS) << "Restore dnn weights before compare";
+  VLOG(DNN_ALL) << "Restore dnn weights before compare";
   restoreWgt(dnnWgts, parameters_[DNN]);
 }
@@ -275,7 +275,7 @@ double MkldnnTester::getDelta(const real* d1,
   EXPECT_TRUE(std::isnormal(sum));
   EXPECT_FALSE(std::isinf(sum));
   EXPECT_FALSE(std::isnan(delta));
-  VLOG(DNN_TESTS_MORE) << "reference avg data: " << sum / len
+  VLOG(DNN_ALL) << "reference avg data: " << sum / len
                 << ", delta: " << delta / sum << ", failCnt:" << failCnt;
   return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
 }
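Note: the visible tail of getDelta() reports a relative error, delta / sum, unless the fraction of failing elements exceeds failRate, in which case it returns the large maxOut value so the surrounding EXPECT_LE fails loudly. A hypothetical standalone reconstruction of that metric (the loop, failCnt, thres, and maxOut are not shown in the hunk and are assumed from the return statement):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>

    // Hypothetical reconstruction: aggregate relative error of d1 against d2.
    double getDelta(const float* d1, const float* d2, size_t len,
                    float failRate = 1e-3, float thres = 0.1) {
      double delta = 0, sum = 0, maxOut = 0;
      int failCnt = 0;
      for (size_t i = 0; i < len; ++i) {
        double ref = std::fabs(d2[i]);
        double diff = std::fabs(d1[i] - d2[i]);
        delta += diff;
        sum += ref;
        double rel = diff / std::max(ref, 1e-5);  // guard tiny references
        maxOut = std::max(maxOut, rel);
        if (rel > thres) {
          ++failCnt;
        }
      }
      // Too many failing elements: return the worst ratio so the test fails.
      return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
    }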
@@ -330,22 +330,20 @@ void MkldnnTester::run(const TestConfig& dnn,
   log_ = log;
   lvl_ = level;

-  // Firstly always set flag false to initial from paddle weight
-  TestConfig first = dnn;
-
+  // Firstly test FLAGS_use_mkldnn_wgt = false
+  FLAGS_use_mkldnn_wgt = false;
   // reset and run once
-  reset(first, ref, batchSize);
+  reset(dnn, ref, batchSize);
   randomWgtDatas();
   clearWgtDiffs();
   clearBotDiffs();
-
-  VLOG(DNN_TESTS) << "Check Iteration 0";
-  runOnce();
-
-  // firstly get the flag
-  bool initWgtFromMkldnn = false;
-  if (initWgtFromMkldnn) {
+  for (size_t i = 0; i < iter_; ++i) {
+    VLOG(DNN_TESTS) << "Check Iteration " << i;
+    runOnce();
+  }
+
+  // Then test FLAGS_use_mkldnn_wgt = true
+  FLAGS_use_mkldnn_wgt = true;
   // after run once the mkldnn weight has been stored in dnnlayer
   // then save the weights and restart again
   vector<VectorPtr> dnnWgts, refWgts;
@@ -362,11 +360,7 @@ void MkldnnTester::run(const TestConfig& dnn,
   clearWgtDiffs();
   clearBotDiffs();

-    // at least run once
-    runOnce();
-  }
-
-  for (size_t i = 1; i < iter_; ++i) {
+  for (size_t i = 0; i < iter_; ++i) {
     VLOG(DNN_TESTS) << "Check Iteration " << i;
     runOnce();
   }
......
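Note: taken together, the two hunks above turn run() into a two-phase test. Phase one starts from paddle-layout weights (FLAGS_use_mkldnn_wgt = false) and iterates; phase two flips the flag, reuses the weights the first pass left stored in mkldnn layout (saving and restoring them across the reset), and iterates again. A toy outline of that control flow (resetAndRandomize() and runOnce() are hypothetical stand-ins for the tester's methods):

    #include <gflags/gflags.h>
    #include <cstddef>
    #include <iostream>

    DEFINE_bool(use_mkldnn_wgt, false, "Init weight from CPU weight");

    static void resetAndRandomize() { std::cout << "reset\n"; }
    static void runOnce(size_t i)   { std::cout << "iter " << i << "\n"; }

    int main() {
      const size_t iter = 3;
      // Phase 1: start from paddle-layout weights and convert on the fly.
      FLAGS_use_mkldnn_wgt = false;
      resetAndRandomize();
      for (size_t i = 0; i < iter; ++i) runOnce(i);
      // Phase 2: reuse the weights now stored in mkldnn layout.
      FLAGS_use_mkldnn_wgt = true;
      resetAndRandomize();  // the real tester also saves/restores weights here
      for (size_t i = 0; i < iter; ++i) runOnce(i);
      return 0;
    }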
@@ -58,7 +58,7 @@ public:
     iter_ = iter;
     eps_ = epsilon;
     log_ = false;
-    lvl_ = DNN_TESTS_MORE;
+    lvl_ = DNN_ALL;
   }

   ~MkldnnTester() {}
@@ -72,7 +72,7 @@ public:
            size_t iter = 3,
            float epsilon = 1e-4,
            bool log = false,
-           int level = DNN_TESTS_MORE);
+           int level = DNN_ALL);
   void setLogLevel(int lvl) { lvl_ = lvl; }

 private:
......
@@ -23,6 +23,7 @@ using namespace paddle;  // NOLINT
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_bool(use_gpu);
 DECLARE_bool(use_mkldnn);
+DECLARE_bool(use_mkldnn_wgt);

 struct testFCDesc {
   int bs;
......
@@ -29,6 +29,7 @@ DECLARE_bool(with_gpu);
 DECLARE_bool(parallel_nn);
 DECLARE_string(config_args);
 DECLARE_bool(use_mkldnn);
+DECLARE_bool(use_mkldnn_wgt);

 const char *kConfigParserModuleName = "paddle.trainer.config_parser";
 const char *kConfigParserFuncName = "parse_config_and_serialize";
@@ -46,6 +47,7 @@ TrainerConfigHelper::TrainerConfigHelper(const std::string &configFilePath)
              << ",with_cost=" << FLAGS_with_cost << ",use_gpu=" << FLAGS_use_gpu
              << ",parallel_nn=" << FLAGS_parallel_nn
              << ",use_mkldnn=" << FLAGS_use_mkldnn
+             << ",use_mkldnn_wgt=" << FLAGS_use_mkldnn_wgt
              << ",cudnn_version=" << hl_get_cudnn_lib_version();
   if (!FLAGS_config_args.empty()) {
     configArgs << "," << FLAGS_config_args;
......
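Note: the trainer forwards the new flag to the python side by appending it to the config_args string, which config_parser.py later reads back through g_command_config_args. A small sketch of the string being assembled (stream contents follow the pattern in the hunk; the values are illustrative):

    #include <iostream>
    #include <sstream>

    int main() {
      bool use_mkldnn = true, use_mkldnn_wgt = true;
      std::ostringstream configArgs;
      configArgs << "use_mkldnn=" << use_mkldnn
                 << ",use_mkldnn_wgt=" << use_mkldnn_wgt;
      // Prints "use_mkldnn=1,use_mkldnn_wgt=1"; config_parser.py reads it
      // back with g_command_config_args.get("use_mkldnn_wgt", 0).
      std::cout << configArgs.str() << std::endl;
      return 0;
    }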
@@ -27,6 +27,7 @@ DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training");
 DEFINE_bool(use_mkldnn, false, "Only support CPU training");
 #endif
+DEFINE_bool(use_mkldnn_wgt, false, "Init weight from CPU weight");
 DEFINE_bool(parallel_nn,
             false,
             "Whether to use multi-threads to calculate one neural network."
@@ -41,3 +41,4 @@ DECLARE_string(predict_file);
 DECLARE_bool(prev_batch_state);
 DECLARE_string(init_model_path);
 DECLARE_bool(use_mkldnn);
+DECLARE_bool(use_mkldnn_wgt);
@@ -1619,6 +1619,8 @@ class FCLayer(LayerBase):
             config_assert(
                 len(inputs) == 1,
                 "MkldnnFCLayer support one and only one input!")
+        use_mkldnn_wgt = bool(
+            int(g_command_config_args.get("use_mkldnn_wgt", 0)))
         super(FCLayer, self).__init__(
             name, self.layer_type, size, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
@@ -1627,9 +1629,10 @@ class FCLayer(LayerBase):
             format = self.inputs[input_index].format
             sparse = format == "csr" or format == "csc"
             if use_mkldnn:
-                dims = [self.config.size, input_layer.size]
                 config_assert(not sparse,
                               "MkldnnFCLayer do not support sparse format yet")
+            if use_mkldnn and use_mkldnn_wgt:
+                dims = [self.config.size, input_layer.size]
             else:
                 dims = [input_layer.size, self.config.size]
             if sparse:
......
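Note: the python change creates the FC weight as [output_size, input_size] only when both use_mkldnn and use_mkldnn_wgt are set, i.e. in mkldnn's native ordering; otherwise it stays in paddle's [input_size, output_size] ordering and cvtWgtFromPaddle() has to reorder it at runtime. A hedged C++ illustration of that reorder as a plain loop transpose (the real layer would use an mkldnn reorder primitive; this helper is illustrative only):

    #include <cstddef>
    #include <vector>

    // Convert a row-major paddle-layout FC weight [input, output] into the
    // mkldnn layout [output, input] by transposing.
    std::vector<float> toMkldnnWgt(const std::vector<float>& paddleWgt,
                                   size_t inputSize, size_t outputSize) {
      std::vector<float> mkldnnWgt(paddleWgt.size());
      for (size_t i = 0; i < inputSize; ++i) {
        for (size_t o = 0; o < outputSize; ++o) {
          mkldnnWgt[o * inputSize + i] = paddleWgt[i * outputSize + o];
        }
      }
      return mkldnnWgt;
    }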