提交 56f6e231 编写于 作者: T tensor-tang

Refine MKLDNNTester: support comparing values near zero.

上级 0049ce04
...@@ -273,31 +273,37 @@ void MKLDNNTester::printVector(const VectorPtr& v) { ...@@ -273,31 +273,37 @@ void MKLDNNTester::printVector(const VectorPtr& v) {
VLOG(MKLDNN_ALL) << std::endl << ostr.str(); VLOG(MKLDNN_ALL) << std::endl << ostr.str();
} }
double MKLDNNTester::getDelta(const real* d1, double MKLDNNTester::getDelta(const real* refer,
const real* d2, const real* value,
size_t len, size_t len,
const float failRate, const float failRate,
const float thres) { const float thres) {
double delta = 0, sum = 0; double delta = 0, sum = 0;
int failCnt = 0; int failCnt = 0;
const double eps = 1e-5; const double eps = 1e-5;
double maxOut = 0; double maxRatio = 0;
for (size_t i = 0; i < len; ++i) { for (size_t i = 0; i < len; ++i) {
double ref = fabs(d2[i]); double ref = fabs(refer[i]);
double diff = fabs(d1[i] - d2[i]); double val = fabs(value[i]);
double diff = fabs(refer[i] - value[i]);
delta += diff; delta += diff;
sum += ref; sum += ref;
if (ref > eps && fabs(d1[i]) > eps && diff / ref > thres) { if (ref < eps && val < eps) { // both values are very small
maxOut = std::max(maxOut, diff / ref); continue;
}
double ratio = diff / ref;
if (ratio > thres) {
maxRatio = std::max(maxRatio, ratio);
failCnt++; failCnt++;
} }
} }
EXPECT_TRUE(std::isnormal(sum));
EXPECT_FALSE(std::isinf(sum)); EXPECT_FALSE(std::isinf(sum));
EXPECT_FALSE(std::isnan(sum));
EXPECT_FALSE(std::isnan(delta)); EXPECT_FALSE(std::isnan(delta));
VLOG(MKLDNN_ALL) << "reference avg data: " << sum / len VLOG(MKLDNN_ALL) << "reference avg data: " << sum / len
<< ", delta: " << delta / sum << ", failCnt:" << failCnt; << ", delta: " << delta / sum << ", failCnt:" << failCnt;
return (failCnt / (float)len) > failRate ? maxOut : delta / sum; double res = sum > eps ? delta / sum : eps;
return (failCnt / (float)len) > failRate ? maxRatio : res;
} }
double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) { double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
...@@ -543,12 +549,12 @@ void MKLDNNTester::getOutResult(const std::string& configPath, ...@@ -543,12 +549,12 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
/**
 * Compare the collected outputs and parameter values of a reference run
 * against an MKLDNN run; every pairwise delta must be within eps.
 */
void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
  CHECK_EQ(ref.outValues.size(), dnn.outValues.size());
  CHECK_EQ(ref.paraValues.size(), dnn.paraValues.size());
  VLOG(MKLDNN_TESTS) << "compare value size: " << ref.outValues.size();
  for (size_t i = 0; i < ref.outValues.size(); i++) {
    VLOG(MKLDNN_TESTS) << "compare value index: " << i;
    EXPECT_LE(fabs(compareMatrix(ref.outValues[i], dnn.outValues[i])), eps);
  }
  // Fix: log the PARAMETER count here; previously this printed
  // ref.outValues.size() (copy-paste error from the loop above).
  VLOG(MKLDNN_TESTS) << "compare param size: " << ref.paraValues.size();
  for (size_t i = 0; i < ref.paraValues.size(); i++) {
    VLOG(MKLDNN_TESTS) << "compare param index: " << i;
    EXPECT_LE(fabs(compareVector(ref.paraValues[i], dnn.paraValues[i])), eps);
  }
}
......
...@@ -128,13 +128,13 @@ private: ...@@ -128,13 +128,13 @@ private:
/** /**
* Get delta percent * Get delta percent
* if many(>failRate) wrong(abs(dnn-ref)/abs(ref)>thres) points return the * if many(>failRate) wrong(abs(val-ref)/abs(ref) > thres) points
* max(diff/ref) * return the max(diff/ref)
* else return sum(abs(a-b)) / sum(abs(b)) * else return sum(abs(diff)) / sum(abs(ref))
* The return value should be smaller than eps when passing. * The return value should be smaller than eps when passing.
*/ */
static double getDelta(const real* d1, static double getDelta(const real* refer,
const real* d2, const real* value,
size_t len, size_t len,
const float failRate = 1e-3, const float failRate = 1e-3,
const float thres = 0.1); const float thres = 0.1);
......
...@@ -234,8 +234,7 @@ static void getMKLDNNBatchNormConfig(TestConfig& cfg, ...@@ -234,8 +234,7 @@ static void getMKLDNNBatchNormConfig(TestConfig& cfg,
cfg.inputDefs.push_back({INPUT_DATA, "layer_2_moving_var", 1, size_t(pm.ic)}); cfg.inputDefs.push_back({INPUT_DATA, "layer_2_moving_var", 1, size_t(pm.ic)});
cfg.inputDefs.back().isStatic = true; cfg.inputDefs.back().isStatic = true;
LayerInputConfig* input = cfg.layerConfig.add_inputs(); LayerInputConfig* input = cfg.layerConfig.add_inputs();
// TODO(TJ): uncomment me when refine and support comparing all zeroes vector cfg.layerConfig.set_active_type("relu");
// cfg.layerConfig.set_active_type("relu");
cfg.layerConfig.add_inputs(); cfg.layerConfig.add_inputs();
cfg.layerConfig.add_inputs(); cfg.layerConfig.add_inputs();
ImageConfig* img_conf = input->mutable_image_conf(); ImageConfig* img_conf = input->mutable_image_conf();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册