Commit 56f6e231 authored by: T tensor-tang

refine mkldnntester, support comparing values near zero

Parent 0049ce04
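Before the diff, a quick sketch of what "comparing values near zero" means in practice. The standalone program below condenses the patched getDelta() logic: element pairs where both the reference and the tested value fall below a tiny eps are treated as equal instead of being scored by their relative error. The helper name nearZeroAwareDelta, the main() driver, and the example numbers are invented for this illustration; only the comparison rule itself comes from the diff that follows.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Condensed rendition of the patched comparison: pairs where both the
// reference and the tested value are below eps are treated as equal.
double nearZeroAwareDelta(const std::vector<float>& refer,
                          const std::vector<float>& value,
                          float failRate = 1e-3f,
                          float thres = 0.1f) {
  const double eps = 1e-5;
  double delta = 0, sum = 0, maxRatio = 0;
  int failCnt = 0;
  for (size_t i = 0; i < refer.size(); ++i) {
    double ref = std::fabs(refer[i]);
    double val = std::fabs(value[i]);
    double diff = std::fabs(refer[i] - value[i]);
    delta += diff;
    sum += ref;
    if (ref < eps && val < eps) continue;  // both values are very small
    double ratio = diff / ref;
    if (ratio > thres) {
      maxRatio = std::max(maxRatio, ratio);
      failCnt++;
    }
  }
  double res = sum > eps ? delta / sum : eps;
  return failCnt / (float)refer.size() > failRate ? maxRatio : res;
}

int main() {
  // A relu-style output with exact zeros no longer trips the comparator.
  std::vector<float> ref = {0.f, 0.f, 1.0f, 2.0f};
  std::vector<float> dnn = {0.f, 0.f, 1.0001f, 1.9998f};
  std::printf("delta = %g\n", nearZeroAwareDelta(ref, dnn));
  return 0;
}

With the old rule the zero pairs were never flagged at all (the `ref > eps && fabs(d1[i]) > eps` pre-check skipped them even when the other side was wrong), and an all-zero reference broke the isnormal(sum) assertion; both problems are what the hunks below address.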
......
@@ -273,31 +273,37 @@ void MKLDNNTester::printVector(const VectorPtr& v) {
  VLOG(MKLDNN_ALL) << std::endl << ostr.str();
}
double MKLDNNTester::getDelta(const real* d1,
                              const real* d2,
double MKLDNNTester::getDelta(const real* refer,
                              const real* value,
                              size_t len,
                              const float failRate,
                              const float thres) {
  double delta = 0, sum = 0;
  int failCnt = 0;
  const double eps = 1e-5;
  double maxOut = 0;
  double maxRatio = 0;
  for (size_t i = 0; i < len; ++i) {
    double ref = fabs(d2[i]);
    double diff = fabs(d1[i] - d2[i]);
    double ref = fabs(refer[i]);
    double val = fabs(value[i]);
    double diff = fabs(refer[i] - value[i]);
    delta += diff;
    sum += ref;
    if (ref > eps && fabs(d1[i]) > eps && diff / ref > thres) {
      maxOut = std::max(maxOut, diff / ref);
    if (ref < eps && val < eps) {  // both values are very small
      continue;
    }
    double ratio = diff / ref;
    if (ratio > thres) {
      maxRatio = std::max(maxRatio, ratio);
      failCnt++;
    }
  }
  EXPECT_TRUE(std::isnormal(sum));
  EXPECT_FALSE(std::isinf(sum));
  EXPECT_FALSE(std::isnan(sum));
  EXPECT_FALSE(std::isnan(delta));
  VLOG(MKLDNN_ALL) << "reference avg data: " << sum / len
                   << ", delta: " << delta / sum << ", failCnt:" << failCnt;
  return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
  double res = sum > eps ? delta / sum : eps;
  return (failCnt / (float)len) > failRate ? maxRatio : res;
}
double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
......
@@ -543,12 +549,12 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
  CHECK_EQ(ref.outValues.size(), dnn.outValues.size());
  CHECK_EQ(ref.paraValues.size(), dnn.paraValues.size());
  VLOG(MKLDNN_TESTS) << "compare value size: " << ref.outValues.size();
  for (size_t i = 0; i < ref.outValues.size(); i++) {
    VLOG(MKLDNN_TESTS) << "compare value index: " << i;
    EXPECT_LE(fabs(compareMatrix(ref.outValues[i], dnn.outValues[i])), eps);
  }
  VLOG(MKLDNN_TESTS) << "compare param size: " << ref.outValues.size();
  for (size_t i = 0; i < ref.paraValues.size(); i++) {
    VLOG(MKLDNN_TESTS) << "compare param index: " << i;
    EXPECT_LE(fabs(compareVector(ref.paraValues[i], dnn.paraValues[i])), eps);
  }
}
......
......
@@ -128,13 +128,13 @@ private:
  /**
   * Get delta percent
   * if many(>failRate) wrong(abs(dnn-ref)/abs(ref)>thres) points return the
   * max(diff/ref)
   * else return sum(abs(a-b)) / sum(abs(b))
   * if many(>failRate) wrong(abs(val-ref)/abs(ref) > thres) points
   * return the max(diff/ref)
   * else return sum(abs(diff)) / sum(abs(ref))
   * The return value should be smaller than eps when passing.
   */
  static double getDelta(const real* d1,
                         const real* d2,
  static double getDelta(const real* refer,
                         const real* value,
                         size_t len,
                         const float failRate = 1e-3,
                         const float thres = 0.1);
......
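To make the return rule above concrete, a worked example with invented numbers: for refer = {1.0, 0.0, 2.0}, value = {1.05, 0.0, 2.0}, thres = 0.1 and failRate = 1e-3, the 0.0/0.0 pair is skipped, the remaining ratios are 0.05 and 0, so failCnt = 0 and the result is sum(abs(diff)) / sum(abs(ref)) = 0.05 / 3.0 ≈ 0.017. If value[0] were 1.5 instead, its ratio 0.5 would exceed thres, failCnt / len = 1/3 would exceed failRate, and max(diff/ref) = 0.5 would be returned, which then fails the eps check in compareResult.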
......
@@ -234,8 +234,7 @@ static void getMKLDNNBatchNormConfig(TestConfig& cfg,
  cfg.inputDefs.push_back({INPUT_DATA, "layer_2_moving_var", 1, size_t(pm.ic)});
  cfg.inputDefs.back().isStatic = true;
  LayerInputConfig* input = cfg.layerConfig.add_inputs();
  // TODO(TJ): uncomment me when refine and support comparing all zeroes vector
  // cfg.layerConfig.set_active_type("relu");
  cfg.layerConfig.set_active_type("relu");
  cfg.layerConfig.add_inputs();
  cfg.layerConfig.add_inputs();
  ImageConfig* img_conf = input->mutable_image_conf();
......
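The relu activation can be switched back on here because the comparator now tolerates outputs full of exact zeros. Under the old getDelta, an (almost) all-zero reference made EXPECT_TRUE(std::isnormal(sum)) fail and turned delta / sum into 0/0; the patched version drops the isnormal check, skips pairs that are both near zero, and falls back to eps when sum itself is near zero. A tiny illustrative program (not test code from the repository) showing the old failure mode:

#include <cmath>
#include <cstdio>

int main() {
  // A layer output that a relu activation clamped entirely to zero.
  const float ref[4] = {0.f, 0.f, 0.f, 0.f};
  double sum = 0, delta = 0;
  for (float r : ref) sum += std::fabs(r);
  // Old assertion: std::isnormal(0.0) is false, so all-zero data aborted the test.
  std::printf("isnormal(sum) = %d\n", (int)std::isnormal(sum));
  // Old return path: delta / sum is 0/0, i.e. NaN, for identical all-zero vectors.
  std::printf("delta / sum = %g\n", delta / sum);
  return 0;
}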