Commit e581e3cf authored by Liangliang He

Merge branch 'relative-error' into 'master'

Replace abs error with relative error for ops test.

See merge request !383
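The substance of the change is that ExpectTensorNear now takes a relative tolerance in addition to the absolute one, and the per-element bound inside Expector::Near becomes abs_err + rel_err * |expected| (see the test-utility hunk near the end of this diff). Below is a minimal standalone sketch of that comparison, assuming gtest is available; the wrapper function name is illustrative only and is not part of the actual change.

    #include <cmath>
    #include <cstddef>
    #include <vector>
    #include "gtest/gtest.h"

    // Illustrative helper (not from the diff): compare two float buffers the
    // way the updated Expector::Near does, using a per-element tolerance that
    // combines an absolute floor with a component proportional to the
    // magnitude of the expected value.
    void ExpectNearRelAbs(const std::vector<float> &expected,
                          const std::vector<float> &actual,
                          double rel_err = 1e-5,
                          double abs_err = 1e-8) {
      ASSERT_EQ(expected.size(), actual.size());
      for (size_t i = 0; i < expected.size(); ++i) {
        const double error = abs_err + rel_err * std::abs(expected[i]);
        EXPECT_NEAR(expected[i], actual[i], error) << "index = " << i;
      }
    }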
......@@ -42,28 +42,60 @@ template <DeviceType D>
void SimpleAdd3() {
// Construct graph
OpsTestNet net;
// Add input data
net.AddInputFromArray<D, float>("Input0", {1, 2, 3, 1},
{-2.06715, 2, 3, 4, 5, 6});
net.AddInputFromArray<D, float>("Input1", {1, 2, 3, 1},
{0.875977, 2, 3, 4, 5, 6});
net.AddInputFromArray<D, float>("Input2", {1, 2, 3, 1},
{1.34866, 2, 3, 4, 5, 6});
net.AddInputFromArray<D, float>("Input3", {1, 2, 3, 1},
{-0.1582, 2, 3, 4, 5, 6});
const int input_num = 4;
if (D == DeviceType::OPENCL) {
// run on gpu
for (int i = 0; i < input_num; ++i) {
BufferToImage<D, half>(&net, MakeString("Input", i),
MakeString("InputImage", i),
kernels::BufferType::IN_OUT_CHANNEL);
}
auto op_def_cl = OpDefBuilder("AddN", "AddNTest");
for (int i = 0; i < input_num; ++i) {
op_def_cl.Input(MakeString("InputImage", i));
}
op_def_cl.Output("OutputImage")
.AddIntArg("T", static_cast<int>(DataType::DT_HALF))
.Finalize(net.NewOperatorDef());
// Run on device
net.RunOp(D);
ImageToBuffer<D, float>(&net, "OutputImage", "Output",
kernels::BufferType::IN_OUT_CHANNEL);
} else {
OpDefBuilder("AddN", "AddNTest")
.Input("Input0")
.Input("Input1")
.Input("Input2")
.Input("Input3")
.Output("Output")
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<D, float>("Input1", {1, 2, 3, 1}, {1, 2, 3, 4, 5, 6});
net.AddInputFromArray<D, float>("Input2", {1, 2, 3, 1}, {1, 2, 3, 4, 5, 6});
net.AddInputFromArray<D, float>("Input3", {1, 2, 3, 1}, {1, 2, 3, 4, 5, 6});
// Run
net.RunOp(D);
}
auto expected = CreateTensor<float>({1, 2, 3, 1}, {3, 6, 9, 12, 15, 18});
auto expected = CreateTensor<float>({1, 2, 3, 1},
{-0.000713, 8, 12, 16, 20, 24});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-4, 1e-3);
}
} // namespace
TEST_F(AddnOpTest, CPUSimpleAdd3) { SimpleAdd3<DeviceType::CPU>(); }
TEST_F(AddnOpTest, GPUSimpleAdd3) { SimpleAdd3<DeviceType::OPENCL>(); }
namespace {
template <DeviceType D>
......@@ -118,7 +150,8 @@ void RandomTest() {
ImageToBuffer<D, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.1);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-2, 1e-2);
}
}
} // namespace
......
......@@ -67,10 +67,11 @@ void Simple() {
// Check
auto expected =
CreateTensor<float>({1, 6, 2, 1}, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
CreateTensor<float>({1, 6, 2, 1}, {-3.8543, -3.8543, -1.5125, -1.5125,
0.8291, 0.8291, 3.1708, 3.1708,
5.5125, 5.5125, 7.8543, 7.8543});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-2);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-4);
}
} // namespace
......@@ -81,8 +82,8 @@ TEST_F(BatchNormOpTest, SimpleOPENCL) { Simple<DeviceType::OPENCL>(); }
TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
// generate random input
static unsigned int seed = time(NULL);
index_t batch = 1 + rand_r(&seed) % 10;
index_t channels = 3 + rand_r(&seed) % 50;
index_t batch = 1 + rand_r(&seed) % 5;
index_t channels = 3 + rand_r(&seed) % 25;
index_t height = 64;
index_t width = 64;
......@@ -146,7 +147,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5, 1e-4);
}
TEST_F(BatchNormOpTest, SimpleRandomHalfOPENCL) {
......@@ -218,14 +219,14 @@ TEST_F(BatchNormOpTest, SimpleRandomHalfOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.5);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2, 1e-2);
}
TEST_F(BatchNormOpTest, ComplexRandomOPENCL) {
// generate random input
static unsigned int seed = time(NULL);
index_t batch = 1 + rand_r(&seed) % 10;
index_t channels = 3 + rand_r(&seed) % 50;
index_t batch = 1 + rand_r(&seed) % 5;
index_t channels = 3 + rand_r(&seed) % 25;
index_t height = 103;
index_t width = 113;
......@@ -289,14 +290,14 @@ TEST_F(BatchNormOpTest, ComplexRandomOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5, 1e-4);
}
TEST_F(BatchNormOpTest, ComplexRandomHalfOPENCL) {
// generate random input
static unsigned int seed = time(NULL);
index_t batch = 1 + rand_r(&seed) % 10;
index_t channels = 3 + rand_r(&seed) % 50;
index_t batch = 1 + rand_r(&seed) % 5;
index_t channels = 3 + rand_r(&seed) % 25;
index_t height = 103;
index_t width = 113;
......@@ -361,7 +362,7 @@ TEST_F(BatchNormOpTest, ComplexRandomHalfOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.5);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2, 1e-2);
}
TEST_F(BatchNormOpTest, NEONTest) {
......@@ -418,7 +419,7 @@ TEST_F(BatchNormOpTest, NEONTest) {
ExpectTensorNear<float>(*net.GetOutput("OutputExptected"),
*net.GetOutput("OutputNeon"),
0.001);
1e-5);
}
} // namespace test
......
......@@ -53,7 +53,7 @@ void BiasAddSimple() {
{1, 6, 2, 1},
{5.5, 5.5, 7.5, 7.5, 9.5, 9.5, 11.5, 11.5, 13.5, 13.5, 15.5, 15.5});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-2);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -109,7 +109,7 @@ TEST_F(BiasAddOpTest, SimpleRandomOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5);
}
TEST_F(BiasAddOpTest, ComplexRandomOPENCL) {
......@@ -158,7 +158,7 @@ TEST_F(BiasAddOpTest, ComplexRandomOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5);
}
} // namespace test
......
......@@ -143,7 +143,7 @@ void TestDiffTypeBidirectionTransform(const int type,
// Check
ExpectTensorNear<float>(*net.GetOutput("Input"), *net.GetOutput("I2BOutput"),
1e-2);
1e-3, 1e-6);
}
} // namespace
......@@ -185,7 +185,7 @@ void TestStringHalfBidirectionTransform(const int type,
// Check
ExpectTensorNear<half>(*net.GetOutput("Input"), *net.GetOutput("I2BOutput"),
1e-2);
1e-3, 1e-6);
}
} // namespace
......
......@@ -32,7 +32,7 @@ TEST_F(ChannelShuffleOpTest, C8G4_CPU) {
auto expected = CreateTensor<float>(
{1, 1, 2, 8}, {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
TEST_F(ChannelShuffleOpTest, C16G4_OPENCL) {
......@@ -66,7 +66,7 @@ TEST_F(ChannelShuffleOpTest, C16G4_OPENCL) {
{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19, 23, 27, 31});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace test
......
......@@ -68,7 +68,7 @@ void TestNHWCSimple3x3VALID() {
}
auto expected = CreateTensor<float>({1, 1, 1, 1}, {18.1f});
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 0.01);
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 1e-5);
}
template<DeviceType D, typename T>
......@@ -128,7 +128,7 @@ void TestNHWCSimple3x3SAME() {
{1, 3, 3, 1},
{8.1f, 12.1f, 8.1f, 12.1f, 18.1f, 12.1f, 8.1f, 12.1f, 8.1f});
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 0.01);
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -194,7 +194,7 @@ void TestNHWCSimple3x3WithoutBias() {
// Check
auto expected = CreateTensor<float>({1, 1, 1, 1}, {18.0f});
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 0.01);
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -266,7 +266,7 @@ void TestNHWCCombined3x3() {
auto expected = CreateTensor<float>(
{1, 3, 3, 2}, {8.1f, 4.2f, 12.1f, 6.2f, 8.1f, 4.2f, 12.1f, 6.2f, 18.1f,
9.2f, 12.1f, 6.2f, 8.1f, 4.2f, 12.1f, 6.2f, 8.1f, 4.2f});
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 0.01);
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -345,7 +345,7 @@ void TestConv1x1() {
5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f,
5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -415,7 +415,8 @@ void TestComplexConvNxNS12(const std::vector<index_t> &shape,
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
ExpectTensorNear<float>(expected,
*net.GetOutput("OPENCLOutput"), 1e-5, 1e-4);
};
for (int kernel_size : {1, 3, 7}) {
......@@ -516,7 +517,8 @@ void TestHalfComplexConvNxNS12(const std::vector<index_t> &input_shape,
ImageToBuffer<D, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.5);
ExpectTensorNear<float>(expected,
*net.GetOutput("OPENCLOutput"), 1e-2, 1e-1);
};
func(1, 1, VALID);
......@@ -641,7 +643,8 @@ void TestDilationConvNxN(const std::vector<index_t> &shape,
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5, 1e-4);
};
for (int kernel_size : {3}) {
......@@ -725,7 +728,8 @@ void TestArbitraryPadConvNxN(const std::vector<index_t> &shape,
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5, 1e-4);
};
for (int kernel_size : {3, 5}) {
......@@ -807,7 +811,7 @@ static void TestNeonArbitraryPadConvNxN(const std::vector<index_t> &shape,
ExpectTensorNear<float>(*net.GetOutput("OutputExptected"),
*net.GetOutput("OutputNeon"),
0.001);
1e-5, 1e-3);
};
for (int kernel_size : {1, 3, 5}) {
......
......@@ -54,7 +54,7 @@ void Simple(const kernels::CWiseType type,
auto expected = CreateTensor<float>(shape, output);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-3);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5, 1e-3);
}
} // namespace
......@@ -140,10 +140,10 @@ void RandomTest(const kernels::CWiseType type,
if (DataTypeToEnum<T>::value == DT_FLOAT) {
ExpectTensorNear<float>(*net.GetTensor("Output"),
*net.GetOutput("OPENCLOutput"), 1e-3);
*net.GetOutput("OPENCLOutput"), 1e-5, 1e-4);
} else {
ExpectTensorNear<float>(*net.GetTensor("Output"),
*net.GetOutput("OPENCLOutput"), 1e-1);
*net.GetOutput("OPENCLOutput"), 1e-2, 1e-2);
}
}
} // namespace
......
......@@ -48,7 +48,7 @@ void RunDepthToSpace(const bool d2s,
kernels::BufferType::IN_OUT_CHANNEL);
}
auto expected = CreateTensor<float>(expected_shape, expected_data);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -194,10 +194,10 @@ void RandomTest(const bool d2s, const int block_size,
if (DataTypeToEnum<T>::value == DT_FLOAT) {
ExpectTensorNear<float>(*net.GetTensor("Output"),
*net.GetOutput("OPENCLOutput"), 1e-3);
*net.GetOutput("OPENCLOutput"), 1e-5);
} else {
ExpectTensorNear<float>(*net.GetTensor("Output"),
*net.GetOutput("OPENCLOutput"), 1e-1);
*net.GetOutput("OPENCLOutput"), 1e-3, 1e-4);
}
}
} // namespace
......
......@@ -46,7 +46,7 @@ void SimpleValidTest() {
net.RunOp(D);
// Transfer output
ImageToBuffer<D, T>(&net, "OutputImage", "Output",
ImageToBuffer<D, float>(&net, "OutputImage", "Output",
kernels::BufferType::IN_OUT_CHANNEL);
} else {
......@@ -64,11 +64,15 @@ void SimpleValidTest() {
}
// Check
auto expected = CreateTensor<T>(
{1, 2, 2, 2}, VectorStaticCast<T>({37.1f, 148.2f, 47.1f, 188.2f, 67.1f,
268.2f, 77.1f, 308.2f}));
auto expected = CreateTensor<float>(
{1, 2, 2, 2}, {37.1f, 148.2f, 47.1f, 188.2f,
67.1f, 268.2f, 77.1f, 308.2f});
ExpectTensorNear<T>(*expected, *net.GetOutput("Output"), 1e-5);
if (DataTypeToEnum<T>::value == DT_HALF) {
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-3, 1e-3);
} else {
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
}
} // namespace
......@@ -189,7 +193,11 @@ void ComplexValidTest() {
9.13650036, 8.5095005, 8.92500019, 9.34349918, 8.69849968,
9.12300014, 9.55049992, 4.55220032, 4.80690002, 5.06340027}));
ExpectTensorNear<T>(*expected, *net.GetOutput("Output"), 0.2);
if (DataTypeToEnum<T>::value == DT_FLOAT) {
ExpectTensorNear<T>(*expected, *net.GetOutput("Output"), 1e-5);
} else {
ExpectTensorNear<T>(*expected, *net.GetOutput("Output"), 1e-2);
}
}
} // namespace
......@@ -282,7 +290,13 @@ void TestNxNS12(const index_t height, const index_t width) {
}
// Check
ExpectTensorNear<float>(expected, *net.GetOutput("DeviceOutput"), 0.1);
if (DataTypeToEnum<T>::value == DT_FLOAT) {
ExpectTensorNear<float>(expected, *net.GetOutput("DeviceOutput"),
1e-5, 1e-4);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("DeviceOutput"),
1e-2, 1e-2);
}
};
for (int kernel_size : {2, 3, 4}) {
......@@ -303,12 +317,10 @@ TEST_F(DepthwiseConv2dOpTest, OpenCLSimpleNxNS12Half) {
}
TEST_F(DepthwiseConv2dOpTest, OpenCLAlignedNxNS12) {
TestNxNS12<DeviceType::OPENCL, float>(64, 64);
TestNxNS12<DeviceType::OPENCL, float>(128, 128);
}
TEST_F(DepthwiseConv2dOpTest, OpenCLAlignedNxNS12Half) {
TestNxNS12<DeviceType::OPENCL, half>(64, 64);
TestNxNS12<DeviceType::OPENCL, half>(128, 128);
}
......@@ -380,7 +392,7 @@ void TestNEONNxNS12(const index_t height,
// Check
ExpectTensorNear<float>(*net.GetOutput("OutputExptected"),
*net.GetOutput("OutputNeon"),
0.001);
1e-5, 1e-3);
};
for (int kernel_size : {1, 3, 5}) {
......
......@@ -60,7 +60,7 @@ void Simple(const kernels::EltwiseType type,
auto expected = CreateTensor<float>(shape, output);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-3);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -146,10 +146,10 @@ void RandomTest(const kernels::EltwiseType type,
if (DataTypeToEnum<T>::value == DT_FLOAT) {
ExpectTensorNear<float>(*net.GetTensor("Output"),
*net.GetOutput("OPENCLOutput"), 1e-3);
*net.GetOutput("OPENCLOutput"), 1e-5, 1e-4);
} else {
ExpectTensorNear<float>(*net.GetTensor("Output"),
*net.GetOutput("OPENCLOutput"), 1e-1);
*net.GetOutput("OPENCLOutput"), 1e-2, 1e-2);
}
}
} // namespace
......
......@@ -72,10 +72,11 @@ void Simple() {
// Check
auto expected =
CreateTensor<float>({1, 6, 2, 1}, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
CreateTensor<float>({1, 6, 2, 1}, {-3.8543, -3.8543, -1.5125, -1.5125,
0.8291, 0.8291, 3.1708, 3.1708,
5.5125, 5.5125, 7.8543, 7.8543});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-2);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-4);
}
} // namespace
......@@ -226,7 +227,7 @@ TEST_F(FoldedBatchNormOpTest, SimpleRandomOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5, 1e-4);
}
TEST_F(FoldedBatchNormOpTest, SimpleRandomHalfOPENCL) {
......@@ -281,7 +282,7 @@ TEST_F(FoldedBatchNormOpTest, SimpleRandomHalfOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.5);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2, 1e-2);
}
TEST_F(FoldedBatchNormOpTest, ComplexRandomOPENCL) {
......@@ -334,7 +335,7 @@ TEST_F(FoldedBatchNormOpTest, ComplexRandomOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5, 1e-4);
}
TEST_F(FoldedBatchNormOpTest, ComplexRandomHalfOPENCL) {
......@@ -388,7 +389,7 @@ TEST_F(FoldedBatchNormOpTest, ComplexRandomHalfOPENCL) {
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.5);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2, 1e-2);
}
} // namespace test
......
......@@ -164,9 +164,11 @@ void Complex(const index_t batch,
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
if (DataTypeToEnum<T>::value == DataType::DT_HALF) {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-1, 1e-1);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-3);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5, 1e-4);
}
}
} // namespace
......@@ -247,9 +249,11 @@ void TestWXFormat(const index_t batch,
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
if (DataTypeToEnum<T>::value == DataType::DT_HALF) {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-1, 1e-1);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-3, 1e-3);
}
}
} // namespace
......@@ -267,7 +271,7 @@ TEST_F(FullyConnectedOpTest, OPENCLWidthFormatMultiBatch) {
}
TEST_F(FullyConnectedOpTest, OPENCLHalfWidthFormatAligned) {
TestWXFormat<float>(1, 2, 2, 512, 2);
TestWXFormat<half>(1, 2, 2, 512, 2);
TestWXFormat<half>(1, 11, 11, 32, 16);
TestWXFormat<half>(1, 16, 32, 32, 32);
}
......@@ -315,7 +319,7 @@ void FullyConnectedTestNEON(const index_t batch,
ExpectTensorNear<float>(*net.GetOutput("OutputExptected"),
*net.GetOutput("OutputNeon"),
0.01);
1e-3, 1e-3);
}
} // namespace
......
......@@ -18,14 +18,14 @@ template<DeviceType D, typename T>
void TestNHWCSimple3x3VALID() {
OpsTestNet net;
// Add input data
net.AddInputFromArray<D, T>(
net.AddInputFromArray<D, float>(
"Input", {1, 3, 3, 2},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1});
net.AddInputFromArray<D, T>(
net.AddInputFromArray<D, float>(
"Filter", {3, 3, 1, 2},
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
net.AddInputFromArray<D, T>("Bias", {1}, {-0.1f});
net.AddInputFromArray<D, float>("Bias", {1}, {-0.1f});
if (D == DeviceType::OPENCL) {
BufferToImage<D, T>(&net, "Input", "InputImage",
......@@ -49,7 +49,7 @@ void TestNHWCSimple3x3VALID() {
net.RunOp(D);
// Transfer output
ImageToBuffer<D, T>(&net, "OutputImage", "Output",
ImageToBuffer<D, float>(&net, "OutputImage", "Output",
kernels::BufferType::IN_OUT_CHANNEL);
} else {
......@@ -69,7 +69,7 @@ void TestNHWCSimple3x3VALID() {
}
auto expected = CreateTensor<float>({1, 1, 1, 1}, {0.0f});
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 0.01);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"));
}
template<DeviceType D, typename T>
......@@ -77,14 +77,14 @@ void TestNHWCSimple3x3SAME() {
OpsTestNet net;
// Add input data
net.AddInputFromArray<D, T>(
net.AddInputFromArray<D, float>(
"Input", {1, 3, 3, 2},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1});
net.AddInputFromArray<D, T>(
net.AddInputFromArray<D, float>(
"Filter", {3, 3, 1, 2},
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
net.AddInputFromArray<D, T>("Bias", {1}, {-0.1f});
net.AddInputFromArray<D, float>("Bias", {1}, {-0.1f});
if (D == DeviceType::OPENCL) {
BufferToImage<D, T>(&net, "Input", "InputImage",
......@@ -108,7 +108,7 @@ void TestNHWCSimple3x3SAME() {
net.RunOp(D);
// Transfer output
ImageToBuffer<D, T>(&net, "OutputImage", "Output",
ImageToBuffer<D, float>(&net, "OutputImage", "Output",
kernels::BufferType::IN_OUT_CHANNEL);
} else {
......@@ -130,7 +130,7 @@ void TestNHWCSimple3x3SAME() {
auto expected = CreateTensor<float>(
{1, 3, 3, 1}, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f});
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 0.01);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"));
}
} // namespace
......@@ -150,10 +150,10 @@ void TestNHWCSimple3x3WithoutBias() {
OpsTestNet net;
// Add input data
net.AddInputFromArray<D, T>(
net.AddInputFromArray<D, float>(
"Input", {1, 3, 3, 2},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1});
net.AddInputFromArray<D, T>(
net.AddInputFromArray<D, float>(
"Filter", {3, 3, 1, 2},
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
......@@ -177,7 +177,7 @@ void TestNHWCSimple3x3WithoutBias() {
// Run
net.RunOp(D);
// Transfer output
ImageToBuffer<D, T>(&net, "OutputImage", "Output",
ImageToBuffer<D, float>(&net, "OutputImage", "Output",
kernels::BufferType::IN_OUT_CHANNEL);
} else {
OpDefBuilder("FusedConv2D", "FusedConv2dTest")
......@@ -198,7 +198,7 @@ void TestNHWCSimple3x3WithoutBias() {
// Check
auto expected = CreateTensor<float>({1, 1, 1, 1}, {0.0f});
ExpectTensorNear<float, T>(*expected, *net.GetOutput("Output"), 0.01);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"));
}
} // namespace
......@@ -277,7 +277,7 @@ void TestConv1x1() {
5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f,
5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f, 5.1f, 10.2f});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"));
}
} // namespace
......@@ -346,7 +346,8 @@ void TestComplexConvNxNS12(const std::vector<index_t> &shape) {
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5, 1e-4);
};
for (int kernel_size : {1, 3}) {
......@@ -434,7 +435,8 @@ void TestHalfComplexConvNxNS12(const std::vector<index_t> &shape) {
ImageToBuffer<D, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.2);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-2, 1e-1);
};
for (int kernel_size : {1, 3}) {
......@@ -513,7 +515,8 @@ void TestGeneralConvNxNS12(const std::vector<index_t> &image_shape,
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5, 1e-4);
};
for (int stride : {1, 2}) {
......@@ -594,7 +597,8 @@ void TestAtrousConvNxN(const std::vector<index_t> &shape,
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5, 1e-4);
};
for (int kernel_size : {3}) {
......@@ -683,7 +687,8 @@ void TestGeneralHalfAtrousConv(const std::vector<index_t> &image_shape,
ImageToBuffer<D, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.7);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-2, 1e-1);
};
func(1, 1, VALID);
......@@ -763,7 +768,7 @@ void TestNEONGeneralConvNxNS12(
ExpectTensorNear<float>(*net.GetOutput("OutputExptected"),
*net.GetOutput("OutputNeon"),
0.001);
1e-5, 1e-4);
};
for (int stride : {1, 2}) {
......
......@@ -31,7 +31,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_CPU) {
// Check
auto expected = CreateTensor<float>({1, 3, 1, 1}, {1, 2, 3});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace test
......
......@@ -148,9 +148,11 @@ void Complex(const index_t batch,
ImageToBuffer<DeviceType::OPENCL, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_HEIGHT);
if (DataTypeToEnum<T>::value == DataType::DT_HALF) {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-1);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-2, 1e-1);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-4);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5, 1e-5);
}
}
} // namespace
......
......@@ -439,7 +439,9 @@ struct Expector<EXP_TYPE, RES_TYPE, true> {
}
}
static void Near(const Tensor &x, const Tensor &y, const double abs_err) {
static void Near(const Tensor &x, const Tensor &y,
const double rel_err,
const double abs_err) {
ASSERT_EQ(x.dtype(), DataTypeToEnum<EXP_TYPE>::v());
ASSERT_EQ(y.dtype(), DataTypeToEnum<RES_TYPE>::v());
AssertSameDims(x, y);
......@@ -452,7 +454,8 @@ struct Expector<EXP_TYPE, RES_TYPE, true> {
for (int h = 0; h < x.dim(1); ++h) {
for (int w = 0; w < x.dim(2); ++w) {
for (int c = 0; c < x.dim(3); ++c) {
EXPECT_NEAR(*a, *b, abs_err) << "with index = [" << n << ", " << h
const double error = abs_err + rel_err * std::abs(*a);
EXPECT_NEAR(*a, *b, error) << "with index = [" << n << ", " << h
<< ", " << w << ", " << c << "]";
a++;
b++;
......@@ -462,7 +465,8 @@ struct Expector<EXP_TYPE, RES_TYPE, true> {
}
} else {
for (int i = 0; i < x.size(); ++i) {
EXPECT_NEAR(a[i], b[i], abs_err) << "a = " << a << " b = " << b
const double error = abs_err + rel_err * std::abs(a[i]);
EXPECT_NEAR(a[i], b[i], error) << "a = " << a << " b = " << b
<< " index = " << i;
}
}
......@@ -470,18 +474,22 @@ struct Expector<EXP_TYPE, RES_TYPE, true> {
};
template<typename T>
void ExpectTensorNear(const Tensor &x, const Tensor &y, const double abs_err) {
void ExpectTensorNear(const Tensor &x, const Tensor &y,
const double rel_err = 1e-5,
const double abs_err = 1e-8) {
static_assert(is_floating_point_type<T>::value,
"T is not a floating point type");
Expector<T, T>::Near(x, y, abs_err);
Expector<T, T>::Near(x, y, rel_err, abs_err);
}
template<typename EXP_TYPE, typename RES_TYPE>
void ExpectTensorNear(const Tensor &x, const Tensor &y, const double abs_err) {
void ExpectTensorNear(const Tensor &x, const Tensor &y,
const double rel_err = 1e-5,
const double abs_err = 1e-8) {
static_assert(is_floating_point_type<EXP_TYPE>::value &&
is_floating_point_type<RES_TYPE>::value,
"T is not a floating point type");
Expector<EXP_TYPE, RES_TYPE>::Near(x, y, abs_err);
Expector<EXP_TYPE, RES_TYPE>::Near(x, y, rel_err, abs_err);
}
template<DeviceType D, typename T>
......
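For reference, the updated ExpectTensorNear defaults to rel_err = 1e-5 and abs_err = 1e-8, so the call patterns seen throughout this diff reduce to the following sketch (tensor names are simply the ones used in these tests):

    // Exact ops keep the defaults:
    ExpectTensorNear<float>(*expected, *net.GetOutput("Output"));
    // Half-precision GPU paths pass looser explicit tolerances
    // (relative first, then absolute):
    ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-2, 1e-2);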
......@@ -137,7 +137,7 @@ void Complex(const std::vector<index_t> &input_shape,
auto output = net.GetTensor("OpenCLOutput");
if (DataTypeToEnum<T>::value == DT_HALF) {
ExpectTensorNear<float>(expected, *output, 1e-1);
ExpectTensorNear<float>(expected, *output, 1e-2, 1e-2);
} else {
ExpectTensorNear<float>(expected, *output, 1e-5);
}
......
......@@ -41,7 +41,7 @@ TEST_F(PoolingOpTest, MAX_VALID) {
auto expected =
CreateTensor<float>({1, 2, 2, 2}, {5, 21, 7, 23, 13, 29, 15, 31});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
TEST_F(PoolingOpTest, MAX_SAME) {
......@@ -67,7 +67,7 @@ TEST_F(PoolingOpTest, MAX_SAME) {
// Check
auto expected = CreateTensor<float>({1, 2, 2, 1}, {4, 5, 7, 8});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
TEST_F(PoolingOpTest, MAX_VALID_DILATION) {
......@@ -94,7 +94,7 @@ TEST_F(PoolingOpTest, MAX_VALID_DILATION) {
// Check
auto expected = CreateTensor<float>({1, 2, 2, 1}, {10, 11, 14, 15});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
TEST_F(PoolingOpTest, MAX_k2x2s2x2) {
......@@ -120,7 +120,7 @@ TEST_F(PoolingOpTest, MAX_k2x2s2x2) {
// Check
auto expected = CreateTensor<float>({1, 1, 5, 1}, {10, 12, 14, 16, 17});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
namespace {
......@@ -167,7 +167,7 @@ void SimpleMaxPooling3S2() {
// Check
auto expected = CreateTensor<float>({1, 1, 4, 1}, {20, 22, 24, 26});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -192,11 +192,10 @@ void MaxPooling3S2(const std::vector<index_t> &input_shape,
.AddIntsArg("strides", strides)
.AddIntArg("padding", padding)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<D, T>("Input", input_shape);
net.AddRandomInput<D, float>("Input", input_shape);
// run on cpu
net.RunOp();
......@@ -216,10 +215,15 @@ void MaxPooling3S2(const std::vector<index_t> &input_shape,
.AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
.Finalize(net.NewOperatorDef());
net.RunOp(D);
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
ImageToBuffer<D, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<T>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
if (DataTypeToEnum<T>::value == DT_HALF) {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-3, 1e-4);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5);
}
}
} // namespace
......@@ -276,7 +280,7 @@ TEST_F(PoolingOpTest, AVG_VALID) {
auto expected = CreateTensor<float>(
{1, 2, 2, 2}, {2.5, 18.5, 4.5, 20.5, 10.5, 26.5, 12.5, 28.5});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
namespace {
......@@ -309,7 +313,7 @@ void SimpleAvgPoolingTest() {
// Check
auto expected = CreateTensor<float>({1, 1, 4, 1}, {4.5, 6.5, 8.5, 10.5});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -356,10 +360,17 @@ void AvgPoolingTest(const std::vector<index_t> &shape,
.AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
.Finalize(net.NewOperatorDef());
net.RunOp(D);
ImageToBuffer<D, T>(&net, "OutputImage", "OPENCLOutput",
ImageToBuffer<D, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float, T>(expected, *net.GetOutput("OPENCLOutput"), 0.01);
if (DataTypeToEnum<T>::value == DT_HALF) {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-3, 1e-3);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5);
}
}
} // namespace
......@@ -448,7 +459,7 @@ void AvgPoolingNEONTest(const std::vector<index_t> &shape,
ExpectTensorNear<float>(*net.GetOutput("OutputExptected"),
*net.GetOutput("OutputNeon"),
0.01);
1e-5, 1e-4);
}
} // namespace
......
......@@ -35,7 +35,7 @@ TEST_F(ResizeBilinearTest, CPUResizeBilinearWOAlignCorners) {
// Check
auto expected = CreateTensor<float>({1, 1, 2, 3}, {0, 1, 2, 6, 7, 8});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) {
......@@ -60,7 +60,7 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) {
// Check
auto expected = CreateTensor<float>({1, 1, 2, 3}, {0, 1, 2, 9, 10, 11});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 0.001);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
namespace {
......@@ -113,7 +113,8 @@ void TestRandomResizeBilinear() {
// TODO(someone): support NEON
}
// Check
ExpectTensorNear<float>(expected, *net.GetOutput("DeviceOutput"), 0.001);
ExpectTensorNear<float>(expected, *net.GetOutput("DeviceOutput"),
1e-5, 1e-6);
}
}
} // namespace
......
......@@ -49,7 +49,7 @@ void Simple() {
{1, 1, 2, 4},
{0.25, 0.25, 0.25, 0.25, 0.0320586, 0.08714432, 0.23688282, 0.64391426});
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-7);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
} // namespace
......@@ -89,7 +89,8 @@ void Complex(const std::vector<index_t> &logits_shape) {
ImageToBuffer<D, float>(&net, "OutputImage", "OPENCLOutput",
kernels::BufferType::IN_OUT_CHANNEL);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 1e-5);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"),
1e-5);
}
} // namespace
......@@ -138,7 +139,7 @@ void SoftMaxNEONTest(const std::vector<index_t> &logits_shape) {
ExpectTensorNear<float>(*net.GetOutput("OutputExptected"),
*net.GetOutput("OutputNeon"),
0.01);
1e-5, 1e-5);
}
} // namespace
......
......@@ -36,7 +36,7 @@ void RunSpaceToBatch(const std::vector<index_t> &input_shape,
ImageToBuffer<D, float>(&net, "OutputImage", "Output",
kernels::BufferType::IN_OUT_CHANNEL);
// Check
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-8);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"));
}
template <DeviceType D>
......@@ -64,7 +64,7 @@ void RunBatchToSpace(const std::vector<index_t> &input_shape,
ImageToBuffer<D, float>(&net, "OutputImage", "Output",
kernels::BufferType::IN_OUT_CHANNEL);
// Check
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-8);
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"));
}
template <typename T>
......
......@@ -30,8 +30,7 @@ void TransposeNCHWTest(const std::vector<index_t> &input_shape) {
net.FillNHWCInputToNCHWInput<DeviceType::CPU, float>("InputNCHW", "Input");
ExpectTensorNear<float>(*net.GetOutput("InputNCHW"),
*net.GetOutput("Output"),
0.01);
*net.GetOutput("Output"));
}
} // namespace
......
......@@ -127,9 +127,11 @@ void WinogradConvolution(const index_t batch,
ImageToBuffer<D, float>(&net, "WinoOutputImage", "WinoOutput",
kernels::BufferType::IN_OUT_CHANNEL);
if (DataTypeToEnum<T>::value == DataType::DT_HALF) {
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"), 1e-1);
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"),
1e-2, 1e-2);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"), 1e-4);
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"),
1e-5, 1e-4);
}
}
} // namespace
......@@ -246,9 +248,11 @@ void WinogradConvolutionWithPad(const index_t batch,
ImageToBuffer<D, float>(&net, "WinoOutputImage", "WinoOutput",
kernels::BufferType::IN_OUT_CHANNEL);
if (DataTypeToEnum<T>::value == DataType::DT_HALF) {
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"), 1e-1);
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"),
1e-2, 1e-2);
} else {
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"), 1e-3);
ExpectTensorNear<float>(expected, *net.GetOutput("WinoOutput"),
1e-5, 1e-4);
}
}
} // namespace
......