Commit 65a567b2 authored by Liangliang He

Update test helper net to reference

Parent 615ab78b
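In short: OpsTestBase::test_net() now returns a reference to the fixture-owned OpsTestNet instead of a pointer, so every test switches from "auto net = test_net(); net->..." to "auto& net = test_net(); net....". The sketch below distills that pattern; only the names that appear in the diff are real, while the include and the exact placement of the test_net_ member are assumptions.

    #include "gtest/gtest.h"

    class OpsTestNet { /* helpers such as RunOp(), AddInputFromArray(), ... */ };

    class OpsTestBase : public ::testing::Test {
     public:
      // Before this commit: OpsTestNet* test_net() { return &test_net_; }
      // After: return a reference, so call sites cannot hold a null pointer
      // and use '.' instead of '->'.
      OpsTestNet& test_net() { return test_net_; }

     protected:
      OpsTestNet test_net_;  // assumed fixture-owned member behind the old &test_net_
    };

    // Call sites change accordingly:
    //   auto net = test_net();   net->RunOp();   // before (OpsTestNet*)
    //   auto& net = test_net();  net.RunOp();    // after  (OpsTestNet&)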
@@ -125,7 +125,7 @@ void Benchmark::Register() {
 }
 void Benchmark::Run(int arg1, int arg2, int* run_count, double* run_seconds) {
-  static const int64_t kMinIters = 100;
+  static const int64_t kMinIters = 10;
   static const int64_t kMaxIters = 1000000000;
   static const double kMinTime = 0.5;
   int64_t iters = kMinIters;
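For context on the hunk above: kMinIters is the starting iteration count of the benchmark's timing loop, which grows the count until one run takes at least kMinTime seconds or reaches kMaxIters, so lowering it from 100 to 10 only makes the first probe cheaper. A rough, hedged sketch of such a loop follows; it illustrates the idea only and is not MACE's actual Benchmark::Run (the callable parameter, pointer types, and growth factor are assumptions).

    #include <algorithm>
    #include <chrono>
    #include <cstdint>
    #include <functional>

    // Illustrative only: grow the iteration count from kMinIters until one run
    // takes at least kMinTime seconds or the count reaches kMaxIters.
    void RunBenchmarkSketch(const std::function<void()>& fn,
                            int64_t* run_count, double* run_seconds) {
      static const int64_t kMinIters = 10;         // was 100 before this commit
      static const int64_t kMaxIters = 1000000000;
      static const double kMinTime = 0.5;

      int64_t iters = kMinIters;
      double seconds = 0.0;
      while (true) {
        auto start = std::chrono::steady_clock::now();
        for (int64_t i = 0; i < iters; ++i) fn();
        seconds = std::chrono::duration<double>(
                      std::chrono::steady_clock::now() - start).count();
        if (seconds >= kMinTime || iters >= kMaxIters) break;
        iters = std::min(iters * 10, kMaxIters);   // assumed growth factor
      }
      *run_count = iters;
      *run_seconds = seconds;
    }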
@@ -11,7 +11,7 @@ class BatchNormOpTest : public OpsTestBase {};
 TEST_F(BatchNormOpTest, Simple) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("BatchNorm", "BatchNormTest")
       .Input("Input")
       .Input("Scale")
@@ -19,25 +19,25 @@ TEST_F(BatchNormOpTest, Simple) {
       .Input("Mean")
       .Input("Var")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 1, 6, 2},
+  net.AddInputFromArray<float>("Input", {1, 1, 6, 2},
       {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
-  net->AddInputFromArray<float>("Scale", {1}, {4.0f});
-  net->AddInputFromArray<float>("Offset", {1}, {2.0});
-  net->AddInputFromArray<float>("Mean", {1}, {10});
-  net->AddInputFromArray<float>("Var", {1}, {11.67f});
+  net.AddInputFromArray<float>("Scale", {1}, {4.0f});
+  net.AddInputFromArray<float>("Offset", {1}, {2.0});
+  net.AddInputFromArray<float>("Mean", {1}, {10});
+  net.AddInputFromArray<float>("Var", {1}, {11.67f});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 1, 6, 2},
       {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
       3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.01);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.01);
 }
 }
@@ -12,73 +12,73 @@ class Conv2dOpTest : public OpsTestBase {};
 TEST_F(Conv2dOpTest, Simple_VALID) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Conv2d", "Conv2dTest")
       .Input("Input")
       .Input("Filter")
       .Input("Bias")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("strides", {1, 1});
-  net->AddIntArg("padding", Padding::VALID);
-  net->AddIntsArg("dilations", {1, 1});
+  net.AddIntsArg("strides", {1, 1});
+  net.AddIntArg("padding", Padding::VALID);
+  net.AddIntsArg("dilations", {1, 1});
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 2, 3, 3},
+  net.AddInputFromArray<float>("Input", {1, 2, 3, 3},
      {1, 1, 1,
      1, 1, 1,
      1, 1, 1,
      1, 1, 1,
      1, 1, 1,
      1, 1, 1});
-  net->AddInputFromArray<float>("Filter", {1, 2, 3, 3},
+  net.AddInputFromArray<float>("Filter", {1, 2, 3, 3},
      {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
      1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
      1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
-  net->AddInputFromArray<float>("Bias", {1}, {0.1f});
+  net.AddInputFromArray<float>("Bias", {1}, {0.1f});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 1, 1, 1}, {18.1f});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 TEST_F(Conv2dOpTest, Simple_SAME) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Conv2d", "Conv2dTest")
       .Input("Input")
       .Input("Filter")
       .Input("Bias")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("strides", {1, 1});
-  net->AddIntArg("padding", Padding::SAME);
-  net->AddIntsArg("dilations", {1, 1});
+  net.AddIntsArg("strides", {1, 1});
+  net.AddIntArg("padding", Padding::SAME);
+  net.AddIntsArg("dilations", {1, 1});
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 2, 3, 3},
+  net.AddInputFromArray<float>("Input", {1, 2, 3, 3},
      {1, 1, 1,
      1, 1, 1,
      1, 1, 1,
      1, 1, 1,
      1, 1, 1,
      1, 1, 1});
-  net->AddInputFromArray<float>("Filter", {1, 2, 3, 3},
+  net.AddInputFromArray<float>("Filter", {1, 2, 3, 3},
      {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
      1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
      1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
-  net->AddInputFromArray<float>("Bias", {1}, {0.1f});
+  net.AddInputFromArray<float>("Bias", {1}, {0.1f});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 1, 3, 3},
@@ -86,26 +86,26 @@ TEST_F(Conv2dOpTest, Simple_SAME) {
      12.1f, 18.1f, 12.1f,
      8.1f, 12.1f, 8.1f});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 TEST_F(Conv2dOpTest, Combined) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Conv2d", "Conv2dTest")
       .Input("Input")
       .Input("Filter")
       .Input("Bias")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("strides", {2, 2});
-  net->AddIntArg("padding", Padding::SAME);
-  net->AddIntsArg("dilations", {1, 1});
+  net.AddIntsArg("strides", {2, 2});
+  net.AddIntArg("padding", Padding::SAME);
+  net.AddIntsArg("dilations", {1, 1});
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 2, 5, 5},
+  net.AddInputFromArray<float>("Input", {1, 2, 5, 5},
      {1, 1, 1, 1, 1,
      1, 1, 1, 1, 1,
      1, 1, 1, 1, 1,
@@ -116,15 +116,15 @@ TEST_F(Conv2dOpTest, Combined) {
      1, 1, 1, 1, 1,
      1, 1, 1, 1, 1,
      1, 1, 1, 1, 1});
-  net->AddInputFromArray<float>("Filter", {2, 2, 3, 3},
+  net.AddInputFromArray<float>("Filter", {2, 2, 3, 3},
      {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
      1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
      0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
      0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f});
-  net->AddInputFromArray<float>("Bias", {2}, {0.1f, 0.2f});
+  net.AddInputFromArray<float>("Bias", {2}, {0.1f, 0.2f});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 2, 3, 3},
@@ -136,26 +136,26 @@ TEST_F(Conv2dOpTest, Combined) {
      4.2f, 6.2f, 4.2f});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 TEST_F(Conv2dOpTest, Conv1x1) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Conv2d", "Conv2dTest")
       .Input("Input")
       .Input("Filter")
       .Input("Bias")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("strides", {1, 1});
-  net->AddIntArg("padding", Padding::VALID);
-  net->AddIntsArg("dilations", {1, 1});
+  net.AddIntsArg("strides", {1, 1});
+  net.AddIntArg("padding", Padding::VALID);
+  net.AddIntsArg("dilations", {1, 1});
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 5, 3, 10},
+  net.AddInputFromArray<float>("Input", {1, 5, 3, 10},
      {1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -171,13 +171,13 @@ TEST_F(Conv2dOpTest, Conv1x1) {
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
-  net->AddInputFromArray<float>("Filter", {2, 5, 1, 1},
+  net.AddInputFromArray<float>("Filter", {2, 5, 1, 1},
      {1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
      2.0f, 2.0f, 2.0f, 2.0f, 2.0f});
-  net->AddInputFromArray<float>("Bias", {2}, {0.1f, 0.2f});
+  net.AddInputFromArray<float>("Bias", {2}, {0.1f, 0.2f});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 2, 3, 10},
@@ -188,7 +188,7 @@ TEST_F(Conv2dOpTest, Conv1x1) {
      10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f,
      10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f, 10.2f});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 // TODO we need more tests
@@ -136,7 +136,7 @@ class OpsTestNet {
 class OpsTestBase : public ::testing::Test {
  public:
-  OpsTestNet* test_net() { return &test_net_; };
+  OpsTestNet& test_net() { return test_net_; };
  protected:
   virtual void TearDown() {
@@ -15,21 +15,21 @@ class PoolingOpTest : public OpsTestBase {};
 TEST_F(PoolingOpTest, MAX_VALID) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Pooling", "PoolingTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("kernels", {2, 2});
-  net->AddIntsArg("strides", {2, 2});
-  net->AddIntArg("padding", Padding::VALID);
-  net->AddIntsArg("dilations", {1, 1});
-  net->AddIntArg("pooling_type", PoolingType::MAX);
+  net.AddIntsArg("kernels", {2, 2});
+  net.AddIntsArg("strides", {2, 2});
+  net.AddIntArg("padding", Padding::VALID);
+  net.AddIntsArg("dilations", {1, 1});
+  net.AddIntArg("pooling_type", PoolingType::MAX);
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 2, 4, 4},
+  net.AddInputFromArray<float>("Input", {1, 2, 4, 4},
      {0, 1, 2, 3,
      4, 5, 6, 7,
      8, 9, 10, 11,
@@ -40,33 +40,33 @@ TEST_F(PoolingOpTest, MAX_VALID) {
      28, 29, 30, 31});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 2, 2, 2},
      {5, 7, 13, 15, 21, 23, 29, 31});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 TEST_F(PoolingOpTest, AVG_VALID) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Pooling", "PoolingTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("kernels", {2, 2});
-  net->AddIntsArg("strides", {2, 2});
-  net->AddIntArg("padding", Padding::VALID);
-  net->AddIntsArg("dilations", {1, 1});
-  net->AddIntArg("pooling_type", PoolingType::AVG);
+  net.AddIntsArg("kernels", {2, 2});
+  net.AddIntsArg("strides", {2, 2});
+  net.AddIntArg("padding", Padding::VALID);
+  net.AddIntsArg("dilations", {1, 1});
+  net.AddIntArg("pooling_type", PoolingType::AVG);
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 2, 4, 4},
+  net.AddInputFromArray<float>("Input", {1, 2, 4, 4},
      {0, 1, 2, 3,
      4, 5, 6, 7,
      8, 9, 10, 11,
@@ -77,74 +77,74 @@ TEST_F(PoolingOpTest, AVG_VALID) {
      28, 29, 30, 31});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 2, 2, 2},
      {2.5, 4.5, 10.5, 12.5, 18.5, 20.5, 26.5, 28.5});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 TEST_F(PoolingOpTest, MAX_SAME) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Pooling", "PoolingTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("kernels", {2, 2});
-  net->AddIntsArg("strides", {2, 2});
-  net->AddIntArg("padding", Padding::SAME);
-  net->AddIntsArg("dilations", {1, 1});
-  net->AddIntArg("pooling_type", PoolingType::MAX);
+  net.AddIntsArg("kernels", {2, 2});
+  net.AddIntsArg("strides", {2, 2});
+  net.AddIntArg("padding", Padding::SAME);
+  net.AddIntsArg("dilations", {1, 1});
+  net.AddIntArg("pooling_type", PoolingType::MAX);
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 1, 3, 3},
+  net.AddInputFromArray<float>("Input", {1, 1, 3, 3},
      {0, 1, 2,
      3, 4, 5,
      6, 7, 8});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 1, 2, 2},
      {4, 5, 7, 8});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 TEST_F(PoolingOpTest, MAX_VALID_DILATION) {
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("Pooling", "PoolingTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add args
-  net->AddIntsArg("kernels", {2, 2});
-  net->AddIntsArg("strides", {1, 1});
-  net->AddIntArg("padding", Padding::VALID);
-  net->AddIntsArg("dilations", {2, 2});
-  net->AddIntArg("pooling_type", PoolingType::MAX);
+  net.AddIntsArg("kernels", {2, 2});
+  net.AddIntsArg("strides", {1, 1});
+  net.AddIntArg("padding", Padding::VALID);
+  net.AddIntsArg("dilations", {2, 2});
+  net.AddIntArg("pooling_type", PoolingType::MAX);
   // Add input data
-  net->AddInputFromArray<float>("Input", {1, 1, 4, 4},
+  net.AddInputFromArray<float>("Input", {1, 1, 4, 4},
      {0, 1, 2, 3,
      4, 5, 6, 7,
      8, 9, 10, 11,
     12, 13, 14, 15});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 1, 2, 2},
      {10, 11, 14, 15});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
@@ -13,51 +13,51 @@ class ResizeBilinearTest : public OpsTestBase {};
 TEST_F(ResizeBilinearTest, ResizeBilinearWOAlignCorners) {
   testing::internal::LogToStderr();
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("ResizeBilinear", "ResizeBilinearTest")
       .Input("Input")
       .Input("OutSize")
      .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
   // Add input data
   vector<float> input(24);
   std::iota(begin(input), end(input), 0);
-  net->AddInputFromArray<float>("Input", {1, 3, 2, 4}, input);
-  net->AddInputFromArray<index_t>("OutSize", {2}, {1, 2});
+  net.AddInputFromArray<float>("Input", {1, 3, 2, 4}, input);
+  net.AddInputFromArray<index_t>("OutSize", {2}, {1, 2});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 3, 1, 2}, {0, 2, 8, 10, 16, 18});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }
 TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) {
   testing::internal::LogToStderr();
   // Construct graph
-  auto net = test_net();
+  auto& net = test_net();
   OpDefBuilder("ResizeBilinear", "ResizeBilinearTest")
       .Input("Input")
       .Input("OutSize")
      .Output("Output")
-      .Finalize(net->operator_def());
+      .Finalize(net.operator_def());
-  net->AddIntArg("align_corners", 1);
+  net.AddIntArg("align_corners", 1);
   // Add input data
   vector<float> input(24);
   std::iota(begin(input), end(input), 0);
-  net->AddInputFromArray<float>("Input", {1, 3, 2, 4}, input);
-  net->AddInputFromArray<index_t>("OutSize", {2}, {1, 2});
+  net.AddInputFromArray<float>("Input", {1, 3, 2, 4}, input);
+  net.AddInputFromArray<index_t>("OutSize", {2}, {1, 2});
   // Run
-  net->RunOp();
+  net.RunOp();
   // Check
   Tensor expected = CreateTensor<float>({1, 3, 1, 2}, {0, 3, 8, 11, 16, 19});
-  ExpectTensorNear<float>(expected, *net->GetOutput("Output"), 0.001);
+  ExpectTensorNear<float>(expected, *net.GetOutput("Output"), 0.001);
 }