提交 29b97706 编写于 作者: L Liangliang He

Refactor new_operator_def to NewOperatorDef

上级 de44231f
......@@ -17,7 +17,7 @@ static void AddNBenchmark(int iters, int n, int size) {
for (int i = 0; i < n; ++i) {
op_def_builder.Input(internal::MakeString("Input", i).c_str());
}
op_def_builder.Output("Output").Finalize(net.new_operator_def());
op_def_builder.Output("Output").Finalize(net.NewOperatorDef());
// Add input data
for (int i = 0; i < n; ++i) {
......
......@@ -17,7 +17,7 @@ TEST_F(AddnOpTest, AddnOp) {
.Input("Input2")
.Input("Input3")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input1", {1, 2, 3, 4});
......
......@@ -21,7 +21,7 @@ static void BatchNorm(
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<D, T>("Input", {batch, channels, height, width});
......
......@@ -21,7 +21,7 @@ void Simple() {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<D, float>("Input", {1, 1, 6, 2},
......@@ -73,7 +73,7 @@ TEST_F(BatchNormOpTest, SimpleRandomNeon) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......@@ -114,7 +114,7 @@ TEST_F(BatchNormOpTest, ComplexRandomNeon) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......@@ -155,7 +155,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::OPENCL, float>("Input", {batch, channels, height, width});
......@@ -201,7 +201,7 @@ TEST_F(BatchNormOpTest, ComplexRandomOPENCL) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::OPENCL, float>("Input", {batch, channels, height, width});
......
......@@ -20,7 +20,7 @@ static void ChannelShuffle(
.Input("Input")
.Output("Output")
.AddIntArg("group", group)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......
......@@ -15,7 +15,7 @@ TEST_F(ChannelShuffleOpTest, C8G4) {
.Input("Input")
.Output("Output")
.AddIntArg("group", 4)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
......
......@@ -17,7 +17,7 @@ static void ConcatHelper(int iters, int concat_dim, int dim1) {
.Input("Input1")
.Input("Axis")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
const int kDim0 = 100;
......
......@@ -18,7 +18,7 @@ TEST_F(ConcatOpTest, Simple_Horizon) {
.Input("Input1")
.Input("Axis")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
std::vector<index_t> input_shape = {4, 4};
std::vector<float> input0;
......@@ -56,7 +56,7 @@ TEST_F(ConcatOpTest, Simple_Vertical) {
.Input("Input1")
.Input("Axis")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
std::vector<index_t> input_shape = {4, 4};
std::vector<float> input0;
......@@ -99,7 +99,7 @@ TEST_F(ConcatOpTest, Random) {
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(("Input" + ToString(i)).c_str());
}
builder.Input("Axis").Output("Output").Finalize(net.new_operator_def());
builder.Input("Axis").Output("Output").Finalize(net.NewOperatorDef());
std::vector<index_t> shape_data;
GenerateRandomIntTypeData<index_t>({dim}, shape_data, 1, dim);
......
......@@ -35,7 +35,7 @@ static void Conv2d(int iters,
.AddIntsArg("strides", {stride, stride})
.AddIntArg("padding", padding)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<D, float>("Input", {batch, channels, height, width});
......
......@@ -21,7 +21,7 @@ TEST_F(Conv2dOpTest, Simple_VALID) {
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add args
......@@ -55,7 +55,7 @@ TEST_F(Conv2dOpTest, Simple_SAME) {
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -89,7 +89,7 @@ TEST_F(Conv2dOpTest, Combined) {
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -126,7 +126,7 @@ void TestConv1x1() {
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<D, float>(
......@@ -187,7 +187,7 @@ TEST_F(Conv2dOpTest, AlignedConvNxNS12) {
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, input_channels, height, width});
......@@ -236,7 +236,7 @@ TEST_F(Conv2dOpTest, UnalignedConvNxNS12) {
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, input_channels, height, width});
......
......@@ -22,7 +22,7 @@ void SimpleValidTest() {
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<D, float>("Input", {1, 2, 2, 3},
......@@ -69,7 +69,7 @@ void TestNxNS12(const index_t height, const index_t width) {
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<D, float>("Input", {batch, input_channels, height, width});
......
......@@ -33,7 +33,7 @@ static void DepthwiseConv2d(int iters,
.AddIntsArg("strides", {stride, stride})
.AddIntArg("padding", padding)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<D, float>("Input", {batch, channels, height, width});
......
......@@ -19,7 +19,7 @@ static void GlobalAvgPooling(
OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......
......@@ -14,7 +14,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_CPU) {
OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
std::vector<float> input(147);
......@@ -38,7 +38,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_NEON) {
OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
std::vector<float> input(147);
......
......@@ -138,7 +138,7 @@ class OpsTestNet {
});
}
OperatorDef *new_operator_def() {
// Appends a fresh, default-constructed OperatorDef to this test net's
// op_defs_ collection and returns a pointer to it, so callers (typically
// OpDefBuilder::Finalize) can populate the new definition in place.
// NOTE(review): the returned pointer refers to an element inside op_defs_;
// if that container reallocates on growth, earlier returned pointers may be
// invalidated — confirm op_defs_'s container type guarantees stability.
OperatorDef *NewOperatorDef() {
  op_defs_.emplace_back(OperatorDef());
  const size_t last_index = op_defs_.size() - 1;
  return &op_defs_[last_index];
}
......
......@@ -32,7 +32,7 @@ static void Pooling(int iters,
.AddIntsArg("strides", {stride, stride})
.AddIntArg("padding", padding)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......
......@@ -24,7 +24,7 @@ TEST_F(PoolingOpTest, MAX_VALID) {
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("pooling_type", PoolingType::MAX)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -53,7 +53,7 @@ TEST_F(PoolingOpTest, AVG_VALID) {
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("pooling_type", PoolingType::AVG)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -82,7 +82,7 @@ TEST_F(PoolingOpTest, MAX_SAME) {
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("pooling_type", PoolingType::MAX)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>("Input", {1, 1, 3, 3},
......@@ -108,7 +108,7 @@ TEST_F(PoolingOpTest, MAX_VALID_DILATION) {
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {2, 2})
.AddIntArg("pooling_type", PoolingType::MAX)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -135,7 +135,7 @@ TEST_F(PoolingOpTest, MAX_k2x2s2x2) {
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -161,7 +161,7 @@ TEST_F(PoolingOpTest, MAX_k3x3s2x2) {
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -188,7 +188,7 @@ TEST_F(PoolingOpTest, AVG_k2x2s2x2) {
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......
......@@ -16,7 +16,7 @@ static void ReluBenchmark(int iters, int size) {
OpDefBuilder("Relu", "ReluBM")
.Input("Input")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {size});
......
......@@ -15,7 +15,7 @@ TEST_F(ReluOpTest, ReluOp) {
OpDefBuilder("Relu", "ReluTest")
.Input("Input")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {1, 2, 3, 5});
......@@ -39,7 +39,7 @@ TEST_F(ReluOpTest, ReluOpWithMax) {
.Input("Input")
.Output("Output")
.AddFloatArg("max_limit", 0.5)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {1, 2, 3, 5});
......
......@@ -18,7 +18,7 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWOAlignCorners) {
.Input("Input")
.Input("OutSize")
.Output("Output")
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
vector<float> input(24);
......@@ -44,7 +44,7 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) {
.Input("OutSize")
.Output("Output")
.AddIntArg("align_corners", 1)
.Finalize(net.new_operator_def());
.Finalize(net.NewOperatorDef());
// Add input data
vector<float> input(24);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册