Commit 29b97706 authored by Liangliang He

Refactor new_operator_def to NewOperatorDef

Parent de44231f
@@ -17,7 +17,7 @@ static void AddNBenchmark(int iters, int n, int size) {
   for (int i = 0; i < n; ++i) {
     op_def_builder.Input(internal::MakeString("Input", i).c_str());
   }
-  op_def_builder.Output("Output").Finalize(net.new_operator_def());
+  op_def_builder.Output("Output").Finalize(net.NewOperatorDef());
   // Add input data
   for (int i = 0; i < n; ++i) {
...
@@ -17,7 +17,7 @@ TEST_F(AddnOpTest, AddnOp) {
       .Input("Input2")
       .Input("Input3")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input1", {1, 2, 3, 4});
...
@@ -21,7 +21,7 @@ static void BatchNorm(
       .Input("Var")
       .Input("Epsilon")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<D, T>("Input", {batch, channels, height, width});
...
@@ -21,7 +21,7 @@ void Simple() {
       .Input("Var")
       .Input("Epsilon")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<D, float>("Input", {1, 1, 6, 2},
@@ -73,7 +73,7 @@ TEST_F(BatchNormOpTest, SimpleRandomNeon) {
       .Input("Var")
       .Input("Epsilon")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
@@ -114,7 +114,7 @@ TEST_F(BatchNormOpTest, ComplexRandomNeon) {
       .Input("Var")
       .Input("Epsilon")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
@@ -155,7 +155,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
       .Input("Var")
       .Input("Epsilon")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::OPENCL, float>("Input", {batch, channels, height, width});
@@ -201,7 +201,7 @@ TEST_F(BatchNormOpTest, ComplexRandomOPENCL) {
       .Input("Var")
       .Input("Epsilon")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::OPENCL, float>("Input", {batch, channels, height, width});
...
@@ -20,7 +20,7 @@ static void ChannelShuffle(
       .Input("Input")
       .Output("Output")
       .AddIntArg("group", group)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
...
@@ -15,7 +15,7 @@ TEST_F(ChannelShuffleOpTest, C8G4) {
       .Input("Input")
       .Output("Output")
       .AddIntArg("group", 4)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
...
@@ -17,7 +17,7 @@ static void ConcatHelper(int iters, int concat_dim, int dim1) {
       .Input("Input1")
       .Input("Axis")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   const int kDim0 = 100;
...
@@ -18,7 +18,7 @@ TEST_F(ConcatOpTest, Simple_Horizon) {
       .Input("Input1")
       .Input("Axis")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   std::vector<index_t> input_shape = {4, 4};
   std::vector<float> input0;
@@ -56,7 +56,7 @@ TEST_F(ConcatOpTest, Simple_Vertical) {
       .Input("Input1")
       .Input("Axis")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   std::vector<index_t> input_shape = {4, 4};
   std::vector<float> input0;
@@ -99,7 +99,7 @@ TEST_F(ConcatOpTest, Random) {
   for (int i = 0; i < num_inputs; ++i) {
     builder = builder.Input(("Input" + ToString(i)).c_str());
   }
-  builder.Input("Axis").Output("Output").Finalize(net.new_operator_def());
+  builder.Input("Axis").Output("Output").Finalize(net.NewOperatorDef());
   std::vector<index_t> shape_data;
   GenerateRandomIntTypeData<index_t>({dim}, shape_data, 1, dim);
...
@@ -35,7 +35,7 @@ static void Conv2d(int iters,
       .AddIntsArg("strides", {stride, stride})
       .AddIntArg("padding", padding)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<D, float>("Input", {batch, channels, height, width});
...
@@ -21,7 +21,7 @@ TEST_F(Conv2dOpTest, Simple_VALID) {
       .AddIntsArg("strides", {1, 1})
       .AddIntArg("padding", Padding::VALID)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add args
@@ -55,7 +55,7 @@ TEST_F(Conv2dOpTest, Simple_SAME) {
       .AddIntsArg("strides", {1, 1})
       .AddIntArg("padding", Padding::SAME)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
@@ -89,7 +89,7 @@ TEST_F(Conv2dOpTest, Combined) {
       .AddIntsArg("strides", {2, 2})
       .AddIntArg("padding", Padding::SAME)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
@@ -126,7 +126,7 @@ void TestConv1x1() {
       .AddIntsArg("strides", {1, 1})
       .AddIntArg("padding", Padding::VALID)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<D, float>(
@@ -187,7 +187,7 @@ TEST_F(Conv2dOpTest, AlignedConvNxNS12) {
       .AddIntsArg("strides", {stride_h, stride_w})
       .AddIntArg("padding", type)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, input_channels, height, width});
@@ -236,7 +236,7 @@ TEST_F(Conv2dOpTest, UnalignedConvNxNS12) {
       .AddIntsArg("strides", {stride_h, stride_w})
       .AddIntArg("padding", type)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, input_channels, height, width});
...
@@ -22,7 +22,7 @@ void SimpleValidTest() {
       .AddIntsArg("strides", {1, 1})
       .AddIntArg("padding", Padding::VALID)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<D, float>("Input", {1, 2, 2, 3},
@@ -69,7 +69,7 @@ void TestNxNS12(const index_t height, const index_t width) {
       .AddIntsArg("strides", {stride_h, stride_w})
       .AddIntArg("padding", type)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<D, float>("Input", {batch, input_channels, height, width});
...
@@ -33,7 +33,7 @@ static void DepthwiseConv2d(int iters,
       .AddIntsArg("strides", {stride, stride})
       .AddIntArg("padding", padding)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<D, float>("Input", {batch, channels, height, width});
...
@@ -19,7 +19,7 @@ static void GlobalAvgPooling(
   OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
...
@@ -14,7 +14,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_CPU) {
   OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   std::vector<float> input(147);
@@ -38,7 +38,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_NEON) {
   OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   std::vector<float> input(147);
...
@@ -138,7 +138,7 @@ class OpsTestNet {
     });
   }
-  OperatorDef *new_operator_def() {
+  OperatorDef *NewOperatorDef() {
     op_defs_.emplace_back(OperatorDef());
     return &op_defs_[op_defs_.size() - 1];
   }
...
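For reference, every test hunk in this commit follows the same call pattern, and the only API surface that changes is the renamed OpsTestNet::NewOperatorDef() accessor shown in the class diff above. A minimal sketch of how a test exercises it after this commit (the include path, fixture name, and the RunOp() call are assumptions drawn from the harness usage visible in the hunks, not lines taken from this commit):

#include "mace/ops/ops_test_util.h"  // assumed header declaring OpsTestNet and OpDefBuilder

TEST_F(ReluOpTest, ReluOpSketch) {
  OpsTestNet net;
  // Finalize() writes the built definition into the OperatorDef slot that
  // NewOperatorDef() appends to op_defs_ and returns (see the class diff above).
  OpDefBuilder("Relu", "ReluTest")
      .Input("Input")
      .Output("Output")
      .Finalize(net.NewOperatorDef());
  // Add input data and run, mirroring the existing tests.
  net.AddRandomInput<DeviceType::CPU, float>("Input", {1, 2, 3, 5});
  net.RunOp();  // assumed harness helper; not shown in this diff
}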
@@ -32,7 +32,7 @@ static void Pooling(int iters,
       .AddIntsArg("strides", {stride, stride})
       .AddIntArg("padding", padding)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
...
@@ -24,7 +24,7 @@ TEST_F(PoolingOpTest, MAX_VALID) {
       .AddIntArg("padding", Padding::VALID)
       .AddIntsArg("dilations", {1, 1})
       .AddIntArg("pooling_type", PoolingType::MAX)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
@@ -53,7 +53,7 @@ TEST_F(PoolingOpTest, AVG_VALID) {
       .AddIntArg("padding", Padding::VALID)
       .AddIntsArg("dilations", {1, 1})
       .AddIntArg("pooling_type", PoolingType::AVG)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
@@ -82,7 +82,7 @@ TEST_F(PoolingOpTest, MAX_SAME) {
       .AddIntArg("padding", Padding::SAME)
       .AddIntsArg("dilations", {1, 1})
       .AddIntArg("pooling_type", PoolingType::MAX)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>("Input", {1, 1, 3, 3},
@@ -108,7 +108,7 @@ TEST_F(PoolingOpTest, MAX_VALID_DILATION) {
       .AddIntArg("padding", Padding::VALID)
       .AddIntsArg("dilations", {2, 2})
       .AddIntArg("pooling_type", PoolingType::MAX)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
@@ -135,7 +135,7 @@ TEST_F(PoolingOpTest, MAX_k2x2s2x2) {
       .AddIntsArg("strides", {2, 2})
       .AddIntArg("padding", Padding::SAME)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
@@ -161,7 +161,7 @@ TEST_F(PoolingOpTest, MAX_k3x3s2x2) {
       .AddIntsArg("strides", {2, 2})
       .AddIntArg("padding", Padding::VALID)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
@@ -188,7 +188,7 @@ TEST_F(PoolingOpTest, AVG_k2x2s2x2) {
       .AddIntsArg("strides", {2, 2})
       .AddIntArg("padding", Padding::SAME)
       .AddIntsArg("dilations", {1, 1})
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddInputFromArray<DeviceType::CPU, float>(
...
@@ -16,7 +16,7 @@ static void ReluBenchmark(int iters, int size) {
   OpDefBuilder("Relu", "ReluBM")
       .Input("Input")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {size});
...
@@ -15,7 +15,7 @@ TEST_F(ReluOpTest, ReluOp) {
   OpDefBuilder("Relu", "ReluTest")
       .Input("Input")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {1, 2, 3, 5});
@@ -39,7 +39,7 @@ TEST_F(ReluOpTest, ReluOpWithMax) {
       .Input("Input")
       .Output("Output")
       .AddFloatArg("max_limit", 0.5)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   net.AddRandomInput<DeviceType::CPU, float>("Input", {1, 2, 3, 5});
...
@@ -18,7 +18,7 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWOAlignCorners) {
       .Input("Input")
       .Input("OutSize")
       .Output("Output")
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   vector<float> input(24);
@@ -44,7 +44,7 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) {
       .Input("OutSize")
       .Output("Output")
       .AddIntArg("align_corners", 1)
-      .Finalize(net.new_operator_def());
+      .Finalize(net.NewOperatorDef());
   // Add input data
   vector<float> input(24);
...