提交 de44231f 编写于 作者: L Liangliang He

Update conv2d benchmark test

上级 de992bf8
......@@ -89,7 +89,6 @@ extern void Conv2dOpenclK1x1S1(const Tensor *input, const Tensor *filter,
MACE_CHECK(input_batch == batch && input_height == height &&
input_width == width);
// Conv1x1Naive(input, filter, bias, output);
Conv1x1V2(input, filter, bias, output);
};
......
......@@ -17,7 +17,7 @@ static void AddNBenchmark(int iters, int n, int size) {
for (int i = 0; i < n; ++i) {
op_def_builder.Input(internal::MakeString("Input", i).c_str());
}
op_def_builder.Output("Output").Finalize(net.operator_def());
op_def_builder.Output("Output").Finalize(net.new_operator_def());
// Add input data
for (int i = 0; i < n; ++i) {
......
......@@ -17,7 +17,7 @@ TEST_F(AddnOpTest, AddnOp) {
.Input("Input2")
.Input("Input3")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input1", {1, 2, 3, 4});
......
......@@ -21,7 +21,7 @@ static void BatchNorm(
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<D, T>("Input", {batch, channels, height, width});
......
......@@ -21,7 +21,7 @@ void Simple() {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<D, float>("Input", {1, 1, 6, 2},
......@@ -73,7 +73,7 @@ TEST_F(BatchNormOpTest, SimpleRandomNeon) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......@@ -114,7 +114,7 @@ TEST_F(BatchNormOpTest, ComplexRandomNeon) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......@@ -155,7 +155,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::OPENCL, float>("Input", {batch, channels, height, width});
......@@ -201,7 +201,7 @@ TEST_F(BatchNormOpTest, ComplexRandomOPENCL) {
.Input("Var")
.Input("Epsilon")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::OPENCL, float>("Input", {batch, channels, height, width});
......
......@@ -19,10 +19,10 @@ static void ChannelShuffle(
OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.AddIntArg("group", group)
.Finalize(net.new_operator_def());
// Add input data
net.AddIntArg("group", group);
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
// Warm-up
......
......@@ -14,9 +14,9 @@ TEST_F(ChannelShuffleOpTest, C8G4) {
OpDefBuilder("ChannelShuffle", "ChannelShuffleTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.AddIntArg("group", 4)
.Finalize(net.new_operator_def());
net.AddIntArg("group", 4);
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......
......@@ -17,7 +17,7 @@ static void ConcatHelper(int iters, int concat_dim, int dim1) {
.Input("Input1")
.Input("Axis")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
const int kDim0 = 100;
......
......@@ -18,7 +18,7 @@ TEST_F(ConcatOpTest, Simple_Horizon) {
.Input("Input1")
.Input("Axis")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
std::vector<index_t> input_shape = {4, 4};
std::vector<float> input0;
......@@ -56,7 +56,7 @@ TEST_F(ConcatOpTest, Simple_Vertical) {
.Input("Input1")
.Input("Axis")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
std::vector<index_t> input_shape = {4, 4};
std::vector<float> input0;
......@@ -99,7 +99,7 @@ TEST_F(ConcatOpTest, Random) {
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(("Input" + ToString(i)).c_str());
}
builder.Input("Axis").Output("Output").Finalize(net.operator_def());
builder.Input("Axis").Output("Output").Finalize(net.new_operator_def());
std::vector<index_t> shape_data;
GenerateRandomIntTypeData<index_t>({dim}, shape_data, 1, dim);
......
......@@ -3,6 +3,7 @@
//
#include <algorithm>
#include <sstream>
#include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h"
......@@ -13,6 +14,7 @@ namespace mace {
template <DeviceType D, typename T>
static void Conv2d(int iters,
int iters_to_sync,
int batch,
int channels,
int height,
......@@ -30,17 +32,15 @@ static void Conv2d(int iters,
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {stride, stride});
net.AddIntArg("padding", padding);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {stride, stride})
.AddIntArg("padding", padding)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<D, float>("Input", {batch, channels, height, width});
net.AddRandomInput<D, float>("Filter",
{output_channels, channels, kernel_h, kernel_w});
{output_channels, channels, kernel_h, kernel_w});
net.AddRandomInput<D, float>("Bias", {output_channels});
// Warm-up
......@@ -52,10 +52,17 @@ static void Conv2d(int iters,
mace::testing::StartTiming();
while (iters--) {
net.RunOp(D);
net.Sync();
if (iters % iters_to_sync == 0) {
net.Sync();
}
}
}
// In common network, there are usually more than 1 layers, this is used to
// approximate the amortized latency. The OpenCL runtime for Mali/Adreno is
// in-order.
constexpr int kItersToSync = 10;
#define BM_CONV_2D_MACRO(N, C, H, W, KH, KW, STRIDE, P, OC, TYPE, DEVICE) \
static void \
BM_CONV_2D_##N##_##C##_##H##_##W##_K##KH##x##KW##S##STRIDE##_##P##_##OC##_##TYPE##_##DEVICE( \
......@@ -63,8 +70,8 @@ static void Conv2d(int iters,
const int64_t tot = static_cast<int64_t>(iters) * N * C * H * W; \
mace::testing::ItemsProcessed(tot); \
mace::testing::BytesProcessed(tot *(sizeof(TYPE))); \
Conv2d<DEVICE, TYPE>(iters, N, C, H, W, KH, KW, STRIDE, mace::Padding::P, \
OC); \
Conv2d<DEVICE, TYPE>(iters, kItersToSync, N, C, H, W, KH, KW, STRIDE, \
mace::Padding::P, OC); \
} \
BENCHMARK( \
BM_CONV_2D_##N##_##C##_##H##_##W##_K##KH##x##KW##S##STRIDE##_##P##_##OC##_##TYPE##_##DEVICE)
......
......@@ -18,12 +18,12 @@ TEST_F(Conv2dOpTest, Simple_VALID) {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add args
net.AddIntsArg("strides", {1, 1});
net.AddIntArg("padding", Padding::VALID);
net.AddIntsArg("dilations", {1, 1});
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -52,12 +52,10 @@ TEST_F(Conv2dOpTest, Simple_SAME) {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {1, 1});
net.AddIntArg("padding", Padding::SAME);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -88,12 +86,10 @@ TEST_F(Conv2dOpTest, Combined) {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {2, 2});
net.AddIntArg("padding", Padding::SAME);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -127,12 +123,10 @@ void TestConv1x1() {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {1, 1});
net.AddIntArg("padding", Padding::VALID);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<D, float>(
......@@ -190,12 +184,10 @@ TEST_F(Conv2dOpTest, AlignedConvNxNS12) {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {stride_h, stride_w});
net.AddIntArg("padding", type);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, input_channels, height, width});
......@@ -241,12 +233,10 @@ TEST_F(Conv2dOpTest, UnalignedConvNxNS12) {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {stride_h, stride_w});
net.AddIntArg("padding", type);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, input_channels, height, width});
......
......@@ -19,12 +19,10 @@ void SimpleValidTest() {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {1, 1});
net.AddIntArg("padding", Padding::VALID);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<D, float>("Input", {1, 2, 2, 3},
......@@ -68,12 +66,10 @@ void TestNxNS12(const index_t height, const index_t width) {
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {stride_h, stride_w});
net.AddIntArg("padding", type);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<D, float>("Input", {batch, input_channels, height, width});
......
......@@ -30,12 +30,10 @@ static void DepthwiseConv2d(int iters,
.Input("Filter")
.Input("Bias")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("strides", {stride, stride});
net.AddIntArg("padding", padding);
net.AddIntsArg("dilations", {1, 1});
.AddIntsArg("strides", {stride, stride})
.AddIntArg("padding", padding)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<D, float>("Input", {batch, channels, height, width});
......
......@@ -19,7 +19,7 @@ static void GlobalAvgPooling(
OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......
......@@ -14,7 +14,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_CPU) {
OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
std::vector<float> input(147);
......@@ -38,7 +38,7 @@ TEST_F(GlobalAvgPoolingOpTest, 3x7x7_NEON) {
OpDefBuilder("GlobalAvgPooling", "GlobalAvgPoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
std::vector<float> input(147);
......
......@@ -17,21 +17,70 @@ namespace mace {
class OpDefBuilder {
public:
OpDefBuilder(const char *type, const char *name) {
OpDefBuilder(const char *type, const std::string &name) {
op_def_.set_type(type);
op_def_.set_name(name);
}
OpDefBuilder &Input(const char *input_name) {
OpDefBuilder &Input(const std::string &input_name) {
op_def_.add_input(input_name);
return *this;
}
OpDefBuilder &Output(const char *output_name) {
OpDefBuilder &Output(const std::string &output_name) {
op_def_.add_output(output_name);
return *this;
}
// Adds a scalar int argument to the op under construction.
// Returns *this by reference (not by value) so chained calls do not
// copy the entire accumulated OperatorDef, matching Input()/Output().
OpDefBuilder &AddIntArg(const std::string &name, const int value) {
  auto arg = op_def_.add_arg();
  arg->set_name(name);
  arg->set_i(value);
  return *this;
}
// Adds a scalar float argument to the op under construction.
// Returns *this by reference (not by value) so chained calls do not
// copy the entire accumulated OperatorDef, matching Input()/Output().
OpDefBuilder &AddFloatArg(const std::string &name, const float value) {
  auto arg = op_def_.add_arg();
  arg->set_name(name);
  arg->set_f(value);
  return *this;
}
// Adds a string argument to the op under construction.
// Returns *this by reference (not by value) so chained calls do not
// copy the entire accumulated OperatorDef, matching Input()/Output().
OpDefBuilder &AddStringArg(const std::string &name, const char *value) {
  auto arg = op_def_.add_arg();
  arg->set_name(name);
  arg->set_s(value);
  return *this;
}
// Adds a repeated-int argument to the op under construction.
// Returns *this by reference (not by value) so chained calls do not
// copy the entire accumulated OperatorDef, matching Input()/Output().
OpDefBuilder &AddIntsArg(const std::string &name,
                         const std::vector<int> &values) {
  auto arg = op_def_.add_arg();
  arg->set_name(name);
  for (auto value : values) {
    arg->add_ints(value);
  }
  return *this;
}
// Adds a repeated-float argument to the op under construction.
// Returns *this by reference (not by value) so chained calls do not
// copy the entire accumulated OperatorDef, matching Input()/Output().
OpDefBuilder &AddFloatsArg(const std::string &name,
                           const std::vector<float> &values) {
  auto arg = op_def_.add_arg();
  arg->set_name(name);
  for (auto value : values) {
    arg->add_floats(value);
  }
  return *this;
}
// Adds a repeated-string argument to the op under construction.
// Returns *this by reference (not by value) so chained calls do not
// copy the entire accumulated OperatorDef, matching Input()/Output().
OpDefBuilder &AddStringsArg(const std::string &name,
                            const std::vector<const char *> &values) {
  auto arg = op_def_.add_arg();
  arg->set_name(name);
  for (auto value : values) {
    arg->add_strings(value);
  }
  return *this;
}
void Finalize(OperatorDef *op_def) const {
MACE_CHECK(op_def != nullptr, "input should not be null.");
*op_def = op_def_;
......@@ -45,7 +94,7 @@ class OpsTestNet {
OpsTestNet() {}
template <DeviceType D, typename T>
void AddInputFromArray(const char *name,
void AddInputFromArray(const std::string &name,
const std::vector<index_t> &shape,
const std::vector<T> &data) {
Tensor *input =
......@@ -58,7 +107,7 @@ class OpsTestNet {
}
template <DeviceType D, typename T>
void AddRepeatedInput(const char *name,
void AddRepeatedInput(const std::string &name,
const std::vector<index_t> &shape,
const T data) {
Tensor *input =
......@@ -70,7 +119,7 @@ class OpsTestNet {
}
template <DeviceType D, typename T>
void AddRandomInput(const char *name,
void AddRandomInput(const std::string &name,
const std::vector<index_t> &shape,
bool positive = false) {
Tensor *input =
......@@ -89,56 +138,18 @@ class OpsTestNet {
});
}
void AddIntArg(const char *name, const int value) {
auto arg = op_def_.add_arg();
arg->set_name(name);
arg->set_i(value);
}
void AddFloatArg(const char *name, const float value) {
auto arg = op_def_.add_arg();
arg->set_name(name);
arg->set_f(value);
}
void AddStringArg(const char *name, const char *value) {
auto arg = op_def_.add_arg();
arg->set_name(name);
arg->set_s(value);
}
void AddIntsArg(const char *name, const std::vector<int> &values) {
auto arg = op_def_.add_arg();
arg->set_name(name);
for (auto value : values) {
arg->add_ints(value);
}
}
void AddFloatsArg(const char *name, const std::vector<float> &values) {
auto arg = op_def_.add_arg();
arg->set_name(name);
for (auto value : values) {
arg->add_floats(value);
}
}
void AddStringsArg(const char *name,
const std::vector<const char *> &values) {
auto arg = op_def_.add_arg();
arg->set_name(name);
for (auto value : values) {
arg->add_strings(value);
}
// Appends a fresh, empty OperatorDef to the net under construction and
// returns a pointer to it for the caller (OpDefBuilder::Finalize) to fill.
// NOTE(review): the pointer aims into op_defs_ — if op_defs_ is a
// std::vector (as the visible member declaration suggests), a subsequent
// call may reallocate and dangle previously returned pointers. Confirm
// each op is finalized before the next new_operator_def() call, or
// switch op_defs_ to std::deque for pointer stability.
OperatorDef *new_operator_def() {
  op_defs_.emplace_back();   // construct in place; no temporary OperatorDef
  return &op_defs_.back();   // idiomatic back() over size()-1 indexing
}
OperatorDef *operator_def() { return &op_def_; }
Workspace *ws() { return &ws_; }
bool RunOp(DeviceType device) {
NetDef net_def;
net_def.add_op()->CopyFrom(op_def_);
for (auto &op_def_ : op_defs_) {
net_def.add_op()->CopyFrom(op_def_);
}
VLOG(3) << net_def.DebugString();
net_ = CreateNet(net_def, &ws_, device);
device_ = device;
......@@ -159,7 +170,7 @@ class OpsTestNet {
public:
Workspace ws_;
OperatorDef op_def_;
std::vector<OperatorDef> op_defs_;
std::unique_ptr<NetBase> net_;
DeviceType device_;
};
......
......@@ -27,14 +27,12 @@ static void Pooling(int iters,
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntArg("pooling_type", pooling_type);
net.AddIntsArg("kernels", {kernel, kernel});
net.AddIntsArg("strides", {stride, stride});
net.AddIntArg("padding", padding);
net.AddIntsArg("dilations", {1, 1});
.AddIntArg("pooling_type", pooling_type)
.AddIntsArg("kernels", {kernel, kernel})
.AddIntsArg("strides", {stride, stride})
.AddIntArg("padding", padding)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {batch, channels, height, width});
......
......@@ -19,14 +19,12 @@ TEST_F(PoolingOpTest, MAX_VALID) {
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("kernels", {2, 2});
net.AddIntsArg("strides", {2, 2});
net.AddIntArg("padding", Padding::VALID);
net.AddIntsArg("dilations", {1, 1});
net.AddIntArg("pooling_type", PoolingType::MAX);
.AddIntsArg("kernels", {2, 2})
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("pooling_type", PoolingType::MAX)
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -50,14 +48,12 @@ TEST_F(PoolingOpTest, AVG_VALID) {
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("kernels", {2, 2});
net.AddIntsArg("strides", {2, 2});
net.AddIntArg("padding", Padding::VALID);
net.AddIntsArg("dilations", {1, 1});
net.AddIntArg("pooling_type", PoolingType::AVG);
.AddIntsArg("kernels", {2, 2})
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("pooling_type", PoolingType::AVG)
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -81,14 +77,12 @@ TEST_F(PoolingOpTest, MAX_SAME) {
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("kernels", {2, 2});
net.AddIntsArg("strides", {2, 2});
net.AddIntArg("padding", Padding::SAME);
net.AddIntsArg("dilations", {1, 1});
net.AddIntArg("pooling_type", PoolingType::MAX);
.AddIntsArg("kernels", {2, 2})
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("pooling_type", PoolingType::MAX)
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>("Input", {1, 1, 3, 3},
......@@ -109,14 +103,12 @@ TEST_F(PoolingOpTest, MAX_VALID_DILATION) {
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntsArg("kernels", {2, 2});
net.AddIntsArg("strides", {1, 1});
net.AddIntArg("padding", Padding::VALID);
net.AddIntsArg("dilations", {2, 2});
net.AddIntArg("pooling_type", PoolingType::MAX);
.AddIntsArg("kernels", {2, 2})
.AddIntsArg("strides", {1, 1})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {2, 2})
.AddIntArg("pooling_type", PoolingType::MAX)
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -138,14 +130,12 @@ TEST_F(PoolingOpTest, MAX_k2x2s2x2) {
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntArg("pooling_type", PoolingType::MAX);
net.AddIntsArg("kernels", {2, 2});
net.AddIntsArg("strides", {2, 2});
net.AddIntArg("padding", Padding::SAME);
net.AddIntsArg("dilations", {1, 1});
.AddIntArg("pooling_type", PoolingType::MAX)
.AddIntsArg("kernels", {2, 2})
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -166,14 +156,12 @@ TEST_F(PoolingOpTest, MAX_k3x3s2x2) {
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntArg("pooling_type", PoolingType::MAX);
net.AddIntsArg("kernels", {3, 3});
net.AddIntsArg("strides", {2, 2});
net.AddIntArg("padding", Padding::VALID);
net.AddIntsArg("dilations", {1, 1});
.AddIntArg("pooling_type", PoolingType::MAX)
.AddIntsArg("kernels", {3, 3})
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::VALID)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......@@ -195,14 +183,12 @@ TEST_F(PoolingOpTest, AVG_k2x2s2x2) {
OpDefBuilder("Pooling", "PoolingTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
// Add args
net.AddIntArg("pooling_type", PoolingType::AVG);
net.AddIntsArg("kernels", {2, 2});
net.AddIntsArg("strides", {2, 2});
net.AddIntArg("padding", Padding::SAME);
net.AddIntsArg("dilations", {1, 1});
.AddIntArg("pooling_type", PoolingType::AVG)
.AddIntsArg("kernels", {2, 2})
.AddIntsArg("strides", {2, 2})
.AddIntArg("padding", Padding::SAME)
.AddIntsArg("dilations", {1, 1})
.Finalize(net.new_operator_def());
// Add input data
net.AddInputFromArray<DeviceType::CPU, float>(
......
......@@ -16,7 +16,7 @@ static void ReluBenchmark(int iters, int size) {
OpDefBuilder("Relu", "ReluBM")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {size});
......
......@@ -15,7 +15,7 @@ TEST_F(ReluOpTest, ReluOp) {
OpDefBuilder("Relu", "ReluTest")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {1, 2, 3, 5});
......@@ -38,11 +38,11 @@ TEST_F(ReluOpTest, ReluOpWithMax) {
OpDefBuilder("Relu", "ReluTestWithMax")
.Input("Input")
.Output("Output")
.Finalize(net.operator_def());
.AddFloatArg("max_limit", 0.5)
.Finalize(net.new_operator_def());
// Add input data
net.AddRandomInput<DeviceType::CPU, float>("Input", {1, 2, 3, 5});
net.AddFloatArg("max_limit", 0.5);
// Run
net.RunOp();
......
......@@ -18,7 +18,7 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWOAlignCorners) {
.Input("Input")
.Input("OutSize")
.Output("Output")
.Finalize(net.operator_def());
.Finalize(net.new_operator_def());
// Add input data
vector<float> input(24);
......@@ -43,9 +43,8 @@ TEST_F(ResizeBilinearTest, ResizeBilinearWAlignCorners) {
.Input("Input")
.Input("OutSize")
.Output("Output")
.Finalize(net.operator_def());
net.AddIntArg("align_corners", 1);
.AddIntArg("align_corners", 1)
.Finalize(net.new_operator_def());
// Add input data
vector<float> input(24);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册