提交 758fff83 编写于 作者: L liuqi

Add -Werror at op BUILD and fix warnings.

上级 65bb3630
...@@ -30,6 +30,17 @@ struct StatsFuture { ...@@ -30,6 +30,17 @@ struct StatsFuture {
}; };
}; };
// Installs a no-op timing wait function on the given future, if any.
// The installed callback stamps both start_micros and end_micros with
// the current time, so the recorded op duration is zero — used by
// synchronous (CPU) ops that have no asynchronous work to wait on.
inline void SetFutureDefaultWaitFn(StatsFuture *future) {
  if (future == nullptr) {
    return;  // Caller did not request profiling/waiting; nothing to do.
  }
  future->wait_fn = [](CallStats *stats) {
    if (stats == nullptr) {
      return;
    }
    stats->start_micros = NowMicros();
    stats->end_micros = stats->start_micros;
  };
}
} // namespace mace } // namespace mace
#endif // MACE_CORE_FUTURE_H_ #endif // MACE_CORE_FUTURE_H_
...@@ -652,11 +652,14 @@ struct EltwiseFunctor<DeviceType::CPU, float> : EltwiseFunctorBase { ...@@ -652,11 +652,14 @@ struct EltwiseFunctor<DeviceType::CPU, float> : EltwiseFunctorBase {
static_cast<uint32_t>(input0->dim_size() - input1->dim_size()); static_cast<uint32_t>(input0->dim_size() - input1->dim_size());
if (data_format_ == NCHW) { if (data_format_ == NCHW) {
MACE_CHECK( MACE_CHECK(
input0->dim_size() == 4 && (input0->dim_size() == 4)
(input1->dim_size() == 0 || && ((input1->dim_size() == 0)
input1->dim_size() == 4 && input1->dim(1) == input0->dim(1) && || (input1->dim_size() == 4
(input1->dim(0) == input0->dim(0) || input1->dim(0) == 1) || && input1->dim(1) == input0->dim(1)
input1->dim_size() == 1 && input1->dim(0) == input0->dim(1)), && (input1->dim(0) == input0->dim(0)
|| input1->dim(0) == 1))
|| (input1->dim_size() == 1
&& input1->dim(0) == input0->dim(1))),
"only support broadcast channel dimension"); "only support broadcast channel dimension");
} else { } else {
if (rank_diff > 0 && rank_diff < input0->dim_size()) { if (rank_diff > 0 && rank_diff < input0->dim_size()) {
......
...@@ -34,8 +34,6 @@ struct StackFunctor { ...@@ -34,8 +34,6 @@ struct StackFunctor {
MaceStatus operator()(const std::vector<const Tensor *> &inputs, MaceStatus operator()(const std::vector<const Tensor *> &inputs,
Tensor *output, Tensor *output,
StatsFuture *future) { StatsFuture *future) {
MACE_UNUSED(future);
MACE_CHECK(!inputs.empty(), "stack inputs are empty."); MACE_CHECK(!inputs.empty(), "stack inputs are empty.");
std::vector<index_t> input_shape = inputs[0]->shape(); std::vector<index_t> input_shape = inputs[0]->shape();
MACE_CHECK(axis_ >= -(inputs[0]->dim_size() + 1) && MACE_CHECK(axis_ >= -(inputs[0]->dim_size() + 1) &&
...@@ -51,7 +49,7 @@ struct StackFunctor { ...@@ -51,7 +49,7 @@ struct StackFunctor {
// On host, no need to map data // On host, no need to map data
T *output_data = output->mutable_data<T>(); T *output_data = output->mutable_data<T>();
std::vector<const T *> input_data(inputs.size()); std::vector<const T *> input_data(inputs.size());
for (int i = 0; i < inputs.size(); ++i) { for (size_t i = 0; i < inputs.size(); ++i) {
input_data[i] = inputs[i]->data<T>(); input_data[i] = inputs[i]->data<T>();
} }
...@@ -62,13 +60,14 @@ struct StackFunctor { ...@@ -62,13 +60,14 @@ struct StackFunctor {
std::accumulate(input_shape.begin() + axis_, input_shape.end(), 1, std::accumulate(input_shape.begin() + axis_, input_shape.end(), 1,
std::multiplies<index_t>()); std::multiplies<index_t>());
for (index_t h = 0; h < high_dim_elem_size; ++h) { for (index_t h = 0; h < high_dim_elem_size; ++h) {
for (index_t i = 0; i < inputs.size(); ++i) { for (size_t i = 0; i < inputs.size(); ++i) {
memcpy(output_data, input_data[i] + h * low_dim_elem_size, memcpy(output_data, input_data[i] + h * low_dim_elem_size,
sizeof(T) * low_dim_elem_size); sizeof(T) * low_dim_elem_size);
output_data += low_dim_elem_size; output_data += low_dim_elem_size;
} }
} }
SetFutureDefaultWaitFn(future);
return MACE_SUCCESS; return MACE_SUCCESS;
} }
......
...@@ -45,7 +45,6 @@ struct StridedSliceFunctor { ...@@ -45,7 +45,6 @@ struct StridedSliceFunctor {
const Tensor *strides, const Tensor *strides,
Tensor *output, Tensor *output,
StatsFuture *future) { StatsFuture *future) {
MACE_UNUSED(future);
MACE_CHECK(ellipsis_mask_ == 0 && new_axis_mask_ == 0, MACE_CHECK(ellipsis_mask_ == 0 && new_axis_mask_ == 0,
"ellipsis_mask and new_axis_mask are not supported yet."); "ellipsis_mask and new_axis_mask are not supported yet.");
...@@ -144,6 +143,7 @@ struct StridedSliceFunctor { ...@@ -144,6 +143,7 @@ struct StridedSliceFunctor {
} }
} }
SetFutureDefaultWaitFn(future);
return MACE_SUCCESS; return MACE_SUCCESS;
} }
......
...@@ -95,9 +95,9 @@ struct WinogradInverseTransformFunctorBase { ...@@ -95,9 +95,9 @@ struct WinogradInverseTransformFunctorBase {
: batch_(batch), : batch_(batch),
height_(height), height_(height),
width_(width), width_(width),
wino_blk_size_(block_size),
activation_(activation), activation_(activation),
relux_max_limit_(relux_max_limit), relux_max_limit_(relux_max_limit) {}
wino_blk_size_(block_size) {}
const int batch_; const int batch_;
const int height_; const int height_;
......
...@@ -41,7 +41,10 @@ cc_library( ...@@ -41,7 +41,10 @@ cc_library(
["*.h"], ["*.h"],
exclude = ["ops_test_util.h"], exclude = ["ops_test_util.h"],
), ),
copts = if_openmp_enabled(["-fopenmp"]) + if_neon_enabled([ copts = [
"-Werror",
"-Wextra",
] + if_openmp_enabled(["-fopenmp"]) + if_neon_enabled([
"-DMACE_ENABLE_NEON", "-DMACE_ENABLE_NEON",
]) + if_android_armv7([ ]) + if_android_armv7([
"-mfpu=neon", "-mfpu=neon",
...@@ -64,7 +67,10 @@ cc_test( ...@@ -64,7 +67,10 @@ cc_test(
srcs = glob( srcs = glob(
["*_test.cc"], ["*_test.cc"],
), ),
copts = if_openmp_enabled(["-fopenmp"]) + if_neon_enabled([ copts = [
"-Werror",
"-Wextra",
] + if_openmp_enabled(["-fopenmp"]) + if_neon_enabled([
"-DMACE_ENABLE_NEON", "-DMACE_ENABLE_NEON",
]) + if_android_armv7([ ]) + if_android_armv7([
"-mfpu=neon", "-mfpu=neon",
...@@ -88,7 +94,10 @@ cc_test( ...@@ -88,7 +94,10 @@ cc_test(
name = "ops_benchmark", name = "ops_benchmark",
testonly = 1, testonly = 1,
srcs = glob(["*_benchmark.cc"]), srcs = glob(["*_benchmark.cc"]),
copts = if_openmp_enabled(["-fopenmp"]) + if_neon_enabled([ copts = [
"-Werror",
"-Wextra",
] + if_openmp_enabled(["-fopenmp"]) + if_neon_enabled([
"-DMACE_ENABLE_NEON", "-DMACE_ENABLE_NEON",
]) + if_android_armv7([ ]) + if_android_armv7([
"-mfpu=neon", "-mfpu=neon",
......
...@@ -32,6 +32,7 @@ class IdentityOp : public Operator<D, T> { ...@@ -32,6 +32,7 @@ class IdentityOp : public Operator<D, T> {
const Tensor *input = this->Input(INPUT); const Tensor *input = this->Input(INPUT);
Tensor *output = this->Output(OUTPUT); Tensor *output = this->Output(OUTPUT);
output->ReuseTensorBuffer(*input); output->ReuseTensorBuffer(*input);
SetFutureDefaultWaitFn(future);
return MACE_SUCCESS; return MACE_SUCCESS;
} }
......
...@@ -42,6 +42,7 @@ class ShapeOp : public Operator<D, T> { ...@@ -42,6 +42,7 @@ class ShapeOp : public Operator<D, T> {
for (index_t i = 0; i < input->dim_size(); ++i) { for (index_t i = 0; i < input->dim_size(); ++i) {
output_data[i] = input->dim(i); output_data[i] = input->dim(i);
} }
SetFutureDefaultWaitFn(future);
return MACE_SUCCESS; return MACE_SUCCESS;
} }
......
...@@ -31,8 +31,6 @@ class SqueezeOp : public Operator<D, T> { ...@@ -31,8 +31,6 @@ class SqueezeOp : public Operator<D, T> {
axis_(OperatorBase::GetRepeatedArgs<int>("axis", {})) {} axis_(OperatorBase::GetRepeatedArgs<int>("axis", {})) {}
MaceStatus Run(StatsFuture *future) override { MaceStatus Run(StatsFuture *future) override {
MACE_UNUSED(future);
const Tensor *input = this->Input(INPUT); const Tensor *input = this->Input(INPUT);
Tensor *output = this->Output(OUTPUT); Tensor *output = this->Output(OUTPUT);
...@@ -47,6 +45,7 @@ class SqueezeOp : public Operator<D, T> { ...@@ -47,6 +45,7 @@ class SqueezeOp : public Operator<D, T> {
output->ReuseTensorBuffer(*input); output->ReuseTensorBuffer(*input);
output->Reshape(output_shape); output->Reshape(output_shape);
SetFutureDefaultWaitFn(future);
return MACE_SUCCESS; return MACE_SUCCESS;
} }
......
...@@ -33,7 +33,6 @@ class StackOp : public Operator<D, T> { ...@@ -33,7 +33,6 @@ class StackOp : public Operator<D, T> {
MaceStatus Run(StatsFuture *future) override { MaceStatus Run(StatsFuture *future) override {
const std::vector<const Tensor *> &inputs = this->Inputs(); const std::vector<const Tensor *> &inputs = this->Inputs();
Tensor *output = this->Output(OUTPUT); Tensor *output = this->Output(OUTPUT);
return functor_(inputs, output, future); return functor_(inputs, output, future);
} }
......
...@@ -29,7 +29,7 @@ void TestStack(const std::vector<index_t> &input_shape, ...@@ -29,7 +29,7 @@ void TestStack(const std::vector<index_t> &input_shape,
const std::vector<index_t> &output_shape, const std::vector<index_t> &output_shape,
const std::vector<float> &output) { const std::vector<float> &output) {
OpsTestNet net; OpsTestNet net;
for (int i = 0; i < inputs.size(); ++i) { for (size_t i = 0; i < inputs.size(); ++i) {
net.AddInputFromArray<CPU, float>(MakeString("Input", i), input_shape, net.AddInputFromArray<CPU, float>(MakeString("Input", i), input_shape,
inputs[i]); inputs[i]);
} }
...@@ -38,7 +38,7 @@ void TestStack(const std::vector<index_t> &input_shape, ...@@ -38,7 +38,7 @@ void TestStack(const std::vector<index_t> &input_shape,
.Output("Output") .Output("Output")
.AddIntArg("axis", axis); .AddIntArg("axis", axis);
for (int i = 0; i < inputs.size(); ++i) { for (size_t i = 0; i < inputs.size(); ++i) {
op_builder.Input(MakeString("Input", i)); op_builder.Input(MakeString("Input", i));
} }
op_builder.Finalize(net.NewOperatorDef()); op_builder.Finalize(net.NewOperatorDef());
......
...@@ -4,7 +4,10 @@ load("//mace:mace.bzl", "if_openmp_enabled", "if_android") ...@@ -4,7 +4,10 @@ load("//mace:mace.bzl", "if_openmp_enabled", "if_android")
cc_binary( cc_binary(
name = "mace_run_static", name = "mace_run_static",
srcs = ["mace_run.cc"], srcs = ["mace_run.cc"],
copts = if_android([ copts = [
"-Werror",
"-Wextra",
] + if_android([
"-DMACE_ENABLE_OPENCL", "-DMACE_ENABLE_OPENCL",
]), ]),
linkopts = if_openmp_enabled(["-fopenmp"]), linkopts = if_openmp_enabled(["-fopenmp"]),
...@@ -19,7 +22,10 @@ cc_binary( ...@@ -19,7 +22,10 @@ cc_binary(
cc_binary( cc_binary(
name = "mace_run_shared", name = "mace_run_shared",
srcs = ["mace_run.cc"], srcs = ["mace_run.cc"],
copts = if_android([ copts = [
"-Werror",
"-Wextra",
] + if_android([
"-DMACE_ENABLE_OPENCL", "-DMACE_ENABLE_OPENCL",
]), ]),
linkopts = ["-lm", "-pie", "-fPIE"] + if_openmp_enabled(["-fopenmp"]), linkopts = ["-lm", "-pie", "-fPIE"] + if_openmp_enabled(["-fopenmp"]),
......
...@@ -1045,7 +1045,7 @@ def run_specific_target(flags, configs, target_abi, ...@@ -1045,7 +1045,7 @@ def run_specific_target(flags, configs, target_abi,
def run_mace(flags): def run_mace(flags):
configs = format_model_config(flags.config) configs = format_model_config(flags)
target_socs = configs[YAMLKeyword.target_socs] target_socs = configs[YAMLKeyword.target_socs]
if not target_socs or ALL_SOC_TAG in target_socs: if not target_socs or ALL_SOC_TAG in target_socs:
...@@ -1159,7 +1159,7 @@ def bm_specific_target(flags, configs, target_abi, target_soc, serial_num): ...@@ -1159,7 +1159,7 @@ def bm_specific_target(flags, configs, target_abi, target_soc, serial_num):
def benchmark_model(flags): def benchmark_model(flags):
configs = format_model_config(flags.config) configs = format_model_config(flags)
target_socs = configs[YAMLKeyword.target_socs] target_socs = configs[YAMLKeyword.target_socs]
if not target_socs or ALL_SOC_TAG in target_socs: if not target_socs or ALL_SOC_TAG in target_socs:
...@@ -1211,6 +1211,16 @@ def parse_args(): ...@@ -1211,6 +1211,16 @@ def parse_args():
default="", default="",
required=True, required=True,
help="model yaml configuration file path") help="model yaml configuration file path")
all_type_parent_parser.add_argument(
"--build_type",
type=str,
default="",
help="Model build type, can be ['proto', 'code'].")
all_type_parent_parser.add_argument(
"--target_abis",
type=str,
default="",
help="Target ABIs, comma seperated list.")
build_run_parent_parser = argparse.ArgumentParser(add_help=False) build_run_parent_parser = argparse.ArgumentParser(add_help=False)
build_run_parent_parser.add_argument( build_run_parent_parser.add_argument(
'--address_sanitizer', '--address_sanitizer',
...@@ -1253,16 +1263,6 @@ def parse_args(): ...@@ -1253,16 +1263,6 @@ def parse_args():
"--enable_openmp", "--enable_openmp",
action="store_false", action="store_false",
help="Enable openmp for multiple thread.") help="Enable openmp for multiple thread.")
build.add_argument(
"--build_type",
type=str,
default="",
help="Model build type, can be ['proto', 'code'].")
build.add_argument(
"--target_abis",
type=str,
default="",
help="Target ABIs, comma seperated list.")
run = subparsers.add_parser( run = subparsers.add_parser(
'run', 'run',
parents=[all_type_parent_parser, run_bm_parent_parser, parents=[all_type_parent_parser, run_bm_parent_parser,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册