From 144229c485564c0140012fb73b13f380b179fc36 Mon Sep 17 00:00:00 2001
From: luxuhui
Date: Wed, 10 Jun 2020 09:49:20 +0800
Subject: [PATCH] fix: fix error in micro static lib building and warning from
 `memset`

N/A

Signed-off-by: Luxuhui
---
 docs/micro-controllers/basic_usage.rst            |  6 +++---
 mace/core/buffer.h                                |  2 +-
 mace/core/runtime/hexagon/hexagon_hta_wrapper.cc  |  4 ++--
 mace/ops/addn.cc                                  |  2 +-
 mace/ops/arm/base/gemm.h                          |  3 ++-
 mace/ops/cumsum.cc                                |  4 ++--
 mace/ops/reduce.cc                                |  2 +-
 mace/ops/space_to_batch.cc                        | 12 ++++++------
 mace/ops/sqrdiff_mean.cc                          |  2 +-
 micro/codegen/BUILD.bazel                         |  8 ++++++--
 .../jinja2_files/micro_engine_factory.cc.jinja2   |  1 +
 11 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/docs/micro-controllers/basic_usage.rst b/docs/micro-controllers/basic_usage.rst
index f2a510b7..f73f6c86 100644
--- a/docs/micro-controllers/basic_usage.rst
+++ b/docs/micro-controllers/basic_usage.rst
@@ -52,17 +52,17 @@ Here we use the har-cnn model as an example.
 
     .. code-block:: sh
 
-        # copy convert result to micro dir ``path/to/micro``
+        # copy convert result to micro dir ``path/to/micro``, which should not be a subdirectory of ``mace/``.
         cp build/har-cnn/model/har_cnn_micro.tar.gz path/to/micro/
         cd path/to/micro
         tar zxvf har_cnn_micro.tar.gz
-        bazel build //micro/codegen:micro_engine
+        bazel build //micro/codegen:libmicro.so
 
     .. note::
 
        - This step can be skipped if you just want to run a model using ``tools/python/run_micro.py``, such as commands in step 5.
-       - The build result ``bazel-bin/micro/codegen/libmicro_engine.so``'s abi is host, if you want to run the model on micro controllers, you should build the code with the target abi.
+       - The ABI of the build result ``bazel-bin/micro/codegen/libmicro.so`` is host; if you want to run the model on micro controllers, you should build the code with the target ABI.
 
 5. Run the model on host.
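Most of the C++ hunks below make the same change: the destination pointer handed to memset is cast to void * first. GCC 8 and newer emit -Wclass-memaccess when memset is applied to a pointer to a non-trivial class type, which happens here when the templated ops are instantiated with a class-type element such as a float16 wrapper; the explicit void * cast states the raw-memory intent and silences the warning without changing behavior. A minimal sketch of the warning and the fix, using a hypothetical Half wrapper rather than any real MACE type:

    #include <cstddef>
    #include <cstring>

    // Hypothetical float16-style wrapper; the user-provided constructor makes
    // it a non-trivial class type, which is what -Wclass-memaccess keys on.
    struct Half {
      Half() : bits(0) {}
      unsigned short bits;
    };

    void ClearBefore(Half *data, std::size_t count) {
      // GCC 8+ warns here (-Wclass-memaccess): clearing an object of
      // non-trivial type with memset.
      std::memset(data, 0, count * sizeof(Half));
    }

    void ClearAfter(Half *data, std::size_t count) {
      // Same behavior, but the explicit void * cast suppresses the warning.
      std::memset(static_cast<void *>(data), 0, count * sizeof(Half));
    }

    int main() {
      Half buffer[8];
      ClearBefore(buffer, 8);
      ClearAfter(buffer, 8);
      return 0;
    }

The hunks in buffer.h, hexagon_hta_wrapper.cc, addn.cc, gemm.h, cumsum.cc, reduce.cc, space_to_batch.cc, and sqrdiff_mean.cc all apply this cast.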
diff --git a/mace/core/buffer.h b/mace/core/buffer.h
index 49be4217..99f3613c 100644
--- a/mace/core/buffer.h
+++ b/mace/core/buffer.h
@@ -224,7 +224,7 @@ class Buffer : public BufferBase {
   }
 
   void Clear(index_t size) {
-    memset(reinterpret_cast<char *>(raw_mutable_data()), 0, size);
+    memset(reinterpret_cast<void *>(raw_mutable_data()), 0, size);
  }
 
   const std::vector<index_t> shape() const {
diff --git a/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc b/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc
index a6b25939..ed70e48e 100644
--- a/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc
+++ b/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc
@@ -244,7 +244,7 @@ bool HexagonHTAWrapper::SetupGraph(const NetDef &net_def,
     auto quantized_tensor = make_unique<Tensor>(allocator_, DT_UINT8);
     auto hta_tensor = make_unique<Tensor>(allocator_, DT_UINT8);
     hexagon_hta_nn_hw_tensordef &input_tensordef = input_tensordef_[index];
-    memset(&input_tensordef, 0, sizeof(input_tensordef));
+    memset(static_cast<void *>(&input_tensordef), 0, sizeof(input_tensordef));
     MACE_CHECK(hexagon_hta_nn_get_memory_layout(nn_id_, 0, index,
                                                 &input_tensordef) == 0);
     input_tensordef.dataLen = input_tensordef.batchStride;
@@ -290,7 +290,7 @@ bool HexagonHTAWrapper::SetupGraph(const NetDef &net_def,
     quantized_tensor->SetZeroPoint(output_info.zero_point());
 
     hexagon_hta_nn_hw_tensordef &output_tensordef = output_tensordef_[index];
-    memset(&output_tensordef, 0, sizeof(output_tensordef));
+    memset(static_cast<void *>(&output_tensordef), 0, sizeof(output_tensordef));
     MACE_CHECK(hexagon_hta_nn_get_memory_layout(nn_id_, 1, index,
                                                 &output_tensordef) == 0);
     output_tensordef.dataLen = output_tensordef.batchStride;
diff --git a/mace/ops/addn.cc b/mace/ops/addn.cc
index 4121d49f..4d674f10 100644
--- a/mace/ops/addn.cc
+++ b/mace/ops/addn.cc
@@ -47,7 +47,7 @@ class AddNOp : public Operation {
 
     Tensor::MappingGuard output_guard(output);
     auto output_data = output->mutable_data<T>();
-    memset(output_data, 0, size * sizeof(T));
+    memset(static_cast<void *>(output_data), 0, size * sizeof(T));
 
     for (auto &input : inputs_) {
       Tensor::MappingGuard input_guard(input);
diff --git a/mace/ops/arm/base/gemm.h b/mace/ops/arm/base/gemm.h
index b2320a71..ec6cc318 100644
--- a/mace/ops/arm/base/gemm.h
+++ b/mace/ops/arm/base/gemm.h
@@ -134,7 +134,8 @@ class Gemm : public delegator::Gemm {
       depth = rows;
     }
     const index_t depth_padded = RoundUp(depth, static_cast<index_t>(4));
-    memset(packed_matrix, 0, sizeof(T) * WidthBlockSize * depth_padded);
+    memset(static_cast<void *>(packed_matrix), 0,
+           sizeof(T) * WidthBlockSize * depth_padded);
     if (dst_major == ColMajor) {
       for (index_t c = 0; c < cols; ++c) {
         for (index_t r = 0; r < rows; ++r) {
diff --git a/mace/ops/cumsum.cc b/mace/ops/cumsum.cc
index 41230ea2..c35cb971 100644
--- a/mace/ops/cumsum.cc
+++ b/mace/ops/cumsum.cc
@@ -84,7 +84,7 @@ class CumsumOp : public Operation {
         for (index_t cum_idx = 0; cum_idx < cum_size; ++cum_idx) {
           if (cum_idx == 0) {
             if (exclusive_) {
-              std::memset(output_ptr + start_idx,
+              std::memset(static_cast<void *>(output_ptr + start_idx),
                           0,
                           sizeof(T) * inner_size);
             } else {
@@ -111,7 +111,7 @@ class CumsumOp : public Operation {
           index_t cur_idx = start_idx + cum_idx * inner_size;
           if (cum_idx == cum_size - 1) {
             if (exclusive_) {
-              std::memset(output_ptr + cur_idx,
+              std::memset(static_cast<void *>(output_ptr + cur_idx),
                           0,
                           sizeof(T) * inner_size);
             } else {
diff --git a/mace/ops/reduce.cc b/mace/ops/reduce.cc
index a9b58633..0141b05c 100644
--- a/mace/ops/reduce.cc
+++ b/mace/ops/reduce.cc
@@ -590,7 +590,7 @@ class ReduceOp : public ReduceOpBase {
     const T *input_ptr = input->data<T>();
     Tensor::MappingGuard output_map(output);
     T *output_ptr = output->mutable_data<T>();
-    memset(output_ptr, 0, output->size() * sizeof(T));
+    memset(static_cast<void *>(output_ptr), 0, output->size() * sizeof(T));
     switch (data_reshape_.size()) {
       case 1:Reduce1Dims(context, input_ptr, reduce_type_, output_ptr);
         break;
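The cast only addresses the diagnostic: memset still writes zero bytes, which is a meaningful "zero" only for element types whose zero value is represented by all-zero bits (true for the integer and floating-point element types these ops use). A small guard template, hypothetical and not part of this patch, is one way to document that assumption:

    #include <cstddef>
    #include <cstring>
    #include <type_traits>

    // Hypothetical helper (not in MACE): zero-fills a typed buffer while
    // asserting that T is safe to initialize from raw zero bytes.
    template <typename T>
    void ZeroFill(T *data, std::size_t count) {
      static_assert(std::is_trivially_copyable<T>::value,
                    "memset-based zero fill expects a trivially copyable T");
      std::memset(static_cast<void *>(data), 0, count * sizeof(T));
    }

    int main() {
      float buffer[16];
      ZeroFill(buffer, 16);  // float zero is all-zero bits, so this is well-defined
      return 0;
    }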
diff --git a/mace/ops/space_to_batch.cc b/mace/ops/space_to_batch.cc
index 641d746f..f53e6385 100644
--- a/mace/ops/space_to_batch.cc
+++ b/mace/ops/space_to_batch.cc
@@ -163,13 +163,13 @@ class SpaceToBatchNDOp : public SpaceToBatchOpBase {
 
             T *output_base = output_data +
                 (b * channels + c) * out_height * out_width;
 
-            memset(output_base + block_h * out_width,
+            memset(static_cast<void *>(output_base + block_h * out_width),
                    0,
                    (valid_h_start - block_h) * out_width * sizeof(T));
 
             index_t in_h = valid_h_start * block_shape_h + tile_h - pad_top;
             for (index_t h = valid_h_start; h < valid_h_end; ++h) {
-              memset(output_base + h * out_width,
+              memset(static_cast<void *>(output_base + h * out_width),
                      0,
                      valid_w_start * sizeof(T));
@@ -181,12 +181,12 @@ class SpaceToBatchNDOp : public SpaceToBatchOpBase {
               }  // w
               in_h += block_shape_h;
 
-              memset(output_base + h * out_width + valid_w_end,
-                     0,
-                     (out_width - valid_w_end) * sizeof(T));
+              memset(
+                  static_cast<void *>(output_base + h * out_width + valid_w_end),
+                  0, (out_width - valid_w_end) * sizeof(T));
             }  // h
 
-            memset(output_base + valid_h_end * out_width,
+            memset(static_cast<void *>(output_base + valid_h_end * out_width),
                    0,
                    (std::min(out_height, block_h + block_h_size) - valid_h_end) *
                        out_width * sizeof(T));
diff --git a/mace/ops/sqrdiff_mean.cc b/mace/ops/sqrdiff_mean.cc
index 901b52ab..e219bc78 100644
--- a/mace/ops/sqrdiff_mean.cc
+++ b/mace/ops/sqrdiff_mean.cc
@@ -62,7 +62,7 @@ class SqrDiffMeanOp : public Operation {
     const T *input_ptr1 = input1->data<T>();
     Tensor::MappingGuard output_map(output);
     T *output_ptr = output->mutable_data<T>();
-    memset(output_ptr, 0, output->size() * sizeof(T));
+    memset(static_cast<void *>(output_ptr), 0, output->size() * sizeof(T));
 
     const index_t img_size = input0->dim(2) * input0->dim(3);
     const index_t bc = input0->dim(0) * input0->dim(1);
diff --git a/micro/codegen/BUILD.bazel b/micro/codegen/BUILD.bazel
index e094f07b..fb3b54a0 100644
--- a/micro/codegen/BUILD.bazel
+++ b/micro/codegen/BUILD.bazel
@@ -81,18 +81,22 @@ genrule(
         "//micro/include",
         "//micro/model",
         "//micro/ops",
+        "//micro/base",
+        "//micro/port",
     ],
     outs = ["libmicro.a"],
     cmd = "tmp_mri_file=$$(mktemp micro-static-lib-mri.XXXXXXXXXX);" +
           "mri_stream=$$(python $(location //mace/python/tools:archive_static_lib) " +
           "$(locations micro_engine) " +
          "$(locations generated_models) " +
+          "$(locations //micro/base) " +
           "$(locations //micro/framework) " +
           "$(locations //micro/model) " +
-          "$(locations ////micro/ops) " +
+          "$(locations //micro/ops) " +
+          "$(locations //micro/port) " +
           "$@ " +
           "$$tmp_mri_file);" +
-          "$(AR) -M <$$tmp_mri_file;" + 
+          "$(AR) -M <$$tmp_mri_file;" +
           "rm -rf $$tmp_mri_file;",
     tools = ["//mace/python/tools:archive_static_lib"],
     visibility = ["//visibility:public"],
diff --git a/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2 b/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2
index c0c5c71a..797b2cac 100644
--- a/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2
+++ b/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2
@@ -32,6 +32,7 @@ MaceStatus GetMicroEngineSingleton(MaceMicroEngine **engine) {
   if (!kHasInit) {
     MaceMicroEngineConfig *engine_config = GetMicroEngineConfig();
     status = kMaceMicroEngine.Init(engine_config);
+    kHasInit = (status == MACE_SUCCESS);
   }
   if (status == MACE_SUCCESS) {
     *engine = &kMaceMicroEngine;
--
GitLab
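With the added line, the generated factory latches kHasInit only when Init succeeded: later calls skip re-initialization after a success, while a failed Init leaves the flag false so initialization is retried. A self-contained sketch of that lazy-init pattern, with Engine and Status as hypothetical stand-ins rather than the real MACE micro types:

    #include <cstdio>

    // Stand-ins for the generated code's MaceMicroEngine / MaceStatus types.
    enum Status { SUCCESS, FAILURE };

    struct Engine {
      Status Init() { return SUCCESS; }  // stand-in for MaceMicroEngine::Init
    };

    Status GetEngineSingleton(Engine **engine) {
      static Engine kEngine;
      static bool kHasInit = false;
      Status status = SUCCESS;
      if (!kHasInit) {
        status = kEngine.Init();
        kHasInit = (status == SUCCESS);  // only latch a successful init
      }
      if (status == SUCCESS) {
        *engine = &kEngine;
      }
      return status;
    }

    int main() {
      Engine *engine = nullptr;
      if (GetEngineSingleton(&engine) == SUCCESS) {
        std::printf("engine initialized: %p\n", static_cast<void *>(engine));
      }
      return 0;
    }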