diff --git a/docs/micro-controllers/basic_usage.rst b/docs/micro-controllers/basic_usage.rst index f2a510b7292962563f5f4232f7261093f63b51d1..f73f6c864df60653f9cfe4b268692c8adfff0775 100644 --- a/docs/micro-controllers/basic_usage.rst +++ b/docs/micro-controllers/basic_usage.rst @@ -52,17 +52,17 @@ Here we use the har-cnn model as an example. .. code-block:: sh - # copy convert result to micro dir ``path/to/micro`` + # copy convert result to micro dir ``path/to/micro``, which should not be the sub directory of the ``mace/``. cp build/har-cnn/model/har_cnn_micro.tar.gz path/to/micro/ cd path/to/micro tar zxvf har_cnn_micro.tar.gz - bazel build //micro/codegen:micro_engine + bazel build //micro/codegen:libmicro.so .. note:: - This step can be skipped if you just want to run a model using ``tools/python/run_micro.py``, such as commands in step 5. - - The build result ``bazel-bin/micro/codegen/libmicro_engine.so``'s abi is host, if you want to run the model on micro controllers, you should build the code with the target abi. + - The build result ``bazel-bin/micro/codegen/libmicro.so``'s abi is host, if you want to run the model on micro controllers, you should build the code with the target abi. 5. Run the model on host. 
diff --git a/mace/core/buffer.h b/mace/core/buffer.h index 49be42179b1e38a17b3f5c2f21f94c82edf88a68..99f3613c650cf2255531e0df25c6361d2c9a5f79 100644 --- a/mace/core/buffer.h +++ b/mace/core/buffer.h @@ -224,7 +224,7 @@ class Buffer : public BufferBase { } void Clear(index_t size) { - memset(reinterpret_cast<char*>(raw_mutable_data()), 0, size); + memset(reinterpret_cast<void *>(raw_mutable_data()), 0, size); } const std::vector<size_t> shape() const { diff --git a/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc b/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc index a6b25939a7a84c533efcb88b72d024b988cf7fe8..ed70e48ecc2bd84f70c57a5ec1f03b140743b6df 100644 --- a/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc +++ b/mace/core/runtime/hexagon/hexagon_hta_wrapper.cc @@ -244,7 +244,7 @@ bool HexagonHTAWrapper::SetupGraph(const NetDef &net_def, auto quantized_tensor = make_unique<Tensor>(allocator_, DT_UINT8); auto hta_tensor = make_unique<Tensor>(allocator_, DT_UINT8); hexagon_hta_nn_hw_tensordef &input_tensordef = input_tensordef_[index]; - memset(&input_tensordef, 0, sizeof(input_tensordef)); + memset(static_cast<void *>(&input_tensordef), 0, sizeof(input_tensordef)); MACE_CHECK(hexagon_hta_nn_get_memory_layout(nn_id_, 0, index, &input_tensordef) == 0); input_tensordef.dataLen = input_tensordef.batchStride; @@ -290,7 +290,7 @@ bool HexagonHTAWrapper::SetupGraph(const NetDef &net_def, quantized_tensor->SetZeroPoint(output_info.zero_point()); hexagon_hta_nn_hw_tensordef &output_tensordef = output_tensordef_[index]; - memset(&output_tensordef, 0, sizeof(output_tensordef)); + memset(static_cast<void *>(&output_tensordef), 0, sizeof(output_tensordef)); MACE_CHECK(hexagon_hta_nn_get_memory_layout(nn_id_, 1, index, &output_tensordef) == 0); output_tensordef.dataLen = output_tensordef.batchStride; diff --git a/mace/ops/addn.cc b/mace/ops/addn.cc index 4121d49ff27afdb81624b088512ad20d2b5ced5f..4d674f105638bade92f0c772aa71757de4b86f82 100644 --- a/mace/ops/addn.cc +++ b/mace/ops/addn.cc @@ -47,7 +47,7 @@ class AddNOp : public 
Operation { Tensor::MappingGuard output_guard(output); auto output_data = output->mutable_data<T>(); - memset(output_data, 0, size * sizeof(T)); + memset(static_cast<void *>(output_data), 0, size * sizeof(T)); for (auto &input : inputs_) { Tensor::MappingGuard input_guard(input); diff --git a/mace/ops/arm/base/gemm.h b/mace/ops/arm/base/gemm.h index b2320a71d95842c96fd562413f116516bd0c0c87..ec6cc3180df5f12c0127b3c06d77beb549e211e3 100644 --- a/mace/ops/arm/base/gemm.h +++ b/mace/ops/arm/base/gemm.h @@ -134,7 +134,8 @@ class Gemm : public delegator::Gemm { depth = rows; } const index_t depth_padded = RoundUp(depth, static_cast<index_t>(4)); - memset(packed_matrix, 0, sizeof(T) * WidthBlockSize * depth_padded); + memset(static_cast<void *>(packed_matrix), 0, + sizeof(T) * WidthBlockSize * depth_padded); if (dst_major == ColMajor) { for (index_t c = 0; c < cols; ++c) { for (index_t r = 0; r < rows; ++r) { diff --git a/mace/ops/cumsum.cc b/mace/ops/cumsum.cc index 41230ea25b3880694ce370096d9fd51c636984d7..c35cb971c25262c7c61b1b0ffbd5f209736a65e5 100644 --- a/mace/ops/cumsum.cc +++ b/mace/ops/cumsum.cc @@ -84,7 +84,7 @@ class CumsumOp : public Operation { for (index_t cum_idx = 0; cum_idx < cum_size; ++cum_idx) { if (cum_idx == 0) { if (exclusive_) { - std::memset(output_ptr + start_idx, + std::memset(static_cast<void *>(output_ptr + start_idx), 0, sizeof(T) * inner_size); } else { @@ -111,7 +111,7 @@ class CumsumOp : public Operation { index_t cur_idx = start_idx + cum_idx * inner_size; if (cum_idx == cum_size - 1) { if (exclusive_) { - std::memset(output_ptr + cur_idx, + std::memset(static_cast<void *>(output_ptr + cur_idx), 0, sizeof(T) * inner_size); } else { diff --git a/mace/ops/reduce.cc b/mace/ops/reduce.cc index a9b58633498355f21d93292f32e27be54ecb62ec..0141b05c42ddc6b13ac59c87da596e794cd2d58c 100644 --- a/mace/ops/reduce.cc +++ b/mace/ops/reduce.cc @@ -590,7 +590,7 @@ class ReduceOp : public ReduceOpBase { const T *input_ptr = input->data<T>(); Tensor::MappingGuard output_map(output); T *output_ptr = 
output->mutable_data<T>(); - memset(output_ptr, 0, output->size() * sizeof(T)); + memset(static_cast<void *>(output_ptr), 0, output->size() * sizeof(T)); switch (data_reshape_.size()) { case 1:Reduce1Dims(context, input_ptr, reduce_type_, output_ptr); break; diff --git a/mace/ops/space_to_batch.cc b/mace/ops/space_to_batch.cc index 641d746f46c95df1f6aee8ac8d4757b2b78732b9..f53e638594e2ccd374eea3b95ae659ebeb00b9b0 100644 --- a/mace/ops/space_to_batch.cc +++ b/mace/ops/space_to_batch.cc @@ -163,13 +163,13 @@ class SpaceToBatchNDOp : public SpaceToBatchOpBase { T *output_base = output_data + (b * channels + c) * out_height * out_width; - memset(output_base + block_h * out_width, + memset(static_cast<void *>(output_base + block_h * out_width), 0, (valid_h_start - block_h) * out_width * sizeof(T)); index_t in_h = valid_h_start * block_shape_h + tile_h - pad_top; for (index_t h = valid_h_start; h < valid_h_end; ++h) { - memset(output_base + h * out_width, + memset(static_cast<void *>(output_base + h * out_width), 0, valid_w_start * sizeof(T)); @@ -181,12 +181,12 @@ class SpaceToBatchNDOp : public SpaceToBatchOpBase { } // w in_h += block_shape_h; - memset(output_base + h * out_width + valid_w_end, - 0, - (out_width - valid_w_end) * sizeof(T)); + memset( + static_cast<void *>(output_base + h * out_width + valid_w_end), + 0, (out_width - valid_w_end) * sizeof(T)); } // h - memset(output_base + valid_h_end * out_width, + memset(static_cast<void *>(output_base + valid_h_end * out_width), 0, (std::min(out_height, block_h + block_h_size) - valid_h_end) * out_width * sizeof(T)); diff --git a/mace/ops/sqrdiff_mean.cc b/mace/ops/sqrdiff_mean.cc index 901b52aba9360ff350e04f302c60ec52e1fcb5b0..e219bc78f7f2e0ae6e6810aec02026fdae685797 100644 --- a/mace/ops/sqrdiff_mean.cc +++ b/mace/ops/sqrdiff_mean.cc @@ -62,7 +62,7 @@ class SqrDiffMeanOp : public Operation { const T *input_ptr1 = input1->data<T>(); Tensor::MappingGuard output_map(output); T *output_ptr = output->mutable_data<T>(); - memset(output_ptr, 0, output->size() * 
sizeof(T)); + memset(static_cast<void *>(output_ptr), 0, output->size() * sizeof(T)); const index_t img_size = input0->dim(2) * input0->dim(3); const index_t bc = input0->dim(0) * input0->dim(1); diff --git a/micro/codegen/BUILD.bazel b/micro/codegen/BUILD.bazel index e094f07b6b51488173bda159633f4d0f759dc440..fb3b54a0c5f55dd403d13787853ce4e2042846e7 100644 --- a/micro/codegen/BUILD.bazel +++ b/micro/codegen/BUILD.bazel @@ -81,18 +81,22 @@ genrule( "//micro/include", "//micro/model", "//micro/ops", + "//micro/base", + "//micro/port", ], outs = ["libmicro.a"], cmd = "tmp_mri_file=$$(mktemp micro-static-lib-mri.XXXXXXXXXX);" + "mri_stream=$$(python $(location //mace/python/tools:archive_static_lib) " + "$(locations micro_engine) " + "$(locations generated_models) " + + "$(locations //micro/base) " + "$(locations //micro/framework) " + "$(locations //micro/model) " + - "$(locations ////micro/ops) " + + "$(locations //micro/ops) " + + "$(locations //micro/port) " + "$@ " + "$$tmp_mri_file);" + - "$(AR) -M <$$tmp_mri_file;" + + "$(AR) -M <$$tmp_mri_file;" + "rm -rf $$tmp_mri_file;", tools = ["//mace/python/tools:archive_static_lib"], visibility = ["//visibility:public"], diff --git a/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2 b/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2 index c0c5c71a3d4f42bdefa13860b5764bdbc0480039..797b2cac062633266d755ff27d947f2ab4c20ed6 100644 --- a/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2 +++ b/tools/python/micro/jinja2_files/micro_engine_factory.cc.jinja2 @@ -32,6 +32,7 @@ MaceStatus GetMicroEngineSingleton(MaceMicroEngine **engine) { if (!kHasInit) { MaceMicroEngineConfig *engine_config = GetMicroEngineConfig(); status = kMaceMicroEngine.Init(engine_config); + kHasInit = (status == MACE_SUCCESS); } if (status == MACE_SUCCESS) { *engine = &kMaceMicroEngine;