提交 bc01a8e0 编写于 作者: luxuhui

fix: fix the interface compatibility issue

N/A
Signed-off-by: Luxuhui <luxuhui@xiaomi.com>
上级 d24a3ddd
......@@ -431,6 +431,12 @@ class MACE_API MaceEngine {
std::map<std::string, MaceTensor> *outputs,
RunMetadata *run_metadata);
// @Deprecated, will be removed in future version
MaceStatus Init(const NetDef *net_def,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes,
const unsigned char *model_data);
private:
class Impl;
std::unique_ptr<Impl> impl_;
......
......@@ -106,8 +106,10 @@ bool ApuWrapper::Init(const NetDef &net_def,
}
const auto tensor_end = const_tensor.offset() +
const_tensor->data_size() * GetEnumTypeSize(const_tensor.data_type());
MACE_CHECK(tensor_end <= model_data_size, "tensor_end (", tensor_end,
") should <= ", model_data_size);
if (model_data_size >= 0) {
MACE_CHECK(tensor_end <= model_data_size, "tensor_end (", tensor_end,
") should <= ", model_data_size);
}
tensor.data_buf =
const_cast<unsigned char *>(model_data + const_tensor.offset());
const_tensors.push_back(tensor);
......
......@@ -189,9 +189,11 @@ bool HexagonDSPWrapper::SetupGraph(const NetDef &net_def,
data = const_cast<unsigned char *>(model_data + const_tensor.offset());
data_len =
const_tensor.data_size() * GetEnumTypeSize(const_tensor.data_type());
MACE_CHECK(const_tensor.offset() + data_len <= model_data_size,
"tensor end (", const_tensor.offset() + data_len,
") should <= ", model_data_size);
if (model_data_size >= 0) {
MACE_CHECK(const_tensor.offset() + data_len <= model_data_size,
"tensor end (", const_tensor.offset() + data_len,
") should <= ", model_data_size);
}
}
MACE_CHECK(
hexagon_nn_append_const_node(nn_id_,
......
......@@ -160,9 +160,12 @@ bool HexagonHTAWrapper::SetupGraph(const NetDef &net_def,
const_cast<unsigned char *>(model_data + const_tensor.offset());
const_node_data_len = const_tensor.data_size() *
GetEnumTypeSize(const_tensor.data_type());
MACE_CHECK(const_tensor.offset() + const_node_data_len <= model_data_size,
"tensor end (", const_tensor.offset() + const_node_data_len,
") should <= ", model_data_size);
if (model_data_size >= 0) {
MACE_CHECK(
const_tensor.offset() + const_node_data_len <= model_data_size,
"tensor end (", const_tensor.offset() + const_node_data_len,
") should <= ", model_data_size);
}
}
hexagon_hta_nn_append_const_node(nn_id_,
......
......@@ -123,8 +123,10 @@ MaceStatus Workspace::LoadModelTensor(const NetDef &net_def, Device *device,
MACE_LATENCY_LOGGER(1, "Load model tensors");
index_t valid_data_size = GetModelValidSize(net_def);
VLOG(3) << "Model valid data size: " << valid_data_size;
MACE_CHECK(valid_data_size <= model_data_size,
valid_data_size, "should be smaller than", model_data_size);
if (model_data_size >= 0) {
MACE_CHECK(valid_data_size <= model_data_size,
valid_data_size, "should be smaller than", model_data_size);
}
const DeviceType device_type = device->device_type();
if (valid_data_size > 0) {
......@@ -170,8 +172,10 @@ MaceStatus Workspace::LoadModelTensor(const NetDef &net_def, Device *device,
"Tensor's data_size not equal with the shape");
const index_t tensor_end = const_tensor.offset() +
tensor->size() * GetEnumTypeSize(const_tensor.data_type());
MACE_CHECK(tensor_end <= model_data_size, "tensor_end (", tensor_end,
") should <= ", model_data_size);
if (model_data_size >= 0) {
MACE_CHECK(tensor_end <= model_data_size, "tensor_end (", tensor_end,
") should <= ", model_data_size);
}
if (device_type == DeviceType::CPU &&
const_tensor.data_type() == DataType::DT_HALF) {
......
......@@ -119,6 +119,8 @@ genrule(
]) + if_opencl_enabled([
"//mace/ops:opencl_kernels",
"//mace/codegen:generated_opencl",
]) + if_rpcmem_enabled([
"//third_party/rpcmem:rpcmem.a",
]) + if_neon_enabled([
"//mace/ops:arm_neon_kernels",
]),
......@@ -165,6 +167,10 @@ genrule(
"$(locations //mace/codegen:generated_opencl) ",
default_value = "",
) +
if_rpcmem_enabled(
"$(locations //third_party/rpcmem:rpcmem.a) ",
default_value = "",
) +
"$@ " +
"$$tmp_mri_file);" +
if_darwin(
......
......@@ -1032,6 +1032,13 @@ MaceStatus MaceEngine::Run(const std::map<std::string, MaceTensor> &inputs,
return impl_->Run(inputs, outputs, nullptr);
}
// Backward-compatibility overload of Init (marked @Deprecated in the header):
// initializes the engine from an in-memory model without an explicit
// model-data size. Forwards to Impl::Init with -1 as the final argument,
// which presumably marks the size as unknown so that size-based bounds
// checks are skipped downstream -- verify against Impl::Init. Returns the
// MaceStatus produced by Impl::Init.
MaceStatus MaceEngine::Init(const NetDef *net_def,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes,
const unsigned char *model_data) {
return impl_->Init(net_def, input_nodes, output_nodes, model_data, -1);
}
MaceStatus CreateMaceEngineFromProto(
const unsigned char *model_graph_proto,
const size_t model_graph_proto_size,
......
......@@ -109,10 +109,10 @@ def if_bfloat16_enabled(a):
"//conditions:default": [],
})
def if_rpcmem_enabled(a):
def if_rpcmem_enabled(a, default_value = []):
return select({
"//mace:rpcmem_enabled": a,
"//conditions:default": [],
"//conditions:default": default_value,
})
def mace_version_genrule():
......
# These files are generated from the rpcmem project
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"])
......@@ -10,13 +13,20 @@ load(
"if_android_armv7",
)
cc_library(
name = "rpcmem",
filegroup(
name = "rpcmem.a",
srcs = if_android_armv7([
"armeabi-v7a/rpcmem.a",
]) + if_android_arm64([
"arm64-v8a/rpcmem.a",
]),
)
cc_library(
name = "rpcmem",
srcs = [
":rpcmem.a",
],
hdrs = [
"rpcmem.h",
],
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册