diff --git a/mace/core/runtime/hexagon/hexagon_control_wrapper.cc b/mace/core/runtime/hexagon/hexagon_control_wrapper.cc index 4c35b29e5ce7c94787d15182b335904bdee5e37c..aa55fd0cd044e7e08455287adf454c9e703ab27b 100644 --- a/mace/core/runtime/hexagon/hexagon_control_wrapper.cc +++ b/mace/core/runtime/hexagon/hexagon_control_wrapper.cc @@ -64,29 +64,29 @@ bool HexagonControlWrapper::SetupGraph(const NetDef &net_def) { std::thread const_thread([&]() { std::cout << "thread function\n"; std::vector<hexagon_nn_const_node> const_node_list; - for (const ConstTensor &tensor_proto: net_def.tensors()) { - std::vector<int> tensor_shape(tensor_proto.dims().begin(), - tensor_proto.dims().end()); + for (const ConstTensor &const_tensor: net_def.tensors()) { + std::vector<int> tensor_shape(const_tensor.dims().begin(), + const_tensor.dims().end()); while (tensor_shape.size() < 4) { tensor_shape.insert(tensor_shape.begin(), 1); } hexagon_nn_const_node const_node; - const_node.node_id = node_id(tensor_proto.node_id()); + const_node.node_id = node_id(const_tensor.node_id()); const_node.tensor.batches = tensor_shape[0]; const_node.tensor.height = tensor_shape[1]; const_node.tensor.width = tensor_shape[2]; const_node.tensor.depth = tensor_shape[3]; - if (tensor_proto.data_type() == DataType::DT_INT32 - && tensor_proto.data_size() == 0) { + if (const_tensor.data_type() == DataType::DT_INT32 + && const_tensor.data_size() == 0) { const_node.tensor.data = NULL; const_node.tensor.dataLen = 0; } else { const_node.tensor.data = - const_cast<unsigned char *>(tensor_proto.data()); + const_cast<unsigned char *>(const_tensor.data()); const_node.tensor.dataLen = - tensor_proto.data_size() * GetEnumTypeSize(tensor_proto.data_type()); + const_tensor.data_size() * GetEnumTypeSize(const_tensor.data_type()); } const_node_list.push_back(const_node); // 255 is magic number: why fastrpc limits sequence length to that?
diff --git a/mace/core/serializer.cc b/mace/core/serializer.cc index 9b7a51bdad5fa41944ea443f37e9d465ca598c5d..c171205f75ad9f73673958a176957f92372a425d 100644 --- a/mace/core/serializer.cc +++ b/mace/core/serializer.cc @@ -12,56 +12,64 @@ std::unique_ptr<ConstTensor> Serializer::Serialize(const Tensor &tensor, return nullptr; } -std::unique_ptr<Tensor> Serializer::Deserialize(const ConstTensor &proto, +std::unique_ptr<Tensor> Serializer::Deserialize(const ConstTensor &const_tensor, DeviceType type) { std::unique_ptr<Tensor> tensor( - new Tensor(GetDeviceAllocator(type), proto.data_type())); + new Tensor(GetDeviceAllocator(type), const_tensor.data_type())); std::vector<index_t> dims; - for (const index_t d : proto.dims()) { + for (const index_t d : const_tensor.dims()) { dims.push_back(d); } tensor->Resize(dims); - switch (proto.data_type()) { + switch (const_tensor.data_type()) { case DT_HALF: - tensor->Copy(reinterpret_cast<const half *>(proto.data()), - proto.data_size()); + tensor->Copy(reinterpret_cast<const half *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_FLOAT: - tensor->Copy(reinterpret_cast<const float *>(proto.data()), - proto.data_size()); + tensor->Copy(reinterpret_cast<const float *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_DOUBLE: - tensor->Copy(reinterpret_cast<const double *>(proto.data()), - proto.data_size()); + tensor->Copy( + reinterpret_cast<const double *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_INT32: - tensor->Copy(reinterpret_cast<const int32_t *>(proto.data()), - proto.data_size()); + tensor->Copy( + reinterpret_cast<const int32_t *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_INT64: - tensor->Copy(reinterpret_cast<const int64_t *>(proto.data()), - proto.data_size()); + tensor->Copy( + reinterpret_cast<const int64_t *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_UINT8: - tensor->Copy(reinterpret_cast<const uint8_t *>(proto.data()), - proto.data_size()); + tensor->Copy( + reinterpret_cast<const uint8_t *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_INT16: tensor->CopyWithCast( - reinterpret_cast<const int16_t *>(proto.data()), proto.data_size()); + 
reinterpret_cast<const int16_t *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_INT8: tensor->CopyWithCast( - reinterpret_cast<const int8_t *>(proto.data()), proto.data_size()); + reinterpret_cast<const int8_t *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_UINT16: tensor->CopyWithCast( - reinterpret_cast<const uint16_t *>(proto.data()), proto.data_size()); + reinterpret_cast<const uint16_t *>(const_tensor.data()), + const_tensor.data_size()); break; case DT_BOOL: tensor->CopyWithCast( - reinterpret_cast<const bool *>(proto.data()), proto.data_size()); + reinterpret_cast<const bool *>(const_tensor.data()), + const_tensor.data_size()); break; default: MACE_NOT_IMPLEMENTED; diff --git a/mace/core/serializer.h b/mace/core/serializer.h index b2e905f93f06c2bd61b060ea6cbadd4b235a2bde..fcc98a7230d15fb106d0e3c7b9e4f23daf37fd0a 100644 --- a/mace/core/serializer.h +++ b/mace/core/serializer.h @@ -15,9 +15,11 @@ class Serializer { Serializer() {} ~Serializer() {} - std::unique_ptr<ConstTensor> Serialize(const Tensor &tensor, const std::string &name); + std::unique_ptr<ConstTensor> Serialize(const Tensor &tensor, + const std::string &name); - std::unique_ptr<Tensor> Deserialize(const ConstTensor &proto, DeviceType type); + std::unique_ptr<Tensor> Deserialize(const ConstTensor &const_tensor, + DeviceType type); DISABLE_COPY_AND_ASSIGN(Serializer); }; diff --git a/mace/core/tensor.h b/mace/core/tensor.h index 24990283db1cea39156db3137c861a8d323f40f1..0fcda89c6112f27d100fd5027858d70eea5260a2 100644 --- a/mace/core/tensor.h +++ b/mace/core/tensor.h @@ -324,12 +324,12 @@ class Tensor { } } } + MappingGuard(MappingGuard &&other) { tensor_ = other.tensor_; other.tensor_ = nullptr; } - MappingGuard(const MappingGuard &other) = delete; - MappingGuard & operator = (const MappingGuard &other) = delete; + ~MappingGuard() { if (tensor_ != nullptr) tensor_->Unmap(); } @@ -339,6 +339,8 @@ class Tensor { private: const Tensor *tensor_; std::vector<size_t> mapped_image_pitch_; + + DISABLE_COPY_AND_ASSIGN(MappingGuard); }; private: diff --git a/mace/core/workspace.cc b/mace/core/workspace.cc index 
29491e0fd63aa002892e151b588935083d6f163b..371ab2e06d71a3a0e836eae80db96dc8a5d24a36 100644 --- a/mace/core/workspace.cc +++ b/mace/core/workspace.cc @@ -72,15 +72,15 @@ Tensor *Workspace::GetTensor(const std::string &name) { void Workspace::LoadModelTensor(const NetDef &net_def, DeviceType type) { MACE_LATENCY_LOGGER(1, "Load model tensors"); Serializer serializer; - for (auto &tensor_proto : net_def.tensors()) { - MACE_LATENCY_LOGGER(2, "Load tensor ", tensor_proto.name()); - VLOG(3) << "Tensor name: " << tensor_proto.name() - << ", data type: " << tensor_proto.data_type() + for (auto &const_tensor : net_def.tensors()) { + MACE_LATENCY_LOGGER(2, "Load tensor ", const_tensor.name()); + VLOG(3) << "Tensor name: " << const_tensor.name() + << ", data type: " << const_tensor.data_type() << ", shape: " - << MakeString(std::vector<index_t>(tensor_proto.dims().begin(), - tensor_proto.dims().end())); + << MakeString(std::vector<index_t>(const_tensor.dims().begin(), + const_tensor.dims().end())); - tensor_map_[tensor_proto.name()] = - serializer.Deserialize(tensor_proto, type); + tensor_map_[const_tensor.name()] = + serializer.Deserialize(const_tensor, type); } if (type == DeviceType::OPENCL) { CreateImageOutputTensor(net_def);