Commit 0b2676c6 authored by Megvii Engine Team

fix(lite): fix lite profile bug when using packed model with model info

GitOrigin-RevId: 707cb8834152be965124ef068399f9d1039e4dfd
Parent 5207154e
@@ -44,7 +44,6 @@ LITE_DYN_TYPE_OBJ_FINAL_IMPL(NetworkImplDft);
 void NetworkImplDft::set_config(const Config& config) {
     m_user_config = std::make_unique<Config>();
     *m_user_config = config;
-    m_load_config.comp_graph = mgb::ComputingGraph::make();
     m_compnode_locator = to_compnode_locator(m_user_config->device_type);
     m_compnode_locator.device = config.device_id;
 }
......
@@ -39,6 +39,7 @@ class NetworkImplDft final : public Network::NetworkImplBase {
     LITE_DYN_TYPE_OBJ_FINAL_DECL;
 public:
+    NetworkImplDft() { m_load_config.comp_graph = mgb::ComputingGraph::make(); }
     using S = megdnn::param::ExecutionPolicy::Strategy;
     //! set the config of the network, include:
     //! the inference device
......
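Editor's note on the two hunks above: creating the ComputingGraph inside set_config() meant every config call replaced m_load_config.comp_graph, discarding any options (such as profiling enabled through the info section of a packed model) already set on the previous graph. Moving the creation into the NetworkImplDft constructor makes the graph live for the whole lifetime of the network. Below is a standalone sketch of this bug class under that plausible reading of the ordering; Graph, Network, enable_profile and the set_config_* names are hypothetical illustrations, not real lite API.

    class Graph:
        """Stand-in for mgb::ComputingGraph: just a bag of options."""
        def __init__(self):
            self.options = {}

    class Network:
        def __init__(self):
            # Fixed behaviour: the graph is created exactly once, up front.
            self.comp_graph = Graph()

        def enable_profile(self):
            # What parsing a packed model's info section effectively does.
            self.comp_graph.options["profile"] = True

        def set_config_old(self):
            self.comp_graph = Graph()   # old behaviour: drops prior options

        def set_config_new(self):
            pass                        # new behaviour: graph left untouched

    net = Network()
    net.enable_profile()
    net.set_config_old()
    print(net.comp_graph.options)       # {} -- profiling silently lost

    net = Network()
    net.enable_profile()
    net.set_config_new()
    print(net.comp_graph.options)       # {'profile': True} -- preserved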
@@ -73,14 +73,14 @@ __attribute__((unused)) static std::shared_ptr<Tensor> get_input_data(
     std::vector<npy::ndarray_len_t> stl_shape;
     std::vector<int8_t> raw;
     npy::LoadArrayFromNumpy(path, type_str, stl_shape, raw);
     auto lite_tensor = std::make_shared<Tensor>(LiteDeviceType::LITE_CPU);
     Layout layout;
     layout.ndim = stl_shape.size();
     const std::map<std::string, LiteDataType> type_map = {
-            {"f4", LiteDataType::LITE_FLOAT},
-            {"i4", LiteDataType::LITE_INT},
-            {"i1", LiteDataType::LITE_INT8},
+            {"f4", LiteDataType::LITE_FLOAT},  {"f2", LiteDataType::LITE_HALF},
+            {"i8", LiteDataType::LITE_INT64},  {"i4", LiteDataType::LITE_INT},
+            {"u4", LiteDataType::LITE_UINT},   {"i2", LiteDataType::LITE_INT16},
+            {"u2", LiteDataType::LITE_UINT16}, {"i1", LiteDataType::LITE_INT8},
             {"u1", LiteDataType::LITE_UINT8}};
     layout.shapes[0] = 1;
     for (size_t i = 0; i < stl_shape.size(); i++) {
......
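For reference, the map keys follow numpy's short typestr convention, <kind><itemsize>, so the new entries let get_input_data() accept float16, int64, uint32 and int16 .npy inputs. A small sketch (assuming numpy is installed) that prints the correspondence:

    import numpy as np

    # np.dtype(...).str is e.g. '<f4' or '|u1'; dropping the leading
    # byte-order character leaves exactly the keys used in type_map above.
    for name in ("float32", "float16", "int64", "int32",
                 "uint32", "int16", "uint16", "int8", "uint8"):
        print(np.dtype(name).str[1:], "->", name)
    # f4 -> float32, f2 -> float16, i8 -> int64, i4 -> int32, ...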
@@ -109,7 +109,7 @@ def main():
     PackModel.PackModelStartModelsVector(builder, 1)
     builder.PrependUOffsetTRelative(model)
-    models = builder.EndVector(1)
+    models = builder.EndVector()
     PackModel.PackModelStart(builder)
     PackModel.PackModelAddModels(builder, models)
......
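The EndVector change tracks the flatbuffers Python API: since flatbuffers 2.0 the builder records the element count from StartVector itself, so EndVector() takes no argument (passing one raises a TypeError), while 1.x required EndVector(numElems). A minimal sketch of the same call sequence outside the generated PackModel code:

    import flatbuffers

    builder = flatbuffers.Builder(0)
    off = builder.CreateString("model-0")   # any offset serves for the demo
    builder.StartVector(4, 1, 4)            # element size, count, alignment
    builder.PrependUOffsetTRelative(off)
    vec = builder.EndVector()               # flatbuffers >= 2.0
    # vec = builder.EndVector(1)            # the flatbuffers 1.x spelling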