Commit 551cad49 authored by Megvii Engine Team

refactor(megbrain): refactor try_infer_tensor_layout in lite to avoid using the megbrain interface

GitOrigin-RevId: 9799e671022002be719a4ee71ccca89d2ec318b8
Parent 18d7a97c
@@ -660,8 +660,10 @@ void NetworkImplDft::set_io(const NetworkIO& network_io) {
}
void NetworkImplDft::try_infer_tensor_layout(std::shared_ptr<Tensor> tensor, Var var) {
if (var.node()->capable_shape_infer()) {
using InferType = mgb::cg::static_infer::InferType;
auto&& static_infer_mgr = m_load_config.comp_graph->static_infer_manager();
if (static_infer_mgr.get_infer_type(var.node()).shape &
(InferType::CONST | InferType::RT_STATIC)) {
auto shape = static_infer_mgr.infer_shape_fallible(var.node());
if (!shape) {
LITE_WARN(
......
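The hunk above replaces the VarNode::capable_shape_infer() check with a query against MegBrain's public StaticInferManager. A minimal standalone sketch of that pattern follows; the helper names (shape_statically_inferable, apply_inferred_shape) and the header path are illustrative assumptions, while static_infer_manager(), get_infer_type(), InferType::CONST / InferType::RT_STATIC and infer_shape_fallible() are the calls visible in the diff.

#include "megbrain/graph.h"  // assumed umbrella header for ComputingGraph / static infer types

using InferType = mgb::cg::static_infer::InferType;

// Returns true when var's shape can be deduced statically: either it is a
// constant (CONST) or it can be computed before execution (RT_STATIC).
static bool shape_statically_inferable(mgb::cg::ComputingGraph& graph, mgb::cg::VarNode* var) {
    auto&& static_infer_mgr = graph.static_infer_manager();
    if (static_infer_mgr.get_infer_type(var).shape &
        (InferType::CONST | InferType::RT_STATIC)) {
        return true;
    }
    return false;
}

// Usage sketch mirroring try_infer_tensor_layout(): only ask for the shape when
// it is statically inferable, and tolerate a failed inference, since
// infer_shape_fallible() returns a null pointer on failure.
static void apply_inferred_shape(mgb::cg::ComputingGraph& graph, mgb::cg::VarNode* var) {
    if (!shape_statically_inferable(graph, var))
        return;
    if (auto shape = graph.static_infer_manager().infer_shape_fallible(var)) {
        // propagate *shape to the lite tensor's layout here (omitted)
    }
}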
@@ -596,18 +596,6 @@ bool VarNode::is_graph_dest_varnode() {
    return ComputingGraphImpl::downcast(owner_graph())->var_receiver(this).size() == 0;
}

bool VarNode::capable_shape_infer() {
    auto&& mgr =
            ComputingGraphImpl::downcast(owner_graph())->static_infer_manager_impl();
    return mgr.has_shape_infer(this);
}

bool VarNode::capable_value_infer() {
    auto&& mgr =
            ComputingGraphImpl::downcast(owner_graph())->static_infer_manager_impl();
    return mgr.has_value_infer(this);
}

VarNode& VarNode::add_flag(Flag flag) {
    modify_flag(flag, m_flag | flag);
    return *this;
......
@@ -488,16 +488,6 @@ public:
    MGE_WIN_DECLSPEC_FUC MemAllocPlan& init_mem_plan(
            const DeviceTensorND* fixed_alloc = nullptr);

    /*!
     * \brief check shape infer capability by checking m_static_infer_trait's shape infer
     */
    MGE_WIN_DECLSPEC_FUC bool capable_shape_infer();

    /*!
     * \brief check value infer capability by checking m_static_infer_trait's value infer
     */
    MGE_WIN_DECLSPEC_FUC bool capable_value_infer();

    //! whether the var is a graph output; if it is, the Flag of
    //! NO_SYS_MEM_ALLOC can be modified.
    MGE_WIN_DECLSPEC_FUC bool is_graph_dest_varnode();
......
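The two removal hunks delete VarNode::capable_shape_infer() and VarNode::capable_value_infer(), which forwarded to the internal StaticInferManagerImpl::has_shape_infer() / has_value_infer() checks; after this change, code outside megbrain is expected to go through the public manager instead, as the lite hunk does. Below is a hedged sketch of equivalent capability checks built only on get_infer_type(); treating the InferType::value field symmetrically to InferType::shape is an assumption, and these sketches report the stricter "statically inferable" condition used by the refactored lite code rather than an exact drop-in for the removed helpers.

#include "megbrain/graph.h"  // assumed header

using InferType = mgb::cg::static_infer::InferType;

// Shape-side check: same condition as the refactored lite code above.
static bool can_infer_shape(mgb::cg::ComputingGraph& graph, mgb::cg::VarNode* var) {
    auto type = graph.static_infer_manager().get_infer_type(var);
    if (type.shape & (InferType::CONST | InferType::RT_STATIC))
        return true;
    return false;
}

// Value-side check: assumed to mirror the shape-side query via InferType::value,
// standing in for the removed capable_value_infer() helper.
static bool can_infer_value(mgb::cg::ComputingGraph& graph, mgb::cg::VarNode* var) {
    auto type = graph.static_infer_manager().get_infer_type(var);
    if (type.value & (InferType::CONST | InferType::RT_STATIC))
        return true;
    return false;
}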