未验证 提交 60fedae3 编写于 作者: nihui 提交者: GitHub

fix pnnx ghost reshape shape expression inputs, fix intmax overflow on fuse/eval expression (#4923)

上级 0a8cf31a
......@@ -132,6 +132,8 @@ void pass_level1(const torch::jit::Module& mod, const std::shared_ptr<torch::jit
// sub_mod.dump(true, true, true);
op->attrs["data"] = sub_mod.attr(name).toTensor();
op->outputs[0]->type = op->attrs["data"].type;
op->outputs[0]->shape = op->attrs["data"].shape;
}
}
else if (n->kind() == c10::prim::Constant) // || n->kind() == c10::prim::ListConstruct)
......
......@@ -251,6 +251,9 @@ static void fuse_expression(Graph& graph, Operand* operand, std::string& expr, s
int64_t v;
zip.read_file(operand->name, (char*)&v);
if (v == std::numeric_limits<int64_t>::max()) v = INT_MAX;
if (v == std::numeric_limits<int64_t>::min()) v = INT_MIN;
char tmp[32];
sprintf(tmp, "%ld", v);
expr += tmp;
......
......@@ -165,7 +165,10 @@ void eliminate_reshape_shape_expression(Graph& graph)
if (op_expr->outputs[0]->consumers.size() == 0)
{
// remove expression operator
op_expr->inputs[0]->remove_consumer(op_expr);
for (auto x : op_expr->inputs)
{
x->remove_consumer(op_expr);
}
Operand* op_expr_out = op_expr->outputs[0];
......
......@@ -193,6 +193,11 @@ static std::string eval_expression(const Operator* op)
if (t == "int")
{
int r = int(af);
if (token_is_interger_literal(a))
{
r = std::stoi(a);
}
exprstack.push(std::to_string(r));
}
if (t == "abs")
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册