Commit 616cc80d authored by: mindspore-ci-bot, committed by: Gitee

!2378 fix tensor id bug and some yolov3 bug

Merge pull request !2378 from flywind/fix_yolov3_bug
@@ -30,6 +30,7 @@
 namespace mindspore {
 namespace tensor {
+static uint64_t count = 0;
 void DataBuf2Contiguous(const py::array &src, py::array *const dest) {
   if (dest == nullptr) {
     MS_LOG(EXCEPTION) << "Failed to copy data to a contiguous buffer as dest is nullptr!";
@@ -213,7 +214,7 @@ void Tensor::init(const py::array &input, const TypeId &data_type) {
     data_ = input;
   }
   dirty_ = true;
-  id_ = std::to_string((uintptr_t)(this));
+  id_ = std::to_string((uintptr_t)(this)) + std::to_string(count++);
 }
 
 void Tensor::init(TypeId data_type, const std::vector<int> &shape, py::array *const data) {
@@ -260,7 +261,7 @@ void Tensor::init(TypeId data_type, const std::vector<int> &shape, py::array *co
       MS_LOG(EXCEPTION) << "Cannot construct Tensor because of unsupported data type: " << data_type << ".";
       break;
   }
-  id_ = std::to_string((uintptr_t)(this));
+  id_ = std::to_string((uintptr_t)(this)) + std::to_string(count++);
 }
 
 TypePtr Tensor::SetDtype(const TypePtr type_ptr) {
......
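Note on the id_ change above: the id was previously derived from the object address alone, so a Tensor allocated at the address of a recently freed Tensor could inherit the same id and be mistaken for it. Below is a minimal, standalone sketch (illustrative names only, not MindSpore code) of the collision scenario and of how appending the file-static count keeps ids unique within a process.

#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

static uint64_t count = 0;

struct FakeTensor {};  // illustrative stand-in for mindspore::tensor::Tensor

std::string IdFromPointerOnly(const FakeTensor *t) {
  return std::to_string(reinterpret_cast<uintptr_t>(t));
}

std::string IdFromPointerAndCounter(const FakeTensor *t) {
  // The counter only ever grows, so two tensors may share an address
  // (after one is destroyed) yet can never share an id.
  return std::to_string(reinterpret_cast<uintptr_t>(t)) + std::to_string(count++);
}

int main() {
  std::string first;
  {
    auto a = std::make_unique<FakeTensor>();
    first = IdFromPointerOnly(a.get());
  }  // a destroyed; the allocator may hand the same address to b
  auto b = std::make_unique<FakeTensor>();
  std::cout << (first == IdFromPointerOnly(b.get()) ? "pointer-only ids can collide"
                                                    : "no collision this run")
            << "\n";
  std::cout << "counter-suffixed id: " << IdFromPointerAndCounter(b.get()) << "\n";
  return 0;
}

A pointer-only id is unique only among objects alive at the same moment; the counter suffix extends uniqueness across the whole process lifetime.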
@@ -57,7 +57,7 @@ struct OpExecInfo {
   py::dict op_attrs;
 };
 using OpExecInfoPtr = std::shared_ptr<OpExecInfo>;
 
-OpExecInfoPtr GenerateOpExecInfo(const py::args &args);
+OpExecInfoPtr GenerateOpExecInfo(const py::args &args, py::list *const out_args);
 
 const std::set<std::string> ignore_infer_prim = {"make_ref"};
 }  // namespace pynative
......
@@ -53,7 +53,7 @@
 const char SINGLE_OP_GRAPH[] = "single_op_graph";
 // primitive unable to infer value for constant input in PyNative mode
-const std::set<std::string> vm_operators = {"make_ref", "HookBackward"};
+const std::set<std::string> vm_operators = {"make_ref", "HookBackward", "stop_gradient"};
 
 namespace mindspore {
 namespace pynative {
@@ -79,15 +79,12 @@ std::string GetId(const py::object &obj) {
     if (p_list.size() == 0) {
       return "empty";
     }
-    to_process = p_list[0];
     prefix = "tuple:";
-    if (!py::isinstance<tensor::Tensor>(to_process)) {
-      std::string key = "";
-      for (size_t i = 0; i < p_list.size(); ++i) {
-        key += std::string(py::str(p_list[i])) + ":";
-      }
-      return prefix + key;
-    }
+    std::string key = "";
+    for (size_t i = 0; i < p_list.size(); ++i) {
+      key += std::string(py::str(GetId(p_list[i]))) + ":";
+    }
+    return prefix + key;
   }
   if (py::isinstance<py::int_>(to_process)) {
     return prefix + std::string(py::str(to_process));
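The GetId change above keys a tuple by the recursive GetId of each element instead of each element's printed value, so tuples holding distinct tensors with identical contents no longer collapse to one cache key. A small standalone illustration, with plain strings standing in for the per-tensor id_:

#include <iostream>
#include <string>
#include <vector>

// Key construction mirroring the new tuple branch of GetId: concatenate the
// per-element identities rather than their printed contents.
std::string TupleKey(const std::vector<std::string> &element_ids) {
  if (element_ids.empty()) {
    return "empty";
  }
  std::string key = "tuple:";
  for (const auto &id : element_ids) {
    key += id + ":";
  }
  return key;
}

int main() {
  // Two tensors that print identically but are different objects.
  std::cout << TupleKey({"140230_0"}) << "\n";  // tuple:140230_0:
  std::cout << TupleKey({"140231_1"}) << "\n";  // tuple:140231_1: distinct keys now
  return 0;
}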
@@ -143,7 +140,8 @@ std::map<SignatureEnumDType, size_t> GetDstType(const py::tuple &py_args,
   return dst_type;
 }
 
-py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tuple *const out_args) {
+py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tuple *const out_args,
+                        py::list *out_args_list) {
   auto &py_args = *out_args;
   py::tuple input_mask(args.size());
   for (size_t i = 0; i < args.size(); ++i) {
@@ -171,8 +169,10 @@ py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tu
       auto tensor_ptr = py::cast<tensor::TensorPtr>(py_args[it->second]);
       if (py::isinstance<py::int_>(py_args[i])) {
         py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::int_>(py_args[i]), tensor_ptr->Dtype());
+        (*out_args_list)[i] = py_args[i];
       } else {
         py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::float_>(py_args[i]), tensor_ptr->Dtype());
+        (*out_args_list)[i] = py_args[i];
       }
       continue;
     }
@@ -195,7 +195,7 @@ void PynativeInfer(const PrimitivePyPtr &prim, const py::list &py_args, OpExecIn
   op_exec_info->abstract = infer_res;
 }
 
-OpExecInfoPtr GenerateOpExecInfo(const py::args &args) {
+OpExecInfoPtr GenerateOpExecInfo(const py::args &args, py::list *const out_args) {
   if (args.size() != PY_ARGS_NUM) {
     MS_LOG(ERROR) << "Three args are needed by RunOp";
     return nullptr;
@@ -213,7 +213,7 @@ OpExecInfoPtr GenerateOpExecInfo(const py::args &args) {
   size_t input_num = a.size();
   op_exec_info->op_inputs = py::tuple(input_num);
-  op_exec_info->inputs_mask = ConvertInputs(prim, args[PY_INPUTS], &op_exec_info->op_inputs);
+  op_exec_info->inputs_mask = ConvertInputs(prim, args[PY_INPUTS], &op_exec_info->op_inputs, out_args);
   // use python infer method
   if (ignore_infer_prim.find(op_exec_info->op_name) == ignore_infer_prim.end()) {
     PynativeInfer(prim, op_exec_info->op_inputs, op_exec_info.get());
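ConvertInputs now also receives the raw Python input list (out_args here) and writes its scalar-to-Tensor promotions back into that list, so later stages that re-read the list, such as RunOp and MakeCNode, see the converted operands rather than the original scalars. A hedged sketch of the write-back pattern follows; the pybind11 calls are real, but PromoteScalarsInPlace and the float promotion are stand-ins for the actual scalar-to-Tensor conversion.

#include <pybind11/embed.h>
#include <iostream>
namespace py = pybind11;

// Hypothetical helper mirroring the out_args write-back in ConvertInputs:
// entries of the caller's list are replaced in place, so code that later
// re-reads the same list sees the converted values, not the originals.
void PromoteScalarsInPlace(py::list *const out_args) {
  for (size_t i = 0; i < out_args->size(); ++i) {
    py::object item = (*out_args)[i];
    if (py::isinstance<py::int_>(item)) {
      (*out_args)[i] = py::float_(py::cast<double>(item));  // stand-in for the int -> Tensor promotion
    }
  }
}

int main() {
  py::scoped_interpreter guard{};
  py::list args;
  args.append(py::int_(3));
  args.append(py::str("x"));
  PromoteScalarsInPlace(&args);
  std::cout << std::string(py::str(args)) << "\n";  // [3.0, 'x']
  return 0;
}

This is the same pattern as the (*out_args_list)[i] = py_args[i]; lines in the hunk above.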
@@ -513,16 +513,15 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
   auto prim = op_exec_info->py_primitive;
   inputs.push_back(NewValueNode(prim));
   py::tuple op_masks = op_exec_info->inputs_mask;
-  py::list op_args = args[PY_INPUTS];
   AbstractBasePtrList args_spec_list;
-  for (size_t i = 0; i < op_args.size(); i++) {
-    auto node = GetInput(op_args[i], op_masks[i]);
+  for (size_t i = 0; i < args.size(); i++) {
+    auto node = GetInput(args[i], op_masks[i]);
     args_spec_list.push_back(node->abstract());
     inputs.push_back(node);
   }
 
   auto cnode = curr_g_->NewCNode(inputs);
-  MS_LOG(DEBUG) << "MakeCnode set node " << cnode->DebugString();
+  MS_LOG(DEBUG) << "MakeCnode set node " << cnode->DebugString(4);
 
   py::object out_real = out;
   if (out.size() == 1) {
     MS_LOG(DEBUG) << "MakeCnode out size is one.";
@@ -534,10 +533,12 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
     if (value.size() > 1) {
       for (int i = 0; i < static_cast<int>(value.size()); i++) {
         auto value_id = GetId(value[i]);
+        MS_LOG(DEBUG) << "MakeCnode set node id " << value_id;
         set_obj_node_map(curr_g_, value_id, cnode, i);
       }
     }
   }
+  MS_LOG(DEBUG) << "MakeCnode set node id " << obj_id;
   set_obj_node_map(curr_g_, obj_id, cnode);
   set_pyobj(curr_g_, obj_id);
   return cnode;
@@ -545,12 +546,17 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
 
 AnfNodePtr PynativeExecutor::GetObjNode(const py::object &obj) {
   auto &out = graph_info_map_[curr_g_].obj_node_map[GetId(obj)];
-  if (out.second == -1) {
+  if (out.second.size() == 1 && out.second[0] == -1) {
     return out.first;
   }
-  std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), out.first,
-                                                NewValueNode(out.second)};
-  return curr_g_->NewCNode(tuple_get_item_inputs);
+  auto node = out.first;
+  MS_LOG(DEBUG) << "output size " << out.second.size() << node->DebugString();
+  for (auto &idx : out.second) {
+    std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), node, NewValueNode(idx)};
+    node = curr_g_->NewCNode(tuple_get_item_inputs);
+  }
+  MS_LOG(DEBUG) << "GetObjNode output" << node->DebugString(6);
+  return node;
 }
 
 py::tuple RunOp(const OpExecInfoPtr &op_exec_info, const py::args &args) {
@@ -594,8 +600,11 @@ py::tuple RunOp(const OpExecInfoPtr &op_exec_info, const py::args &args) {
 
 py::tuple RunOp(const py::args &args) {
   MS_LOG(DEBUG) << "RunOp start" << args.size();
-  OpExecInfoPtr op_exec_info = GenerateOpExecInfo(args);
+  py::list args_input = args[PY_INPUTS];
+  OpExecInfoPtr op_exec_info = GenerateOpExecInfo(args, &args_input);
   MS_EXCEPTION_IF_NULL(op_exec_info);
   if (op_exec_info->abstract != nullptr) {
     py::dict output = abstract::ConvertAbstractToPython(op_exec_info->abstract);
     if (!output["value"].is_none()) {
@@ -609,7 +618,7 @@ py::tuple RunOp(const py::args &args) {
       return value_ret;
     }
   }
-  return RunOp(op_exec_info, args);
+  return RunOp(op_exec_info, args_input);
 }
 
 void ClearPyNativeSession() { session = nullptr; }
@@ -646,6 +655,14 @@ void PynativeExecutor::NewGraph(const py::object &cell, const py::args &args) {
   }
 }
 
+AnfNodePtr PynativeExecutor::MakeValueNode(const py::object &obj, const std::string &obj_id) {
+  ValuePtr converted_ret = nullptr;
+  parse::ConvertData(obj, &converted_ret);
+  auto node = NewValueNode(converted_ret);
+  set_obj_node_map(curr_g_, obj_id, node);
+  return node;
+}
+
 AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &op_mask) {
   AnfNodePtr node = nullptr;
   std::string obj_id = GetId(obj);
@@ -683,10 +700,16 @@ AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &o
   } else if (py::isinstance<py::tuple>(obj)) {
     // out = op((x, y))
     // out = cell((x, y))
+    auto tuple = obj.cast<py::tuple>();
+    // cell((1,2)): support not mix (scalar, tensor)
+    if (tuple.size() > 0 && !py::isinstance<tensor::Tensor>(tuple[0])) {
+      return MakeValueNode(obj, obj_id);
+    }
     std::vector<AnfNodePtr> args;
     args.push_back(NewValueNode(prim::kPrimMakeTuple));
-    auto tuple = obj.cast<py::tuple>();
     auto tuple_size = static_cast<int>(tuple.size());
     for (int i = 0; i < tuple_size; i++) {
       args.push_back(GetInput(tuple[i], py::object()));
@@ -695,17 +718,26 @@ AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &o
     set_obj_node_map(curr_g_, GetId(obj), cnode);
     node = cnode;
   } else {
-    // out = op(x, 1)
-    ValuePtr converted_ret = nullptr;
-    parse::ConvertData(obj, &converted_ret);
-    node = NewValueNode(converted_ret);
-    set_obj_node_map(curr_g_, obj_id, node);
+    node = MakeValueNode(obj, obj_id);
   }
 
-  MS_LOG(DEBUG) << "Now getinput " << py::str(obj) << " node " << node->ToString();
+  MS_LOG(DEBUG) << "Now getinput node " << node->ToString() << obj_id;
   return node;
 }
 
+// for output[0][1] need getitem multi
+void PynativeExecutor::SetTupleOutput(const py::object &obj, const AnfNodePtr &cnode, std::vector<int> idx) {
+  if (py::isinstance<py::tuple>(obj)) {
+    auto tuple = obj.cast<py::tuple>();
+    for (int i = 0; i < static_cast<int>(tuple.size()); i++) {
+      std::vector<int> tmp = idx;
+      tmp.push_back(i);
+      set_obj_node_map(curr_g_, GetId(tuple[i]), cnode, tmp);
+      SetTupleOutput(tuple[i], cnode, tmp);
+    }
+  }
+}
+
 void PynativeExecutor::Pushp() { graph_p_.push(curr_g_); }
 
 void PynativeExecutor::Popp() {
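SetTupleOutput above records, for every element of a nested output tuple, the full index path from the producing CNode (for example {0, 1} for output[0][1]), and the reworked GetObjNode replays that path as a chain of TupleGetItem nodes. A standalone sketch of the idea, with strings standing in for AnfNodePtr and a plain map standing in for graph_info_map_:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Each object id maps to the node that produced it plus the index path needed
// to extract it from that node ({-1} means "the node itself").
struct Entry {
  std::string node;
  std::vector<int> index_path;
};

std::map<std::string, Entry> obj_node_map;

// Analogue of the new GetObjNode loop: one TupleGetItem per recorded index.
std::string Resolve(const std::string &obj_id) {
  const Entry &e = obj_node_map.at(obj_id);
  if (e.index_path.size() == 1 && e.index_path[0] == -1) {
    return e.node;
  }
  std::string node = e.node;
  for (int idx : e.index_path) {
    node = "TupleGetItem(" + node + ", " + std::to_string(idx) + ")";
  }
  return node;
}

int main() {
  // Output structure ((a, b), c): b lives at path {0, 1}, c at path {1}.
  obj_node_map["c"] = {"op_out", {1}};
  obj_node_map["b"] = {"op_out", {0, 1}};
  std::cout << Resolve("c") << "\n";  // TupleGetItem(op_out, 1)
  std::cout << Resolve("b") << "\n";  // TupleGetItem(TupleGetItem(op_out, 0), 1)
  return 0;
}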
@@ -737,6 +769,7 @@ void PynativeExecutor::EndGraph(const py::object &cell, const py::object &out, c
     for (int i = 0; i < tuple_size; i++) {
       args.push_back(GetInput(tuple[i], py::object()));
       set_obj_node_map(curr_g_, GetId(tuple[i]), cnode, i);
+      SetTupleOutput(tuple[i], cnode, std::vector<int>{i});
     }
     cnode->set_inputs(args);
     set_obj_node_map(curr_g_, out_id, cnode);
@@ -784,6 +817,7 @@ void PynativeExecutor::EndGraphByOutId(const std::string &out_id, const py::obje
     auto out_size = static_cast<int>(out_list.size());
     for (int i = 0; i < out_size; i++) {
       set_obj_node_map(curr_g_, GetId(out_list[i]), out_cnode, i);
+      SetTupleOutput(out_list[i], out_cnode, std::vector<int>{i});
     }
   }
   set_obj_node_map(curr_g_, GetId(out), out_cnode);
@@ -878,6 +912,7 @@ void PynativeExecutor::GradNet(const GradOperationPtr &grad, const py::object &c
   MS_EXCEPTION_IF_NULL(resource_->func_graph());
   auto g = GradGraph(resource_->func_graph(), grad, w_args, size);
   resource_->set_func_graph(g);
+  resource_->manager()->KeepRoots({g});
 
   // get the parameters items and add the value to args_spec
   abstract::AbstractBasePtrList args_spec = GetArgsSpec(args);
......
@@ -44,13 +44,14 @@ py::object RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat
 py::tuple RunOp(const py::args &args);
 
-py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &py_args, py::tuple *const out_args);
+py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &py_args, py::tuple *const out_args,
+                        py::list *out_args_list);
 
 void ClearPyNativeSession();
 
 struct GraphInfo {
   std::unordered_map<std::string, AnfNodePtr> param_map;
-  std::unordered_map<std::string, std::pair<AnfNodePtr, int>> obj_node_map;
+  std::unordered_map<std::string, std::pair<AnfNodePtr, std::vector<int>>> obj_node_map;
   AnfNodePtr output;
   std::vector<std::string> objects;
 };
@@ -81,9 +82,12 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
   FuncGraphPtr curr_g() { return curr_g_; }
   void set_pyobj(FuncGraphPtr g, const std::string obj) { graph_info_map_[g].objects.push_back(obj); }
   void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node) {
-    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, -1);
+    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, std::vector<int>{-1});
   }
   void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node, int index) {
+    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, std::vector<int>{index});
+  }
+  void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node, std::vector<int> index) {
     graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, index);
   }
   AnfNodePtr MakeCNode(const OpExecInfoPtr &op_exec_info, const py::args &args, const py::tuple &out);
@@ -93,6 +97,8 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
   void Popp();
   FuncGraphPtr GradGraph(FuncGraphPtr g, const GradOperationPtr &grad_op, const std::vector<AnfNodePtr> &weights,
                          size_t arg_size);
+  void SetTupleOutput(const py::object &obj, const AnfNodePtr &cnode, std::vector<int> idx);
+  AnfNodePtr MakeValueNode(const py::object &obj, const std::string &obj_id);
   ~PynativeExecutor();
......
@@ -35,7 +35,7 @@ class TestPynativeExecute : public UT::Common {
   TestPynativeExecute() {}
 };
 
-inline ValuePtr PyAttrValue(const py::object& obj) {
+inline ValuePtr PyAttrValue(const py::object &obj) {
   ValuePtr converted_ret;
   bool converted = parse::ConvertData(obj, &converted_ret);
   if (!converted) {
@@ -63,7 +63,9 @@ OpExecInfoPtr ConstructOpExecInfo() {
   auto conv_obj = prim::GetPythonOps("conv2d_prim", "gtest_input.pynative");
   py::none py_none;
-  return GenerateOpExecInfo(py::make_tuple(conv_obj, op_name, op_inputs));
+  py::args args = py::make_tuple(conv_obj, op_name, op_inputs);
+  py::list args_input = args[PY_INPUTS];
+  return GenerateOpExecInfo(args, &args_input);
 }
 
 TEST_F(TestPynativeExecute, TestRunOpInVM) {
@@ -77,8 +79,8 @@ TEST_F(TestPynativeExecute, TestRunOpInVM) {
 TEST_F(TestPynativeExecute, TestRunOp) {
   py::none py_none;
   auto op_exec_info_ptr = ConstructOpExecInfo();
-  py::tuple outputs = pynative::RunOp(py::make_tuple(op_exec_info_ptr->py_primitive, op_exec_info_ptr->op_name,
-                                                     op_exec_info_ptr->op_inputs));
+  py::tuple outputs = pynative::RunOp(
+    py::make_tuple(op_exec_info_ptr->py_primitive, op_exec_info_ptr->op_name, op_exec_info_ptr->op_inputs));
   if (outputs.size() == 0) {
     FAIL();
   } else {
......