Commit 616cc80d
magicwindyyd / mindspore (fork of MindSpore / mindspore, in sync with the fork source)

Authored June 20, 2020 by mindspore-ci-bot; committed via Gitee on June 20, 2020.

!2378 fix tensor id bug and some yolov3 bug

Merge pull request !2378 from flywind/fix_yolov3_bug

Parents: 9969a9b0, b79523a9
5 changed files, with 82 additions and 38 deletions:

- mindspore/ccsrc/ir/tensor.cc (+3, -2)
- mindspore/ccsrc/pynative/base.h (+1, -1)
- mindspore/ccsrc/pynative/pynative_execute.cc (+63, -28)
- mindspore/ccsrc/pynative/pynative_execute.h (+9, -3)
- tests/ut/cpp/pynative/pynative_execute_test.cc (+6, -4)
mindspore/ccsrc/ir/tensor.cc

```diff
@@ -30,6 +30,7 @@
 namespace mindspore {
 namespace tensor {
+static uint64_t count = 0;

 void DataBuf2Contiguous(const py::array &src, py::array *const dest) {
   if (dest == nullptr) {
     MS_LOG(EXCEPTION) << "Failed to copy data to a contiguous buffer as dest is nullptr!";
@@ -213,7 +214,7 @@ void Tensor::init(const py::array &input, const TypeId &data_type) {
     data_ = input;
   }
   dirty_ = true;
-  id_ = std::to_string((uintptr_t)(this));
+  id_ = std::to_string((uintptr_t)(this)) + std::to_string(count++);
 }

 void Tensor::init(TypeId data_type, const std::vector<int> &shape, py::array *const data) {
@@ -260,7 +261,7 @@ void Tensor::init(TypeId data_type, const std::vector<int> &shape, py::array *co
       MS_LOG(EXCEPTION) << "Cannot construct Tensor because of unsupported data type: " << data_type << ".";
       break;
   }
-  id_ = std::to_string((uintptr_t)(this));
+  id_ = std::to_string((uintptr_t)(this)) + std::to_string(count++);
 }

 TypePtr Tensor::SetDtype(const TypePtr type_ptr) {
```
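This two-line change is the "tensor id bug" from the commit title: an id derived from `this` alone is only unique among *live* objects, so once a tensor is destroyed the allocator may hand the same address to a new tensor, and two logically distinct tensors end up with identical ids. Appending a process-wide counter keeps ids unique for the life of the process. A minimal standalone sketch of the failure mode and the remedy (plain C++, no MindSpore or pybind11; `Obj`, `addr_id`, and `make_id` are hypothetical names for illustration):

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

struct Obj {};  // stand-in for a Tensor

// Old scheme: id from the object's address alone.
std::string addr_id(const Obj *p) { return std::to_string((std::uintptr_t)p); }

// New scheme: address plus a monotonically increasing counter,
// mirroring the `count++` suffix added in tensor.cc.
static uint64_t count = 0;
std::string make_id(const Obj *p) {
  return std::to_string((std::uintptr_t)p) + std::to_string(count++);
}

int main() {
  std::string id1, id2;
  {
    auto a = std::make_unique<Obj>();
    id1 = addr_id(a.get());
  }  // `a` freed here; its address may be reused
  auto b = std::make_unique<Obj>();
  id2 = addr_id(b.get());
  // On many allocators id1 == id2 here, conflating two distinct objects.
  std::cout << "address-only ids equal: " << (id1 == id2) << "\n";

  // With the counter suffix, ids differ even when addresses collide.
  std::cout << make_id(b.get()) << " vs " << make_id(b.get()) << "\n";
  return 0;
}
```

As in the patch itself, the counter increment here is not atomic, which is only safe while tensor construction stays on a single thread.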
mindspore/ccsrc/pynative/base.h

```diff
@@ -57,7 +57,7 @@ struct OpExecInfo {
   py::dict op_attrs;
 };
 using OpExecInfoPtr = std::shared_ptr<OpExecInfo>;

-OpExecInfoPtr GenerateOpExecInfo(const py::args &args);
+OpExecInfoPtr GenerateOpExecInfo(const py::args &args, py::list *const out_args);

 const std::set<std::string> ignore_infer_prim = {"make_ref"};
 }  // namespace pynative
```
mindspore/ccsrc/pynative/pynative_execute.cc

```diff
@@ -53,7 +53,7 @@
 const char SINGLE_OP_GRAPH[] = "single_op_graph";
 // primitive unable to infer value for constant input in PyNative mode
-const std::set<std::string> vm_operators = {"make_ref", "HookBackward"};
+const std::set<std::string> vm_operators = {"make_ref", "HookBackward", "stop_gradient"};

 namespace mindspore {
 namespace pynative {
```
```diff
@@ -79,15 +79,12 @@ std::string GetId(const py::object &obj) {
     if (p_list.size() == 0) {
       return "empty";
     }
-    to_process = p_list[0];
     prefix = "tuple:";
-    if (!py::isinstance<tensor::Tensor>(to_process)) {
-      std::string key = "";
-      for (size_t i = 0; i < p_list.size(); ++i) {
-        key += std::string(py::str(p_list[i])) + ":";
-      }
-      return prefix + key;
-    }
+    std::string key = "";
+    for (size_t i = 0; i < p_list.size(); ++i) {
+      key += std::string(py::str(GetId(p_list[i]))) + ":";
+    }
+    return prefix + key;
   }
   if (py::isinstance<py::int_>(to_process)) {
     return prefix + std::string(py::str(to_process));
```
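Previously a tuple's id was built from `py::str(p_list[i])`, i.e. from each element's printed value, so distinct tensors that print alike (or nested tuples) could collide; the fix derives the key from `GetId` of each element recursively, so tensors contribute their stable ids. A small self-contained model of the recursive scheme (plain C++; `Value` and `Tuple` are hypothetical stand-ins for `py::object` values):

```cpp
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// Toy value model: a leaf with its own id, or a tuple of values.
struct Value;
using Tuple = std::vector<Value>;
struct Value {
  std::variant<std::string, Tuple> v;  // leaf id or nested tuple
};

// Mirrors the fixed GetId: recurse into tuples and concatenate the
// id of each element, instead of stringifying the element itself.
std::string GetId(const Value &obj) {
  if (auto *t = std::get_if<Tuple>(&obj.v)) {
    if (t->empty()) return "empty";
    std::string key;
    for (const auto &e : *t) key += GetId(e) + ":";
    return "tuple:" + key;
  }
  return std::get<std::string>(obj.v);  // leaf: stable per-object id
}

int main() {
  Value t1{std::string("tensor_a")};
  Value t2{std::string("tensor_b")};
  Value nested{Tuple{t1, Value{Tuple{t2}}}};
  std::cout << GetId(nested) << "\n";  // tuple:tensor_a:tuple:tensor_b::
  return 0;
}
```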
```diff
@@ -143,7 +140,8 @@ std::map<SignatureEnumDType, size_t> GetDstType(const py::tuple &py_args,
   return dst_type;
 }

-py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tuple *const out_args) {
+py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tuple *const out_args,
+                        py::list *out_args_list) {
   auto &py_args = *out_args;
   py::tuple input_mask(args.size());
   for (size_t i = 0; i < args.size(); ++i) {
```
```diff
@@ -171,8 +169,10 @@ py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tu
       auto tensor_ptr = py::cast<tensor::TensorPtr>(py_args[it->second]);
       if (py::isinstance<py::int_>(py_args[i])) {
         py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::int_>(py_args[i]), tensor_ptr->Dtype());
+        (*out_args_list)[i] = py_args[i];
       } else {
         py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::float_>(py_args[i]), tensor_ptr->Dtype());
+        (*out_args_list)[i] = py_args[i];
       }
       continue;
     }
```
```diff
@@ -195,7 +195,7 @@ void PynativeInfer(const PrimitivePyPtr &prim, const py::list &py_args, OpExecIn
   op_exec_info->abstract = infer_res;
 }

-OpExecInfoPtr GenerateOpExecInfo(const py::args &args) {
+OpExecInfoPtr GenerateOpExecInfo(const py::args &args, py::list *const out_args) {
   if (args.size() != PY_ARGS_NUM) {
     MS_LOG(ERROR) << "Three args are needed by RunOp";
     return nullptr;
```
```diff
@@ -213,7 +213,7 @@ OpExecInfoPtr GenerateOpExecInfo(const py::args &args) {
   size_t input_num = a.size();
   op_exec_info->op_inputs = py::tuple(input_num);
-  op_exec_info->inputs_mask = ConvertInputs(prim, args[PY_INPUTS], &op_exec_info->op_inputs);
+  op_exec_info->inputs_mask = ConvertInputs(prim, args[PY_INPUTS], &op_exec_info->op_inputs, out_args);
   // use python infer method
   if (ignore_infer_prim.find(op_exec_info->op_name) == ignore_infer_prim.end()) {
     PynativeInfer(prim, op_exec_info->op_inputs, op_exec_info.get());
```
```diff
@@ -513,16 +513,15 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
   auto prim = op_exec_info->py_primitive;
   inputs.push_back(NewValueNode(prim));

   py::tuple op_masks = op_exec_info->inputs_mask;
-  py::list op_args = args[PY_INPUTS];
   AbstractBasePtrList args_spec_list;
-  for (size_t i = 0; i < op_args.size(); i++) {
-    auto node = GetInput(op_args[i], op_masks[i]);
+  for (size_t i = 0; i < args.size(); i++) {
+    auto node = GetInput(args[i], op_masks[i]);
     args_spec_list.push_back(node->abstract());
     inputs.push_back(node);
   }

   auto cnode = curr_g_->NewCNode(inputs);
-  MS_LOG(DEBUG) << "MakeCnode set node " << cnode->DebugString();
+  MS_LOG(DEBUG) << "MakeCnode set node " << cnode->DebugString(4);

   py::object out_real = out;
   if (out.size() == 1) {
     MS_LOG(DEBUG) << "MakeCnode out size is one.";
```
```diff
@@ -534,10 +533,12 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
     if (value.size() > 1) {
       for (int i = 0; i < static_cast<int>(value.size()); i++) {
         auto value_id = GetId(value[i]);
+        MS_LOG(DEBUG) << "MakeCnode set node id " << value_id;
         set_obj_node_map(curr_g_, value_id, cnode, i);
       }
     }
   }
+  MS_LOG(DEBUG) << "MakeCnode set node id " << obj_id;
   set_obj_node_map(curr_g_, obj_id, cnode);
   set_pyobj(curr_g_, obj_id);
   return cnode;
```
```diff
@@ -545,12 +546,17 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
 AnfNodePtr PynativeExecutor::GetObjNode(const py::object &obj) {
   auto &out = graph_info_map_[curr_g_].obj_node_map[GetId(obj)];
-  if (out.second == -1) {
+  if (out.second.size() == 1 && out.second[0] == -1) {
     return out.first;
   }
-  std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), out.first,
-                                                NewValueNode(out.second)};
-  return curr_g_->NewCNode(tuple_get_item_inputs);
+  auto node = out.first;
+  MS_LOG(DEBUG) << "output size " << out.second.size() << node->DebugString();
+  for (auto &idx : out.second) {
+    std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), node, NewValueNode(idx)};
+    node = curr_g_->NewCNode(tuple_get_item_inputs);
+  }
+  MS_LOG(DEBUG) << "GetObjNode output" << node->DebugString(6);
+  return node;
 }

 py::tuple RunOp(const OpExecInfoPtr &op_exec_info, const py::args &args) {
```
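`GetObjNode` previously stored a single output index per object, so an object living at `output[0][1]` inside a nested tuple could not be addressed; with `out.second` now a vector of indices, the function emits one `TupleGetItem` per index and walks into nested outputs. A standalone sketch of that path walk (plain C++; `Node` and `GetByPath` are hypothetical stand-ins for `AnfNode` and the `TupleGetItem` chain):

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy graph node: either a leaf value or a tuple of children,
// standing in for AnfNode / TupleGetItem behavior.
struct Node {
  std::string value;                            // set on leaves
  std::vector<std::shared_ptr<Node>> children;  // set on tuples
};

// Mirrors the fixed GetObjNode: walk an index *path* ({-1} means
// "the node itself"), applying one getitem per index.
std::shared_ptr<Node> GetByPath(std::shared_ptr<Node> node, const std::vector<int> &path) {
  if (path.size() == 1 && path[0] == -1) return node;
  for (int idx : path) node = node->children.at(idx);  // one TupleGetItem per index
  return node;
}

int main() {
  auto leaf = std::make_shared<Node>(Node{"output[0][1]", {}});
  auto inner = std::make_shared<Node>(
      Node{"", {std::make_shared<Node>(Node{"output[0][0]", {}}), leaf}});
  auto root = std::make_shared<Node>(Node{"", {inner}});
  std::cout << GetByPath(root, {0, 1})->value << "\n";   // output[0][1]
  std::cout << (GetByPath(root, {-1}) == root) << "\n";  // 1: the whole node
  return 0;
}
```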
```diff
@@ -594,8 +600,11 @@ py::tuple RunOp(const OpExecInfoPtr &op_exec_info, const py::args &args) {
 py::tuple RunOp(const py::args &args) {
   MS_LOG(DEBUG) << "RunOp start" << args.size();
-  OpExecInfoPtr op_exec_info = GenerateOpExecInfo(args);
+  py::list args_input = args[PY_INPUTS];
+
+  OpExecInfoPtr op_exec_info = GenerateOpExecInfo(args, &args_input);
   MS_EXCEPTION_IF_NULL(op_exec_info);
   if (op_exec_info->abstract != nullptr) {
     py::dict output = abstract::ConvertAbstractToPython(op_exec_info->abstract);
     if (!output["value"].is_none()) {
```
```diff
@@ -609,7 +618,7 @@ py::tuple RunOp(const py::args &args) {
       return value_ret;
     }
   }
-  return RunOp(op_exec_info, args);
+  return RunOp(op_exec_info, args_input);
 }

 void ClearPyNativeSession() { session = nullptr; }
```
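The `args_input` plumbing above ties the previous hunks together: `ConvertInputs` promotes Python scalars to tensors, the new `out_args_list` parameter writes those converted values back into the caller's list, and `RunOp` then forwards that list (rather than the original args) so downstream graph construction keys off the converted tensors. A toy model of the out-parameter write-back pattern (plain C++; strings stand in for tensors, the names are illustrative only):

```cpp
#include <iostream>
#include <string>
#include <vector>

using Input = std::string;  // stand-in for a converted tensor

// Like ConvertInputs with out_args_list: the converted values are
// written back so the caller's copy reflects the conversion.
void ConvertInputs(const std::vector<int> &raw, std::vector<Input> *out_args_list) {
  for (size_t i = 0; i < raw.size(); ++i) {
    (*out_args_list)[i] = "Tensor(" + std::to_string(raw[i]) + ")";  // scalar -> tensor
  }
}

int main() {
  std::vector<int> raw{1, 2};
  std::vector<Input> args_input(raw.size());  // analogue of `py::list args_input = args[PY_INPUTS];`
  ConvertInputs(raw, &args_input);
  // Downstream (the `RunOp(op_exec_info, args_input)` call) now sees
  // the converted inputs instead of the original scalars.
  for (const auto &a : args_input) std::cout << a << "\n";
  return 0;
}
```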
```diff
@@ -646,6 +655,14 @@ void PynativeExecutor::NewGraph(const py::object &cell, const py::args &args) {
   }
 }

+AnfNodePtr PynativeExecutor::MakeValueNode(const py::object &obj, const std::string &obj_id) {
+  ValuePtr converted_ret = nullptr;
+  parse::ConvertData(obj, &converted_ret);
+  auto node = NewValueNode(converted_ret);
+  set_obj_node_map(curr_g_, obj_id, node);
+  return node;
+}
+
 AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &op_mask) {
   AnfNodePtr node = nullptr;
   std::string obj_id = GetId(obj);
```
```diff
@@ -683,10 +700,16 @@ AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &o
   } else if (py::isinstance<py::tuple>(obj)) {
     // out = op((x, y))
     // out = cell((x, y))
+    auto tuple = obj.cast<py::tuple>();
+    // cell((1,2)): support not mix (scalar, tensor)
+    if (tuple.size() > 0 && !py::isinstance<tensor::Tensor>(tuple[0])) {
+      return MakeValueNode(obj, obj_id);
+    }
+
     std::vector<AnfNodePtr> args;
     args.push_back(NewValueNode(prim::kPrimMakeTuple));
-    auto tuple = obj.cast<py::tuple>();
     auto tuple_size = static_cast<int>(tuple.size());
     for (int i = 0; i < tuple_size; i++) {
       args.push_back(GetInput(tuple[i], py::object()));
```
```diff
@@ -695,17 +718,26 @@ AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &o
     set_obj_node_map(curr_g_, GetId(obj), cnode);
     node = cnode;
   } else {
     // out = op(x, 1)
-    ValuePtr converted_ret = nullptr;
-    parse::ConvertData(obj, &converted_ret);
-    node = NewValueNode(converted_ret);
-    set_obj_node_map(curr_g_, obj_id, node);
+    node = MakeValueNode(obj, obj_id);
   }

-  MS_LOG(DEBUG) << "Now getinput " << py::str(obj) << " node " << node->ToString();
+  MS_LOG(DEBUG) << "Now getinput node " << node->ToString() << obj_id;
   return node;
 }

+// for output[0][1] need getitem multi
+void PynativeExecutor::SetTupleOutput(const py::object &obj, const AnfNodePtr &cnode, std::vector<int> idx) {
+  if (py::isinstance<py::tuple>(obj)) {
+    auto tuple = obj.cast<py::tuple>();
+    for (int i = 0; i < static_cast<int>(tuple.size()); i++) {
+      std::vector<int> tmp = idx;
+      tmp.push_back(i);
+      set_obj_node_map(curr_g_, GetId(tuple[i]), cnode, tmp);
+      SetTupleOutput(tuple[i], cnode, tmp);
+    }
+  }
+}
+
 void PynativeExecutor::Pushp() { graph_p_.push(curr_g_); }

 void PynativeExecutor::Popp() {
```
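`SetTupleOutput` is the producer side of the multi-index change: it recurses through a possibly nested tuple output and records, for every element, the full index path from the root output, which `GetObjNode` later replays as a chain of `TupleGetItem`s. A self-contained model of the path enumeration (plain C++; `Out` and `RecordPaths` are hypothetical stand-ins for the `py::object` output and `SetTupleOutput`):

```cpp
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// Toy output: a named leaf or a nested tuple.
struct Out;
using OutTuple = std::vector<Out>;
struct Out {
  std::variant<std::string, OutTuple> v;
};

// Mirrors SetTupleOutput: record the index path of every element,
// extending the parent's path by one index per nesting level.
void RecordPaths(const Out &obj, std::vector<int> idx) {
  if (auto *t = std::get_if<OutTuple>(&obj.v)) {
    for (int i = 0; i < static_cast<int>(t->size()); i++) {
      std::vector<int> tmp = idx;
      tmp.push_back(i);
      // Here MindSpore would call set_obj_node_map(curr_g_, GetId(...), cnode, tmp).
      std::string path;
      for (int k : tmp) path += "[" + std::to_string(k) + "]";
      std::cout << "output" << path << "\n";
      RecordPaths((*t)[i], tmp);
    }
  }
}

int main() {
  Out out{OutTuple{Out{std::string("a")},
                   Out{OutTuple{Out{std::string("b")}, Out{std::string("c")}}}}};
  RecordPaths(out, {});  // prints output[0], output[1], output[1][0], output[1][1]
  return 0;
}
```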
```diff
@@ -737,6 +769,7 @@ void PynativeExecutor::EndGraph(const py::object &cell, const py::object &out, c
       for (int i = 0; i < tuple_size; i++) {
         args.push_back(GetInput(tuple[i], py::object()));
         set_obj_node_map(curr_g_, GetId(tuple[i]), cnode, i);
+        SetTupleOutput(tuple[i], cnode, std::vector<int>{i});
       }
       cnode->set_inputs(args);
       set_obj_node_map(curr_g_, out_id, cnode);
```
```diff
@@ -784,6 +817,7 @@ void PynativeExecutor::EndGraphByOutId(const std::string &out_id, const py::obje
     auto out_size = static_cast<int>(out_list.size());
     for (int i = 0; i < out_size; i++) {
       set_obj_node_map(curr_g_, GetId(out_list[i]), out_cnode, i);
+      SetTupleOutput(out_list[i], out_cnode, std::vector<int>{i});
     }
   }
   set_obj_node_map(curr_g_, GetId(out), out_cnode);
```
```diff
@@ -878,6 +912,7 @@ void PynativeExecutor::GradNet(const GradOperationPtr &grad, const py::object &c
   MS_EXCEPTION_IF_NULL(resource_->func_graph());
   auto g = GradGraph(resource_->func_graph(), grad, w_args, size);
   resource_->set_func_graph(g);
+  resource_->manager()->KeepRoots({g});

   // get the parameters items and add the value to args_spec
   abstract::AbstractBasePtrList args_spec = GetArgsSpec(args);
```
mindspore/ccsrc/pynative/pynative_execute.h

```diff
@@ -44,13 +44,14 @@ py::object RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat
 py::tuple RunOp(const py::args &args);

-py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &py_args, py::tuple *const out_args);
+py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &py_args, py::tuple *const out_args,
+                        py::list *out_args_list);

 void ClearPyNativeSession();

 struct GraphInfo {
   std::unordered_map<std::string, AnfNodePtr> param_map;
-  std::unordered_map<std::string, std::pair<AnfNodePtr, int>> obj_node_map;
+  std::unordered_map<std::string, std::pair<AnfNodePtr, std::vector<int>>> obj_node_map;
   AnfNodePtr output;
   std::vector<std::string> objects;
 };
```
```diff
@@ -81,9 +82,12 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
   FuncGraphPtr curr_g() { return curr_g_; }
   void set_pyobj(FuncGraphPtr g, const std::string obj) { graph_info_map_[g].objects.push_back(obj); }
   void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node) {
-    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, -1);
+    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, std::vector<int>{-1});
   }
   void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node, int index) {
+    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, std::vector<int>{index});
+  }
+  void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node, std::vector<int> index) {
     graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, index);
   }
   AnfNodePtr MakeCNode(const OpExecInfoPtr &op_exec_info, const py::args &args, const py::tuple &out);
```
```diff
@@ -93,6 +97,8 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
   void Popp();
   FuncGraphPtr GradGraph(FuncGraphPtr g, const GradOperationPtr &grad_op, const std::vector<AnfNodePtr> &weights,
                          size_t arg_size);
+  void SetTupleOutput(const py::object &obj, const AnfNodePtr &cnode, std::vector<int> idx);
+  AnfNodePtr MakeValueNode(const py::object &obj, const std::string &obj_id);
   ~PynativeExecutor();
```
tests/ut/cpp/pynative/pynative_execute_test.cc

```diff
@@ -35,7 +35,7 @@ class TestPynativeExecute : public UT::Common {
   TestPynativeExecute() {}
 };

-inline ValuePtr PyAttrValue(const py::object &obj) {
+inline ValuePtr PyAttrValue(const py::object &obj) {
   ValuePtr converted_ret;
   bool converted = parse::ConvertData(obj, &converted_ret);
   if (!converted) {
```
```diff
@@ -63,7 +63,9 @@ OpExecInfoPtr ConstructOpExecInfo() {
   auto conv_obj = prim::GetPythonOps("conv2d_prim", "gtest_input.pynative");
   py::none py_none;
-  return GenerateOpExecInfo(py::make_tuple(conv_obj, op_name, op_inputs));
+  py::args args = py::make_tuple(conv_obj, op_name, op_inputs);
+  py::list args_input = args[PY_INPUTS];
+  return GenerateOpExecInfo(args, &args_input);
 }

 TEST_F(TestPynativeExecute, TestRunOpInVM) {
```
```diff
@@ -77,8 +79,8 @@ TEST_F(TestPynativeExecute, TestRunOpInVM) {
 TEST_F(TestPynativeExecute, TestRunOp) {
   py::none py_none;
   auto op_exec_info_ptr = ConstructOpExecInfo();
-  py::tuple outputs = pynative::RunOp(
-      py::make_tuple(op_exec_info_ptr->py_primitive, op_exec_info_ptr->op_name, op_exec_info_ptr->op_inputs));
+  py::tuple outputs = pynative::RunOp(
+      py::make_tuple(op_exec_info_ptr->py_primitive, op_exec_info_ptr->op_name, op_exec_info_ptr->op_inputs));
   if (outputs.size() == 0) {
     FAIL();
   } else {
```