magicwindyyd / mindspore, forked from MindSpore / mindspore (in sync with the fork source)
commit 87989791
Author: lvliang
Date:   May 16, 2020
Parent: 18c94950

    pynative-release-memory-after-run-finish
Showing 4 changed files with 21 additions and 17 deletions (+21 -17):

 mindspore/ccsrc/pynative/pynative_execute.cc | +8  -12
 mindspore/ccsrc/session/ascend_session.cc    | +1  -1
 mindspore/ccsrc/session/session_basic.cc     | +10 -4
 mindspore/ccsrc/utils/utils.h                | +2  -0
mindspore/ccsrc/pynative/pynative_execute.cc
@@ -149,23 +149,19 @@ OpExecInfoPtr GenerateOpExecInfo(const py::args &args) {
   return op_exec_info;
 }
 
-std::string GetSingleOpGraphInfo(const OpExecInfoPtr &op_exec_info) {
+std::string GetSingleOpGraphInfo(const OpExecInfoPtr &op_exec_info,
+                                 const std::vector<tensor::TensorPtr> &input_tensors) {
   MS_EXCEPTION_IF_NULL(op_exec_info);
   std::string graph_info;
-  MS_EXCEPTION_IF_NULL(op_exec_info->abstract);
   // get input tensor info
-  size_t input_num = op_exec_info->op_inputs.size();
-  for (size_t index = 0; index < input_num; ++index) {
-    if (py::isinstance<tensor::Tensor>(op_exec_info->op_inputs[index])) {
-      auto tensor_ptr = py::cast<tensor::TensorPtr>(op_exec_info->op_inputs[index]);
-      MS_EXCEPTION_IF_NULL(tensor_ptr);
-      (void)graph_info.append(tensor_ptr->GetShapeAndDataTypeInfo() + "_");
-    }
+  for (const auto &input_tensor : input_tensors) {
+    MS_EXCEPTION_IF_NULL(input_tensor);
+    (void)graph_info.append(input_tensor->GetShapeAndDataTypeInfo() + "_");
   }
   // get prim and abstract info
+  MS_EXCEPTION_IF_NULL(op_exec_info->abstract);
   (void)graph_info.append(std::to_string((uintptr_t)(op_exec_info->py_primitive.get())) + "_" +
                           op_exec_info->abstract->ToString());
   MS_LOG(INFO) << "Graph info [" << graph_info << "]";
   return graph_info;
 }
@@ -337,14 +333,14 @@ py::object RunOpInMs(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat
   if (session == nullptr) {
     session = session::SessionFactory::Get().Create(device_target);
   }
   MS_EXCEPTION_IF_NULL(session);
   session->Init(ms_context->device_id());
-  std::string graph_info = GetSingleOpGraphInfo(op_exec_info);
   std::vector<tensor::TensorPtr> input_tensors;
   std::vector<int> tensors_mask;
   ConstructInputTensor(op_exec_info, &tensors_mask, &input_tensors);
+  // get graph info for checking it whether existing in the cache
+  std::string graph_info = GetSingleOpGraphInfo(op_exec_info, input_tensors);
   session->BuildOp(*op_exec_info, graph_info, input_tensors, tensors_mask);
   EraseValueNodeTensor(tensors_mask, &input_tensors);
   py::tuple result = session->RunOp(*op_exec_info, graph_info, input_tensors);
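The reordered call sequence matters: graph_info is now computed from the input_tensors that ConstructInputTensor produced, so the cache key describes exactly the tensors handed to BuildOp and RunOp. As a rough, self-contained sketch of this kind of key construction (TensorDesc, MakeGraphKey, and the key format below are illustrative stand-ins, not MindSpore's tensor::Tensor API):

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Simplified stand-in for tensor::Tensor: only the metadata the key needs.
struct TensorDesc {
  std::string dtype;
  std::vector<int> shape;

  // Plays the role of GetShapeAndDataTypeInfo(): a compact "<dtype>[d0,d1,...]" string.
  std::string ShapeAndDtype() const {
    std::ostringstream oss;
    oss << dtype << "[";
    for (size_t i = 0; i < shape.size(); ++i) {
      oss << shape[i] << (i + 1 < shape.size() ? "," : "");
    }
    oss << "]";
    return oss.str();
  }
};

// Build a cache key from the actual input tensors plus an identity for the primitive,
// in the same spirit as the new GetSingleOpGraphInfo(op_exec_info, input_tensors).
std::string MakeGraphKey(const std::vector<TensorDesc> &inputs, uintptr_t prim_id,
                         const std::string &abstract_str) {
  std::string key;
  for (const auto &input : inputs) {
    key.append(input.ShapeAndDtype() + "_");
  }
  key.append(std::to_string(prim_id) + "_" + abstract_str);
  return key;
}

int main() {
  std::vector<TensorDesc> inputs = {{"float32", {2, 3}}, {"float32", {3, 4}}};
  // Two calls with identical input metadata and the same primitive map to the same key,
  // so the second call can reuse the graph built for the first one.
  std::cout << MakeGraphKey(inputs, 0x1234, "Tensor(2,4)") << std::endl;
  return 0;
}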
mindspore/ccsrc/session/ascend_session.cc
@@ -339,7 +339,7 @@ void AscendSession::BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph
                             const std::vector<tensor::TensorPtr> &input_tensors,
                             const std::vector<int> &tensors_mask) {
   MS_LOG(INFO) << "Build op " << op_run_info.op_name << " start !";
   if (GraphCacheExist(graph_info)) {
-    MS_LOG(INFO) << "Build op " << op_run_info.op_name << " finish !";
+    MS_LOG(INFO) << "Build op " << op_run_info.op_name << " graph cache has existed !";
     return;
   }
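The log change only adjusts the message printed when GraphCacheExist(graph_info) hits; the pattern it reports is to compile a single-op graph once per graph_info key and reuse it afterwards. A minimal sketch of that lookup-before-build pattern, using a plain std::unordered_map and stand-in types (OpGraphCache, KernelGraph, and GetOrBuild below are assumptions for illustration, not the AscendSession implementation):

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Stand-ins: the real code keys compiled graphs by a GraphInfo string.
using GraphInfo = std::string;
struct KernelGraph {};

class OpGraphCache {
 public:
  bool Exists(const GraphInfo &graph_info) const {
    return graphs_.find(graph_info) != graphs_.end();
  }

  // Build only when the key is not cached yet; otherwise reuse the cached graph.
  std::shared_ptr<KernelGraph> GetOrBuild(const GraphInfo &graph_info) {
    auto it = graphs_.find(graph_info);
    if (it != graphs_.end()) {
      std::cout << "graph cache has existed for [" << graph_info << "]" << std::endl;
      return it->second;
    }
    auto graph = std::make_shared<KernelGraph>();  // the expensive compile step in reality
    graphs_[graph_info] = graph;
    return graph;
  }

 private:
  std::unordered_map<GraphInfo, std::shared_ptr<KernelGraph>> graphs_;
};

int main() {
  OpGraphCache cache;
  auto g1 = cache.GetOrBuild("float32[2,3]_float32[3,4]_0x1234_Tensor(2,4)");
  auto g2 = cache.GetOrBuild("float32[2,3]_float32[3,4]_0x1234_Tensor(2,4)");
  std::cout << std::boolalpha << (g1 == g2) << std::endl;  // true: second call hit the cache
  return 0;
}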
mindspore/ccsrc/session/session_basic.cc
@@ -301,9 +301,13 @@ size_t LoadCtrlInputTensor(const std::shared_ptr<Context> &context, std::vector<
 ValueNodePtr ConstructRunOpValueNode(const std::shared_ptr<KernelGraph> &graph, const tensor::TensorPtr &input_tensor) {
   MS_EXCEPTION_IF_NULL(graph);
   MS_EXCEPTION_IF_NULL(input_tensor);
-  auto abstract = std::make_shared<abstract::AbstractTensor>(input_tensor);
   auto value_node = std::make_shared<ValueNode>(input_tensor);
+  // construct abstract of value node
+  auto type_of_tensor = input_tensor->Dtype();
+  auto shape_of_tensor = input_tensor->shape();
+  auto abstract = std::make_shared<abstract::AbstractTensor>(type_of_tensor, shape_of_tensor);
   value_node->set_abstract(abstract);
   // add value node to graph
   auto input_value_node = graph->NewValueNode(value_node);
   graph->AddValueNodeToGraph(input_value_node);
   return input_value_node;
@@ -313,7 +317,7 @@ ParameterPtr ConstructRunOpParameter(const std::shared_ptr<KernelGraph> &graph,
                                      int tensor_mask) {
   auto param = graph->NewParameter();
   MS_EXCEPTION_IF_NULL(param);
-  if (tensor_mask == 1) {
+  if (tensor_mask == kParameterWeightTensorMask) {
     py::object obj;
     param->set_default_param(obj);
   }
@@ -329,8 +333,10 @@ ParameterPtr ConstructRunOpParameter(const std::shared_ptr<KernelGraph> &graph,
     kernel_build_info_builder->SetOutputsDeviceType(std::vector<TypeId>{input_tensor->device_address()->type_id()});
   }
   AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), param.get());
-  // ftruct abstract of parameter
-  auto abstract = std::make_shared<abstract::AbstractTensor>(input_tensor);
+  // construct abstract of parameter
+  auto type_of_tensor = input_tensor->Dtype();
+  auto shape_of_tensor = input_tensor->shape();
+  auto abstract = std::make_shared<abstract::AbstractTensor>(type_of_tensor, shape_of_tensor);
   param->set_abstract(abstract);
   return param;
 }
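Both changes in this file swap AbstractTensor(input_tensor) for AbstractTensor(type_of_tensor, shape_of_tensor), so the abstract attached to the value node or parameter carries only dtype and shape metadata rather than a handle to the input tensor itself; that is what lets the tensor's memory be released once the run finishes, in line with the commit message. A small self-contained illustration of the difference (TensorData and the two Abstract structs are simplified stand-ins, not MindSpore types):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Simplified stand-in for a tensor with a potentially large data buffer.
struct TensorData {
  std::string dtype;
  std::vector<int> shape;
  std::vector<float> buffer;  // the memory we want to be able to release
};

// Abstract that holds on to the whole tensor (the old pattern).
struct AbstractHoldingTensor {
  std::shared_ptr<TensorData> tensor;
};

// Abstract that copies only the metadata (the new pattern).
struct AbstractMetadataOnly {
  std::string dtype;
  std::vector<int> shape;
};

int main() {
  auto tensor = std::make_shared<TensorData>(
      TensorData{"float32", {1024, 1024}, std::vector<float>(1024 * 1024)});

  AbstractHoldingTensor old_style{tensor};
  AbstractMetadataOnly new_style{tensor->dtype, tensor->shape};

  std::cout << "use_count with both abstracts alive: " << tensor.use_count() << std::endl;  // 2

  // Drop the caller's reference, as happens once an op run finishes.
  tensor.reset();

  // old_style still pins the 4 MB buffer; new_style lets it be freed immediately.
  std::cout << "buffer still alive via old-style abstract: "
            << (old_style.tensor != nullptr) << std::endl;  // 1
  std::cout << "new-style abstract keeps only metadata, rank = " << new_style.shape.size()
            << std::endl;
  return 0;
}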
mindspore/ccsrc/utils/utils.h
@@ -199,6 +199,8 @@ const size_t kShape4dDims = 4;
 const size_t kShape5dDims = 5;
 const size_t kCubeSize = 16;
 const size_t kMemAlignSize = 512;
+const int kParameterDataTensorMask = 0;
+const int kParameterWeightTensorMask = 1;
 const int kValueNodeTensorMask = 2;
 // define special index in special node
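The two new constants name the tensors_mask categories used on the PyNative path (data inputs, weight parameters) alongside the existing kValueNodeTensorMask. That is what lets session_basic.cc compare against kParameterWeightTensorMask instead of the bare literal 1, and what a step like EraseValueNodeTensor keys on when dropping value-node tensors before RunOp. A hedged sketch of that mask-based filtering, with local copies of the constants and a hypothetical EraseValueNodeTensors helper rather than the real function:

#include <cassert>
#include <string>
#include <vector>

// Same values as the constants added to mindspore/ccsrc/utils/utils.h.
const int kParameterDataTensorMask = 0;
const int kParameterWeightTensorMask = 1;
const int kValueNodeTensorMask = 2;

// Remove the tensors whose mask marks them as value-node inputs, keeping data and
// weight tensors; this mirrors what an EraseValueNodeTensor-style step has to do.
void EraseValueNodeTensors(const std::vector<int> &tensors_mask,
                           std::vector<std::string> *input_tensors) {
  assert(tensors_mask.size() == input_tensors->size());
  std::vector<std::string> kept;
  for (size_t i = 0; i < tensors_mask.size(); ++i) {
    if (tensors_mask[i] != kValueNodeTensorMask) {
      kept.push_back((*input_tensors)[i]);
    }
  }
  *input_tensors = kept;
}

int main() {
  std::vector<std::string> input_tensors = {"x", "weight", "axis"};
  std::vector<int> tensors_mask = {kParameterDataTensorMask, kParameterWeightTensorMask,
                                   kValueNodeTensorMask};
  EraseValueNodeTensors(tensors_mask, &input_tensors);
  assert(input_tensors.size() == 2);  // "axis" (a value-node input) was dropped
  return 0;
}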