magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit 8947afb5
Authored Jul 13, 2020 by mindspore-ci-bot; committed by Gitee on Jul 13, 2020

!3004 fixed codedex issues

Merge pull request !3004 from mxm/fix_codedex

Parents: be9b3c53 3c08fa63
Showing 6 changed files with 44 additions and 23 deletions (+44 -23)
mindspore/ccsrc/ir/pattern_matcher.h                  +18  -7
mindspore/ccsrc/ir/tensor.cc                           +2  -1
mindspore/ccsrc/optimizer/ad/dfunctor.cc              +10  -10
mindspore/ccsrc/utils/load_onnx/anf_converter.cc       +4  -1
mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc    +9  -3
mindspore/ccsrc/utils/load_onnx/anf_model_parser.h     +1  -1
mindspore/ccsrc/ir/pattern_matcher.h
@@ -541,6 +541,9 @@ class PConstant : public PBase<PConstant<T> > {
         data_out[i] *= data_2[0];
       }
     } else {
+      if (in_data_2_size < out_data_size) {
+        MS_EXCEPTION(ValueError) << "in_data_2_size is smaller than out_data_size.";
+      }
       for (int i = 0; i < out_data_size; i++) {
         data_out[i] *= data_2[i];
       }
@@ -595,33 +598,41 @@ class PConstant : public PBase<PConstant<T> > {
       return nullptr;
     }
-    void *data_out;
+    auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_3_type_ptr->type_id(), tensor_out_shape);
+    size_t mem_size = GetTypeByte(tensor_3_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
+    char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
+    int ret = 0;
+    void *data_out = nullptr;
     if ((tensor_3_type_ptr->type_id() == TypeId::kNumberTypeFloat32) ||
         (tensor_3_type_ptr->type_id() == TypeId::kNumberTypeFloat)) {
       Multiply<float>(tensor_ptr_1->data_c(), tensor_ptr_1->DataSize(), tensor_ptr_2->data_c(),
                       tensor_ptr_2->DataSize(), &data_out, data_out_size);
+      ret = memcpy_s(data, mem_size, data_out, mem_size);
+      delete[] reinterpret_cast<float *>(data_out);
     } else {
       if (tensor_3_type_ptr->type_id() == TypeId::kNumberTypeFloat64) {
         Multiply<double>(tensor_ptr_1->data_c(), tensor_ptr_1->DataSize(), tensor_ptr_2->data_c(),
                          tensor_ptr_2->DataSize(), &data_out, data_out_size);
+        ret = memcpy_s(data, mem_size, data_out, mem_size);
+        delete[] reinterpret_cast<double *>(data_out);
       } else {
         if ((tensor_3_type_ptr->type_id() == TypeId::kNumberTypeInt32) ||
             (tensor_3_type_ptr->type_id() == TypeId::kNumberTypeInt)) {
           Multiply<int>(tensor_ptr_1->data_c(), tensor_ptr_1->DataSize(), tensor_ptr_2->data_c(),
                         tensor_ptr_2->DataSize(), &data_out, data_out_size);
+          ret = memcpy_s(data, mem_size, data_out, mem_size);
+          delete[] reinterpret_cast<int *>(data_out);
         } else {
           // Un-support data types
           return nullptr;
         }
       }
     }
-    auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_3_type_ptr->type_id(), tensor_out_shape);
-    size_t mem_size = GetTypeByte(tensor_3_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
-    char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
-    memcpy(data, data_out, mem_size);
+    if (ret != 0) {
+      MS_LOG(EXCEPTION) << "memcpy_s error, errorno " << ret << ", source size " << mem_size << "dest size"
+                        << new_tensor_ptr->DataSize();
+    }
     auto new_vnode = NewValueNode(new_tensor_ptr);
     new_vnode->set_abstract(new_tensor_ptr->ToAbstract());
     return new_vnode;
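The second hunk moves the result-tensor allocation ahead of the type dispatch, replaces the unchecked memcpy with memcpy_s, checks the return code, and pairs each Multiply call with a delete[] of the temporary product buffer. Below is a minimal, self-contained sketch of the same ownership-and-checked-copy idea; ElementwiseProduct and the std::memcpy fallback are illustrative stand-ins (the repository itself links the securec memcpy_s), not MindSpore code.

// Sketch only: a simplified elementwise-multiply helper that returns an owning
// buffer, so the per-branch delete[] from the patch cannot be forgotten.
#include <cstring>
#include <memory>
#include <stdexcept>
#include <vector>

template <typename T>
std::unique_ptr<T[]> ElementwiseProduct(const T *a, std::size_t a_size, const T *b, std::size_t b_size,
                                        std::size_t out_size) {
  if (a_size < out_size || b_size < out_size) {
    // Mirrors the size guard added in the first hunk.
    throw std::invalid_argument("input buffers are smaller than the output size");
  }
  auto out = std::make_unique<T[]>(out_size);
  for (std::size_t i = 0; i < out_size; ++i) {
    out[i] = a[i] * b[i];
  }
  return out;  // ownership travels with the pointer; no manual delete[] needed
}

int main() {
  std::vector<float> x = {1.f, 2.f, 3.f};
  std::vector<float> y = {4.f, 5.f, 6.f};
  auto prod = ElementwiseProduct(x.data(), x.size(), y.data(), y.size(), x.size());

  // Copy into the destination with an explicit capacity check, standing in for
  // the memcpy_s + return-code check introduced by the commit.
  std::vector<float> dest(x.size());
  std::size_t bytes = x.size() * sizeof(float);
  if (bytes > dest.size() * sizeof(float)) {
    throw std::length_error("destination too small");
  }
  std::memcpy(dest.data(), prod.get(), bytes);
  return dest[2] == 18.f ? 0 : 1;
}

Returning a std::unique_ptr<T[]> is one way to keep the leak warning from reappearing, since the temporary buffer is released on every path without an explicit delete[].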
mindspore/ccsrc/ir/tensor.cc
@@ -125,6 +125,7 @@ template <typename T>
 class TensorDataImpl : public TensorData {
  public:
   explicit TensorDataImpl(const std::vector<int> &shape) : ndim_(shape.size()), data_size_(SizeOf(shape)) {}
+  ~TensorDataImpl() = default;
   TensorDataImpl(const std::vector<int> &shape, void *data, size_t data_len)
       : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_len)) {}

@@ -288,7 +289,7 @@ class TensorDataImpl : public TensorData {
 };

 template <typename... Args>
-TensorDataPtr MakeTensorData(TypeId data_type, const std::vector<int> &shape, Args... args) {
+TensorDataPtr MakeTensorData(TypeId data_type, const std::vector<int> &shape, const Args... args) {
   switch (data_type) {
     case kNumberTypeBool:
     case kNumberTypeUInt8:
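The functional change in tensor.cc is the MakeTensorData signature, where the parameter pack becomes const Args... args; the defaulted destructor shown in the first hunk appears to be the single added line there. A small, hypothetical variadic-factory sketch follows, only to show what const-qualifying the pack means for the callee; DataHolder and MakeHolder are invented names, not MindSpore APIs.

// Sketch only: a simplified variadic factory in the spirit of MakeTensorData.
#include <cstddef>
#include <memory>
#include <vector>

struct DataHolder {
  std::vector<int> shape;
  std::size_t byte_len = 0;
};

// Declaring the pack as `const Args...` (as the commit does) means the callee
// receives read-only copies of the extra arguments and cannot modify them.
template <typename... Args>
std::shared_ptr<DataHolder> MakeHolder(const std::vector<int> &shape, const Args... args) {
  auto holder = std::make_shared<DataHolder>();
  holder->shape = shape;
  // Fold over the extra arguments; here they are simply summed as a byte length.
  holder->byte_len = (static_cast<std::size_t>(0) + ... + static_cast<std::size_t>(args));
  return holder;
}

int main() {
  auto h = MakeHolder(std::vector<int>{2, 3}, 4, 8);  // extra args arrive as const copies
  return h->byte_len == 12 ? 0 : 1;
}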
mindspore/ccsrc/optimizer/ad/dfunctor.cc
@@ -99,14 +99,14 @@ void DFunctor::BackPropagateFv(const AnfNodePtr &fv, const AnfNodePtr &din) {
       fv_adjoint = anfnode_to_adjoin_indirect_fv_.find(fv);
     }
   }
-  auto key = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
-  fv_adjoint->second->RegisterKUser(key, 1);
+  auto node = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
+  fv_adjoint->second->RegisterKUser(node, 1);
   auto default_val = tape_->NewCNode({NewValueNode(prim::GetPythonOps("zeros_like")), fv_adjoint->second->k()});
   fv_adjoint->second->RegisterKUser(default_val, 1);
-  auto dfv = tape_->NewCNode({NewValueNode(prim::kPrimEnvGetItem), din, key, default_val});
+  auto dfv = tape_->NewCNode({NewValueNode(prim::kPrimEnvGetItem), din, node, default_val});
   MS_LOG(DEBUG) << "BackPropagateFv find adjoint in anfnode_to_adjoin_ or anfnode_to_adjoin_indirect_fv_ fv "
                 << fv->func_graph()->ToString() << " " << fv->ToString() << ".";
-  MS_LOG(DEBUG) << "BackPropagateFv get item from " << din->ToString() << " key " << key->ToString() << ".";
+  MS_LOG(DEBUG) << "BackPropagateFv get item from " << din->ToString() << " key " << node->ToString() << ".";
   fv_adjoint->second->AccumulateDout(dfv);
 }

@@ -279,13 +279,13 @@ AnfNodePtr DFunctor::AttachFvDoutToTape(const AnfNodePtr &grad_fv) {
     if (fv_adjoint == anfnode_to_adjoin_.end()) {
       MS_LOG(EXCEPTION) << "AttachFvDoutToTape fv adjoint does not exist " << fv->ToString() << ".";
     }
-    auto key = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
-    fv_adjoint->second->RegisterKUser(key, 1);
+    auto node = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
+    fv_adjoint->second->RegisterKUser(node, 1);
     auto sens = fv_adjoint->second->dout();
     new_grad_fv = tape_->NewCNode({
       NewValueNode(prim::kPrimEnvSetItem),
       new_grad_fv,
-      key,
+      node,
       sens,
     });
     fv_adjoint->second->RegisterDoutUser(new_grad_fv->cast<CNodePtr>(), 3);

@@ -301,13 +301,13 @@ AnfNodePtr DFunctor::AttachIndirectFvDoutToTape(const AnfNodePtr &grad_fv) {
   for (auto &fv_adjoint : anfnode_to_adjoin_indirect_fv_) {
     MS_LOG(DEBUG) << "AttachIndirectFvDoutToTape backprop indirect fv " << fv_adjoint.first->ToString() << " "
                   << primal_graph_->ToString() << ".";
-    auto key = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint.second->k()});
-    fv_adjoint.second->RegisterKUser(key, 1);
+    auto node = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint.second->k()});
+    fv_adjoint.second->RegisterKUser(node, 1);
     auto sens = fv_adjoint.second->dout();
     new_grad_fv = tape_->NewCNode({
       NewValueNode(prim::kPrimEnvSetItem),
       new_grad_fv,
-      key,
+      node,
       sens,
     });
     fv_adjoint.second->RegisterDoutUser(new_grad_fv->cast<CNodePtr>(), 3);
mindspore/ccsrc/utils/load_onnx/anf_converter.cc
@@ -60,6 +60,9 @@ int AnfConverter::ValidateFileStr(const std::string &modelFile, std::string file
 bool AnfConverter::ReadOnnxFromBinary(const std::string &modelFile, google::protobuf::Message *onnx_model) {
   std::unique_ptr<char> onnx_file(new (std::nothrow) char[PATH_MAX]{0});
   int fd = open(onnx_file.get(), O_RDONLY);
+  if (fd < 0) {
+    MS_LOG(EXCEPTION) << "failed to open file";
+  }
   google::protobuf::io::FileInputStream input(fd);
   google::protobuf::io::CodedInputStream code_input(&input);
   code_input.SetTotalBytesLimit(INT_MAX, 536870912);

@@ -85,7 +88,7 @@ std::shared_ptr<FuncGraph> AnfConverter::RunAnfConverter(const std::string &file
       MS_LOG(ERROR) << "Trans data not support input format!";
     } else {
       modelFile = flagItem.substr(pos + 1);
-      std::cout << "input protobuf file path is: " << flagItem.substr(pos + 1) << std::endl;
+      std::cout << "input protobuf file path is: " << modelFile << std::endl;
     }
     if (ValidateFileStr(modelFile, ".pb") != 0) {
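The first hunk validates the descriptor returned by open() before handing it to the protobuf streams, and the second reuses the already-assigned modelFile instead of recomputing flagItem.substr(pos + 1) in the log line. A minimal POSIX sketch of the descriptor check; the OpenModelFile helper and the "model.pb" path are hypothetical.

// Sketch only: check the descriptor returned by open() before using it.
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstring>
#include <stdexcept>
#include <string>

int OpenModelFile(const std::string &path) {
  int fd = open(path.c_str(), O_RDONLY);
  if (fd < 0) {
    // Mirrors the added `if (fd < 0) MS_LOG(EXCEPTION) << "failed to open file";`
    throw std::runtime_error("failed to open file " + path + ": " + std::strerror(errno));
  }
  return fd;  // caller owns the descriptor and must close() it
}

int main() {
  try {
    int fd = OpenModelFile("model.pb");  // hypothetical path
    close(fd);
  } catch (const std::exception &) {
    // Reaching here only means the sample file does not exist.
  }
  return 0;
}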
mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc
@@ -119,7 +119,10 @@ bool MSANFModelParser::BuildParameterForFuncGraph(const ParameterPtr &node, cons
   std::string initial_data = initialize_proto.raw_data();
   auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
   MS_EXCEPTION_IF_NULL(tensor_data_buf);
-  memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), initial_data.data(), initial_data.size());
+  auto ret = memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), initial_data.data(), initial_data.size());
+  if (ret != 0) {
+    MS_LOG(EXCEPTION) << "memcpy_s error, errorno" << ret;
+  }

   auto param_value = std::make_shared<ParamValue>();
   MS_EXCEPTION_IF_NULL(param_value);

@@ -249,7 +252,11 @@ bool MSANFModelParser::ObtainValueNodeInTensorForm(const std::string &value_node
   tensor::TensorPtr tensor_info = std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[attr_tensor_type], shape);
   const std::string &tensor_buf = attr_tensor.raw_data();
   auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
-  memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), tensor_buf.data(), tensor_buf.size());
+  auto ret = memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), tensor_buf.data(), tensor_buf.size());
+  if (ret != 0) {
+    MS_LOG(EXCEPTION) << "memcpy_s error, errorno" << ret;
+  }
+
   auto new_value_node = NewValueNode(MakeValue(tensor_info));
   MS_EXCEPTION_IF_NULL(new_value_node);
   auto tensor_abstract = tensor_info->ToAbstract();

@@ -336,7 +343,6 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &ref_attr_name
     MS_LOG(ERROR) << "parse ValueNode value don't support input of ref_attr_name";
     return false;
   }
   return true;
 }
-
 bool MSANFModelParser::BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto) {
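Both hunks in this file capture the memcpy_s return value in ret and raise MS_LOG(EXCEPTION) when it is non-zero instead of discarding it. A small sketch of the same checked-copy discipline; because the bounds-checked memcpy_s comes from the securec dependency, the sketch substitutes an explicit size comparison plus std::memcpy, and CheckedCopy is an invented helper, not a MindSpore API.

// Sketch only: checked copy in the spirit of the memcpy_s + return-code check.
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <string>
#include <vector>

void CheckedCopy(void *dest, std::size_t dest_bytes, const void *src, std::size_t src_bytes) {
  if (src_bytes > dest_bytes) {
    // Stands in for `if (ret != 0) MS_LOG(EXCEPTION) << "memcpy_s error, errorno" << ret;`
    throw std::length_error("copy of " + std::to_string(src_bytes) + " bytes exceeds destination of " +
                            std::to_string(dest_bytes) + " bytes");
  }
  std::memcpy(dest, src, src_bytes);
}

int main() {
  std::string raw = "\x01\x02\x03\x04";         // plays the role of initialize_proto.raw_data()
  std::vector<uint8_t> tensor_buf(raw.size());  // plays the role of tensor_info->data_c()
  CheckedCopy(tensor_buf.data(), tensor_buf.size(), raw.data(), raw.size());
  return tensor_buf[3] == 0x04 ? 0 : 1;
}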
mindspore/ccsrc/utils/load_onnx/anf_model_parser.h
@@ -32,7 +32,7 @@ using uint64 = uint64_t;
 using float16 = Eigen::half;
 class MSANFModelParser {
  public:
-  MSANFModelParser() = default;
+  MSANFModelParser() : producer_name_(""), model_version_(0), ir_version_(0) {}
   ~MSANFModelParser() = default;

   FuncGraphPtr Parse(const onnx::ModelProto &model_proto);
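The header change swaps the defaulted constructor for one that explicitly initializes producer_name_, model_version_, and ir_version_, so the scalar members can never be read uninitialized. A short sketch of that pattern next to the C++11 default-member-initializer alternative; ParserA and ParserB are illustrative classes and the int member types are assumptions.

// Sketch only: two ways to ensure scalar members never start out uninitialized.
#include <string>

// Style used by the commit: an explicit constructor initializer list.
class ParserA {
 public:
  ParserA() : producer_name_(""), model_version_(0), ir_version_(0) {}

 private:
  std::string producer_name_;
  int model_version_;
  int ir_version_;
};

// Equivalent alternative: default member initializers, which keep
// `ParserB() = default;` valid while still zero-initializing the scalars.
class ParserB {
 public:
  ParserB() = default;

 private:
  std::string producer_name_;
  int model_version_ = 0;
  int ir_version_ = 0;
};

int main() {
  ParserA a;
  ParserB b;
  (void)a;
  (void)b;
  return 0;
}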