magicwindyyd / mindspore (forked from MindSpore / mindspore; in sync with the fork source)
Commit 42b0d49d
Authored Jun 09, 2020 by mindspore-ci-bot; committed via Gitee on Jun 09, 2020

!1921 Adapt TBE ops ApplyProximalAdagrad for GE.

Merge pull request !1921 from liuxiao/ApplyProximalAdagrad-for-GE
Parents: db00ea71, 6856c2ac
Showing 5 changed files with 21 additions and 7 deletions (+21, −7):
mindspore/ccsrc/transform/convert.cc     +2 −0
mindspore/ccsrc/transform/op_declare.cc  +6 −0
mindspore/ccsrc/transform/op_declare.h   +2 −0
mindspore/ops/operations/nn_ops.py       +1 −1
tests/ut/python/ops/test_ops.py          +10 −6
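
In short, the commit wires the TBE operator ApplyProximalAdagrad into the GE (Graph Engine) backend: it registers a name constant and an adapter-map entry in the converter, declares the adapter's input/attribute/output maps, relaxes SparseApplyFtrl's learning-rate check, and reworks the unit tests so var and accum are held as Parameters. For orientation, here is a NumPy sketch of the proximal Adagrad update this operator family implements; it paraphrases the commonly documented formula, not the TBE kernel itself:

import numpy as np

def apply_proximal_adagrad(var, accum, lr, l1, l2, grad):
    # Accumulate squared gradients, as in plain Adagrad.
    accum = accum + grad * grad
    # Adagrad step with a per-element adaptive learning rate.
    prox_v = var - lr * grad / np.sqrt(accum)
    # Proximal step: soft-threshold toward zero (L1), then shrink (L2).
    var = np.sign(prox_v) * np.maximum(np.abs(prox_v) - lr * l1, 0) / (1 + lr * l2)
    return var, accum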
mindspore/ccsrc/transform/convert.cc
@@ -182,6 +182,7 @@ const char kNameBinaryCrossEntropy[] = "BinaryCrossEntropy";
 const char kNameBinaryCrossEntropyGrad[] = "BinaryCrossEntropyGrad";
 const char kNameSparseApplyAdagrad[] = "SparseApplyAdagrad";
 const char kNameSparseApplyFtrlD[] = "SparseApplyFtrlD";
+const char kNameApplyProximalAdagrad[] = "ApplyProximalAdagrad";
 const char kNameAcosh[] = "Acosh";
 const char kNameAcoshGrad[] = "AcoshGrad";
 const char kNameFloorMod[] = "FloorMod";

@@ -386,6 +387,7 @@ std::unordered_map<std::string, OpAdapterDescPtr> &DfGraphConvertor::get_adpt_ma
   {string(kNameBinaryCrossEntropyGrad), ADPT_DESC(BinaryCrossEntropyGrad)},
   {string(kNameSparseApplyAdagrad), ADPT_DESC(SparseApplyAdagradD)},
   {string(kNameSparseApplyFtrlD), ADPT_DESC(SparseApplyFtrlD)},
+  {string(kNameApplyProximalAdagrad), ADPT_DESC(ApplyProximalAdagrad)},
   {string(kNameAcosh), ADPT_DESC(Acosh)},
   {string(kNameAcoshGrad), ADPT_DESC(AcoshGrad)},
   {string(kNameFloorMod), ADPT_DESC(FloorMod)},
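
The two added lines follow the file's existing pattern: a name constant for the primitive, plus an entry in the adapter map that DfGraphConvertor consults when lowering a MindSpore graph to a GE DfGraph. A toy Python model of that lookup, with made-up adapter values, just to show the dispatch:

ADPT_MAP = {
    "SparseApplyFtrlD": "adapter for SparseApplyFtrlD",
    "ApplyProximalAdagrad": "adapter for ApplyProximalAdagrad",  # entry added by this commit
}

def get_adapter(op_name):
    # A primitive missing from this map cannot be converted for the GE backend.
    return ADPT_MAP.get(op_name)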
mindspore/ccsrc/transform/op_declare.cc
@@ -1155,6 +1155,12 @@ ATTR_MAP(SparseApplyAdagradD) = {{"lr", ATTR_DESC(lr, AnyTraits<float>())},
                                  {"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
 OUTPUT_MAP(SparseApplyAdagradD) = {{0, OUTPUT_DESC(var)}};

+// ApplyProximalAdagrad
+INPUT_MAP(ApplyProximalAdagrad) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)},
+                                   {4, INPUT_DESC(l1)}, {5, INPUT_DESC(l2)}, {6, INPUT_DESC(grad)}};
+ATTR_MAP(ApplyProximalAdagrad) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyProximalAdagrad) = {{0, OUTPUT_DESC(var)}};
+
 // SparseApplyFtrlD
 INPUT_MAP(SparseApplyFtrlD) = {{1, INPUT_DESC(var)},
                                {2, INPUT_DESC(accum)},
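
The three maps declare how the adapter wires the primitive to the GE operator: INPUT_MAP keys are the primitive's 1-based input positions, ATTR_MAP forwards the use_locking attribute as a bool, and OUTPUT_MAP exposes output 0 as the updated var. A hypothetical Python rendering of the same declaration, only to make the encoding explicit (the real code is the C++ macros above):

APPLY_PROXIMAL_ADAGRAD_ADAPTER = {
    # MindSpore input position (1-based) -> GE input name
    "inputs": {1: "var", 2: "accum", 3: "lr", 4: "l1", 5: "l2", 6: "grad"},
    # primitive attribute -> expected GE attribute type
    "attrs": {"use_locking": bool},
    # GE output index -> output name; index 0 is the updated var
    "outputs": {0: "var"},
}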
mindspore/ccsrc/transform/op_declare.h
@@ -442,6 +442,8 @@ DECLARE_OP_ADAPTER(BinaryCrossEntropyGrad)
 DECLARE_OP_USE_OUTPUT(BinaryCrossEntropyGrad)
 DECLARE_OP_ADAPTER(SparseApplyAdagradD)
 DECLARE_OP_USE_OUTPUT(SparseApplyAdagradD)
+DECLARE_OP_ADAPTER(ApplyProximalAdagrad)
+DECLARE_OP_USE_OUTPUT(ApplyProximalAdagrad)
 DECLARE_OP_ADAPTER(SpaceToDepth)
 DECLARE_OP_USE_OUTPUT(SpaceToDepth)
 DECLARE_OP_ADAPTER(DepthToSpace)
mindspore/ops/operations/nn_ops.py
@@ -3149,7 +3149,7 @@ class SparseApplyFtrl(PrimitiveWithInfer):
         validator.check_value_type("l1", l1, [float], self.name)
         validator.check_value_type("l2", l2, [float], self.name)
         validator.check_value_type("lr_power", lr_power, [float], self.name)
-        self.lr = validator.check_number("lr", lr, 0.0, Rel.GT, self.name)
+        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_LEFT, self.name)
         self.l1 = validator.check_number("l1", l1, 0.0, Rel.GE, self.name)
         self.l2 = validator.check_number("l2", l2, 0.0, Rel.GE, self.name)
         self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
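
This one-line change relaxes SparseApplyFtrl's learning-rate validation: the old check_number call required lr > 0 (Rel.GT), while check_number_range over [0.0, inf) with Rel.INC_LEFT also accepts lr == 0.0. A stand-in sketch of the constraint the new call enforces (the validator's real signature is paraphrased here):

def check_number_range(arg_name, value, lower, upper, prim_name):
    # Rel.INC_LEFT: the interval is inclusive on the left, i.e. [lower, upper).
    if not lower <= value < upper:
        raise ValueError(f"For '{prim_name}', '{arg_name}' must be in [{lower}, {upper}), got {value}.")
    return value

check_number_range("lr", 0.0, 0.0, float("inf"), "SparseApplyFtrl")  # passes now; Rel.GT rejected 0.0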
tests/ut/python/ops/test_ops.py
@@ -244,12 +244,14 @@ class SparseApplyProximalAdagradNet(nn.Cell):
     def __init__(self):
         super(SparseApplyProximalAdagradNet, self).__init__()
         self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
+        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
+        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
         self.lr = 0.01
         self.l1 = 0.0
         self.l2 = 0.0

-    def construct(self, var, accum, grad, indices):
-        out = self.sparse_apply_proximal_adagrad(var, accum, self.lr, self.l1, self.l2, grad, indices)
+    def construct(self, grad, indices):
+        out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad, indices)
         return out

@@ -257,12 +259,14 @@ class ApplyProximalAdagradNet(nn.Cell):
     def __init__(self):
         super(ApplyProximalAdagradNet, self).__init__()
         self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
+        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
+        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
         self.lr = 0.01
         self.l1 = 0.0
         self.l2 = 0.0

-    def construct(self, var, accum, grad):
-        out = self.apply_proximal_adagrad(var, accum, self.lr, self.l1, self.l2, grad)
+    def construct(self, grad):
+        out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad)
         return out

@@ -1061,11 +1065,11 @@ test_case_nn_ops = [
         'skip': ['backward']}),
     ('ApplyProximalAdagrad', {
         'block': ApplyProximalAdagradNet(),
-        'desc_inputs': [[3, 3], [3, 3], [3, 3]],
+        'desc_inputs': [[3, 3]],
         'skip': ['backward']}),
     ('SparseApplyProximalAdagrad', {
         'block': SparseApplyProximalAdagradNet(),
-        'desc_inputs': [[3, 3], [3, 3], [3, 3], Tensor(np.ones((3,), np.int32))],
+        'desc_inputs': [[3, 3], Tensor(np.ones((3,), np.int32))],
         'skip': ['backward']}),
     ('Flatten_1', {
         'block': NetForFlatten(),
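
Both test nets now own var and accum as Parameters instead of accepting them as call arguments, which matches how state-updating optimizer ops are exercised: only grad (plus indices for the sparse variant) remain runtime inputs, so each desc_inputs list shrinks to match the new construct signature. A minimal usage sketch in the same style, with illustrative shapes and hyperparameters:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P

class ProximalAdagradNet(nn.Cell):
    def __init__(self):
        super(ProximalAdagradNet, self).__init__()
        self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
        # State lives on the Cell so the op can update it in place.
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")

    def construct(self, grad):
        return self.apply_proximal_adagrad(self.var, self.accum, 0.01, 0.0, 0.0, grad)

net = ProximalAdagradNet()
out = net(Tensor(np.random.rand(3, 3).astype(np.float32)))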