Repository: magicwindyyd/mindspore (fork of MindSpore/mindspore)

Commit 2097a0e9
Author: liuxiao
Date: June 24, 2020
Parent: efecaf43

    Add optimizer operators for VM.
Showing 9 changed files with 665 additions and 9 deletions (+665, -9):
mindspore/ccsrc/kernel/tbe/tbe_adapter.cc                        +2    -0
mindspore/ops/_op_impl/tbe/__init__.py                           +4    -0
mindspore/ops/_op_impl/tbe/apply_add_sign.py                     +65   -0
mindspore/ops/_op_impl/tbe/apply_gradient_descent.py             +44   -0
mindspore/ops/_op_impl/tbe/apply_power_sign.py                   +65   -0
mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py    +54   -0
mindspore/ops/operations/__init__.py                             +5    -0
mindspore/ops/operations/nn_ops.py                               +352  -9
tests/ut/python/ops/test_ops.py                                  +74   -0
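At a glance, the commit wires four new optimizer primitives (ApplyAddSign, ApplyPowerSign, ApplyGradientDescent, ApplyProximalGradientDescent) through three layers: the C++ TBE kernel-name adapter, the TBE op-info registrations, and the Python operator exports, plus unit tests. Below is a minimal usage sketch for one of them, mirroring the test cells added at the end of this commit; the class name, shapes, and constants are illustrative, not part of the diff:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P


class AddSignNet(nn.Cell):
    """Applies one AddSign update step to var and m (illustrative sketch)."""

    def __init__(self):
        super(AddSignNet, self).__init__()
        self.apply_add_sign = P.ApplyAddSign()
        self.var = Parameter(Tensor(np.ones((3, 3)).astype(np.float32)), name="var")
        self.m = Parameter(Tensor(np.zeros((3, 3)).astype(np.float32)), name="m")

    def construct(self, grad):
        # Argument order follows the test cells below:
        # var, m, lr, alpha, sign_decay, beta, grad.
        return self.apply_add_sign(self.var, self.m, 0.001, 1.0, 0.99, 0.99, grad)


net = AddSignNet()
out = net(Tensor(np.ones((3, 3)).astype(np.float32)))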
mindspore/ccsrc/kernel/tbe/tbe_adapter.cc

@@ -77,6 +77,8 @@ static std::map<string, string> tbe_func_adapter_map = {
   {"sparse_apply_adagrad", "sparse_apply_adagrad_d"},
   {"apply_proximal_adagrad", "apply_proximal_adagrad_d"},
   {"sparse_apply_proximal_adagrad", "sparse_apply_proximal_adagrad_d"},
+  {"apply_add_sign", "apply_add_sign_d"},
+  {"apply_power_sign", "apply_power_sign_d"},
   {"transpose", "transpose_d"},
   {"fill", "fill_d"},
   {"unsorted_segment_sum", "unsorted_segment_sum_d"},
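The two added entries follow the existing pattern in this map: the framework's snake-case op name is redirected to the corresponding TBE kernel, whose name carries a "_d" suffix. A rough Python rendering of that lookup, purely illustrative (the real logic lives in the C++ adapter above, and the function name here is an assumption):

# Illustrative sketch only; not the actual C++ adapter code.
tbe_func_adapter_map = {
    "apply_add_sign": "apply_add_sign_d",
    "apply_power_sign": "apply_power_sign_d",
}

def adapt_tbe_kernel_name(op_name):
    """Return the TBE kernel name for a snake-case op name, or the name unchanged."""
    return tbe_func_adapter_map.get(op_name, op_name)

assert adapt_tbe_kernel_name("apply_add_sign") == "apply_add_sign_d"
assert adapt_tbe_kernel_name("relu") == "relu"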
mindspore/ops/_op_impl/tbe/__init__.py

@@ -34,6 +34,10 @@ from .apply_ada_max import _apply_ada_max_tbe
 from .apply_adadelta import _apply_adadelta_tbe
 from .apply_adagrad import _apply_adagrad_tbe
 from .apply_adagrad_v2 import _apply_adagrad_v2_tbe
+from .apply_add_sign import _apply_add_sign_tbe
+from .apply_power_sign import _apply_power_sign_tbe
+from .apply_gradient_descent import _apply_gradient_descent_tbe
+from .apply_proximal_gradient_descent import _apply_proximal_gradient_descent_tbe
 from .approximate_equal import _approximate_equal_tbe
 from .adam_apply_one import _adam_apply_one_tbe
 from .assign import _assign_tbe
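These bare imports are load-bearing: each _*_tbe function is wrapped in @op_info_register(...), so importing the module at package load time is what records the op info with the framework. A minimal sketch of that decorator pattern, simplified and assumed (the real registry lives in mindspore.ops.op_info_register):

# Simplified, assumed sketch of register-on-import; not MindSpore's actual code.
_OP_INFO_REGISTRY = {}

def op_info_register(op_info):
    def decorator(func):
        # The side effect happens when the decorated module is imported.
        _OP_INFO_REGISTRY[func.__name__] = op_info
        return func
    return decorator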
mindspore/ops/_op_impl/tbe/apply_add_sign.py (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ApplyAddSignD op"""
"""ApplyAddSignD op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

apply_add_sign_d_op_info = TBERegOp("ApplyAddSign") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("apply_add_sign_d.so") \
    .compute_cost(10) \
    .kernel_name("apply_add_sign_d") \
    .partial_flag(True) \
    .input(0, "var", False, "required", "all") \
    .input(1, "m", False, "required", "all") \
    .input(2, "lr", False, "required", "all") \
    .input(3, "alpha", False, "required", "all") \
    .input(4, "sign_decay", False, "required", "all") \
    .input(5, "beta", False, "required", "all") \
    .input(6, "grad", False, "required", "all") \
    .output(0, "var", False, "required", "all") \
    .output(1, "m", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_5HD, DataType.F16_5HD,
                  DataType.F16_5HD) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0,
                  DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_FracZ, DataType.F16_FracZ,
                  DataType.F16_FracZ) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_5HD, DataType.F32_5HD,
                  DataType.F32_5HD) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0,
                  DataType.F32_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_FracZ, DataType.F32_FracZ,
                  DataType.F32_FracZ) \
    .get_op_info()


@op_info_register(apply_add_sign_d_op_info)
def _apply_add_sign_tbe():
    """ApplyAddSignD TBE register"""
    return
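Each dtype_format row declares one supported dtype/layout combination, in the same positional order as the declarations above: the seven inputs (var, m, lr, alpha, sign_decay, beta, grad) followed by the two outputs (var, m). A small annotation of the first row, for reading convenience only (not part of the diff):

# Positional meaning of the first dtype_format row of ApplyAddSign.
first_row = {
    "var": "F16_5HD", "m": "F16_5HD",
    "lr": "F16_Default", "alpha": "F16_Default",
    "sign_decay": "F16_Default", "beta": "F16_Default",
    "grad": "F16_5HD",
    "var_out": "F16_5HD", "m_out": "F16_5HD",
}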
mindspore/ops/_op_impl/tbe/apply_gradient_descent.py (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ApplyGradientDescent op"""
"""ApplyGradientDescent op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

apply_gradient_descent_op_info = TBERegOp("ApplyGradientDescent") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("apply_gradient_descent.so") \
    .compute_cost(10) \
    .kernel_name("apply_gradient_descent") \
    .partial_flag(True) \
    .input(0, "var", False, "required", "all") \
    .input(1, "alpha", False, "required", "all") \
    .input(2, "delta", False, "required", "all") \
    .output(0, "var", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_Default, DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_Default, DataType.F16_FracZ, DataType.F16_FracZ) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_Default, DataType.F32_5HD, DataType.F32_5HD) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_Default, DataType.F32_FracZ, DataType.F32_FracZ) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(apply_gradient_descent_op_info)
def _apply_gradient_descent_tbe():
    """ApplyGradientDescent TBE register"""
    return
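For reference, the update this kernel performs is the plain gradient-descent step var <- var - alpha * delta (the standard ApplyGradientDescent semantics). A quick NumPy check of that arithmetic, independent of MindSpore and purely illustrative:

import numpy as np

var = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
alpha = 0.1
delta = np.array([[0.5, 0.5], [0.5, 0.5]], dtype=np.float32)

var -= alpha * delta  # var <- var - alpha * delta
print(var)            # [[0.95 1.95]
                      #  [2.95 3.95]]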
mindspore/ops/_op_impl/tbe/apply_power_sign.py (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ApplyPowerSignD op"""
"""ApplyPowerSignD op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

apply_power_sign_d_op_info = TBERegOp("ApplyPowerSign") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("apply_power_sign_d.so") \
    .compute_cost(10) \
    .kernel_name("apply_power_sign_d") \
    .partial_flag(True) \
    .input(0, "var", False, "required", "all") \
    .input(1, "m", False, "required", "all") \
    .input(2, "lr", False, "required", "all") \
    .input(3, "logbase", False, "required", "all") \
    .input(4, "sign_decay", False, "required", "all") \
    .input(5, "beta", False, "required", "all") \
    .input(6, "grad", False, "required", "all") \
    .output(0, "var", False, "required", "all") \
    .output(1, "m", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_5HD, DataType.F16_5HD,
                  DataType.F16_5HD) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0,
                  DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_FracZ, DataType.F16_FracZ,
                  DataType.F16_FracZ) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_5HD, DataType.F32_5HD,
                  DataType.F32_5HD) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0,
                  DataType.F32_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_FracZ, DataType.F32_FracZ,
                  DataType.F32_FracZ) \
    .get_op_info()


@op_info_register(apply_power_sign_d_op_info)
def _apply_power_sign_tbe():
    """ApplyPowerSignD TBE register"""
    return
mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ApplyProximalGradientDescent op"""
"""ApplyProximalGradientDescent op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

apply_proximal_gradient_descent_op_info = TBERegOp("ApplyProximalGradientDescent") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("apply_proximal_gradient_descent.so") \
    .compute_cost(10) \
    .kernel_name("apply_proximal_gradient_descent") \
    .partial_flag(True) \
    .input(0, "var", False, "required", "all") \
    .input(1, "alpha", False, "required", "all") \
    .input(2, "l1", False, "required", "all") \
    .input(3, "l2", False, "required", "all") \
    .input(4, "delta", False, "required", "all") \
    .output(0, "var", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_FracZ, DataType.F16_FracZ) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_5HD, DataType.F32_5HD) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_FracZ, DataType.F32_FracZ) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(apply_proximal_gradient_descent_op_info)
def _apply_proximal_gradient_descent_tbe():
    """ApplyProximalGradientDescent TBE register"""
    return
mindspore/ops/operations/__init__.py

@@ -74,6 +74,7 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
                      TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, SparseApplyFtrl,
                      ApplyProximalAdagrad, SparseApplyProximalAdagrad,
                      ApplyAdaMax, ApplyAdadelta, ApplyAdagrad, ApplyAdagradV2,
+                     ApplyAddSign, ApplyPowerSign, ApplyGradientDescent, ApplyProximalGradientDescent,
                      ApplyRMSProp, ApplyCenteredRMSProp, BasicLSTMCell, InTopK)
 from .other_ops import (Assign, IOU, BoundingBoxDecode, BoundingBoxEncode,
                         CheckValid, MakeRefKey, Partial, Depend, CheckBprop)

@@ -295,6 +296,10 @@ __all__ = [
     "ApplyAdadelta",
     "ApplyAdagrad",
     "ApplyAdagradV2",
+    "ApplyAddSign",
+    "ApplyPowerSign",
+    "ApplyGradientDescent",
+    "ApplyProximalGradientDescent",
     "BatchToSpace",
     "Atan2",
     "ApplyRMSProp",
mindspore/ops/operations/nn_ops.py

(Diff collapsed in this view: +352, -9. This file adds the Python primitive definitions imported in operations/__init__.py above.)
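Since the nn_ops.py diff is collapsed here, the following is only a hedged sketch of the general shape such a primitive takes in MindSpore's PrimitiveWithInfer style; it is not the collapsed diff content, and the class body shown is an assumption:

# Sketch only, assuming the PrimitiveWithInfer pattern used elsewhere in nn_ops.py;
# not the actual collapsed diff content.
from mindspore.ops.primitive import PrimitiveWithInfer, prim_attr_register


class ApplyGradientDescentSketch(PrimitiveWithInfer):
    """Contract sketch: updates var in place as var <- var - alpha * delta."""

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['var', 'alpha', 'delta'], outputs=['var'])

    def infer_shape(self, var_shape, alpha_shape, delta_shape):
        # The updated variable keeps the shape of var.
        return var_shape

    def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):
        return var_dtype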
tests/ut/python/ops/test_ops.py

@@ -351,6 +351,64 @@ class ApplyAdagradV2Net(nn.Cell):
         return out


+class ApplyAddSignNet(nn.Cell):
+    def __init__(self):
+        super(ApplyAddSignNet, self).__init__()
+        self.apply_add_sign = P.ApplyAddSign()
+        self.lr = 0.001
+        self.alpha = 1.0
+        self.sign_decay = 0.99
+        self.beta = 0.99
+        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
+        self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
+
+    def construct(self, grad):
+        out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
+        return out
+
+
+class ApplyPowerSignNet(nn.Cell):
+    def __init__(self):
+        super(ApplyPowerSignNet, self).__init__()
+        self.apply_power_sign = P.ApplyPowerSign()
+        self.lr = 0.001
+        self.logbase = np.e
+        self.sign_decay = 0.99
+        self.beta = 0.99
+        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
+        self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
+
+    def construct(self, grad):
+        out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase, self.sign_decay, self.beta, grad)
+        return out
+
+
+class ApplyGradientDescentNet(nn.Cell):
+    def __init__(self):
+        super(ApplyGradientDescentNet, self).__init__()
+        self.apply_gradient_descent = P.ApplyGradientDescent()
+        self.alpha = 0.001
+        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
+
+    def construct(self, delta):
+        out = self.apply_gradient_descent(self.var, self.alpha, delta)
+        return out
+
+
+class ApplyProximalGradientDescentNet(nn.Cell):
+    def __init__(self):
+        super(ApplyProximalGradientDescentNet, self).__init__()
+        self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()
+        self.alpha = 0.001
+        self.l1 = 0.0
+        self.l2 = 0.0
+        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
+
+    def construct(self, delta):
+        out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta)
+        return out
+
+
 class SparseApplyAdagradNet(nn.Cell):
     def __init__(self):
         super(SparseApplyAdagradNet, self).__init__()

@@ -1241,6 +1299,22 @@ test_case_nn_ops = [
         'block': ApplyAdagradV2Net(),
         'desc_inputs': [[3, 3]],
         'skip': ['backward']}),
+    ('ApplyAddSign', {
+        'block': ApplyAddSignNet(),
+        'desc_inputs': [[3, 3]],
+        'skip': ['backward']}),
+    ('ApplyPowerSign', {
+        'block': ApplyPowerSignNet(),
+        'desc_inputs': [[3, 3]],
+        'skip': ['backward']}),
+    ('ApplyGradientDescent', {
+        'block': ApplyGradientDescentNet(),
+        'desc_inputs': [[3, 3]],
+        'skip': ['backward']}),
+    ('ApplyProximalGradientDescent', {
+        'block': ApplyProximalGradientDescentNet(),
+        'desc_inputs': [[3, 3]],
+        'skip': ['backward']}),
     ('Flatten_1', {
         'block': NetForFlatten(),
         'desc_inputs': [Tensor(np.ones([2, 3, 4]).astype(np.int32)), Tensor(np.ones([2, 12]).astype(np.int32))],