magicwindyyd / mindspore  (forked from MindSpore / mindspore, in sync with the upstream project)
ea0cd5cc
编写于
6月 18, 2020
作者:
L
lizhenyu
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add Sigmoid and SigmoidGrad operation of GPU
上级
21ade668
Showing 4 changed files with 132 additions and 0 deletions (+132 −0):

  mindspore/ccsrc/kernel/gpu/nn/activation_gpu_kernel.cc     +5  −0
  mindspore/ccsrc/kernel/gpu/nn/activation_grad_kernel.cc    +9  −0
  tests/st/ops/gpu/test_sigmoid_grad_op.py                   +61 −0
  tests/st/ops/gpu/test_sigmoid_op.py                        +57 −0
mindspore/ccsrc/kernel/gpu/nn/activation_gpu_kernel.cc

@@ -27,5 +27,10 @@ MS_REG_GPU_KERNEL_ONE(Tanh, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOut
                       ActivationGpuFwdKernel, float)
 MS_REG_GPU_KERNEL_ONE(Tanh, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
                       ActivationGpuFwdKernel, half)
+MS_REG_GPU_KERNEL_ONE(Sigmoid, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+                      ActivationGpuFwdKernel, float)
+MS_REG_GPU_KERNEL_ONE(Sigmoid, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
+                      ActivationGpuFwdKernel, half)
 }  // namespace kernel
 }  // namespace mindspore
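Not part of the commit: a minimal Python sketch of what these forward registrations enable, exercising the float16 path that the added st tests do not cover. It assumes a GPU build of MindSpore and the same 2020-era API the tests below use (context.set_context, P.Sigmoid); the 1e-3 tolerance is an assumption chosen for half precision.

import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

x_np = np.array([-1.0, 1.0, 10.0]).astype(np.float16)
# A float16 input selects the newly registered ActivationGpuFwdKernel<half> for Sigmoid
output = P.Sigmoid()(Tensor(x_np))
# float32 NumPy reference: sigmoid(x) = 1 / (1 + exp(-x))
expect = 1.0 / (1.0 + np.exp(-x_np.astype(np.float32)))
assert np.all(np.abs(output.asnumpy().astype(np.float32) - expect) < 1.0e-3)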
mindspore/ccsrc/kernel/gpu/nn/activation_grad_kernel.cc

@@ -35,5 +35,14 @@ MS_REG_GPU_KERNEL_ONE(
   TanhGrad,
   KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
   ActivationGradGpuKernel, half)
+MS_REG_GPU_KERNEL_ONE(
+  SigmoidGrad,
+  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+  ActivationGradGpuKernel, float)
+MS_REG_GPU_KERNEL_ONE(
+  SigmoidGrad,
+  KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
+  ActivationGradGpuKernel, half)
 }  // namespace kernel
 }  // namespace mindspore
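For reference (not part of the commit): the backward kernel registered here takes two inputs, the forward output y and the incoming gradient dy, and applies the sigmoid gradient identity dx = dy * y * (1 - y). A NumPy-only sketch applying that identity to the inputs used in tests/st/ops/gpu/test_sigmoid_grad_op.py below reproduces the test's hard-coded expect matrix:

import numpy as np

# 3x3 values taken from test_sigmoid_grad_op.py (the extra 1x1 batch/channel dims dropped)
y = np.array([[-1, 1, 2], [1, -1, 1], [2, 1, -1]], dtype=np.float32)
dy = np.array([[-11, 2, 4], [-1, 1, -1], [-4, 4, -4]], dtype=np.float32)

# Sigmoid gradient identity: dx = dy * y * (1 - y)
dx = dy * y * (1.0 - y)
print(dx)  # [[22, 0, -8], [0, -2, 0], [8, 0, 8]] -- the test's expected output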
tests/st/ops/gpu/test_sigmoid_grad_op.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G


class NetSigmoidGrad(nn.Cell):
    def __init__(self):
        super(NetSigmoidGrad, self).__init__()
        self.sigmoid_grad = G.SigmoidGrad()

    def construct(self, y, dy):
        return self.sigmoid_grad(y, dy)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sigmoid_grad():
    y = Tensor(np.array([[[[-1, 1, 2],
                           [1, -1, 1],
                           [2, 1, -1]]]]).astype(np.float32))
    dy = Tensor(np.array([[[[-11, 2, 4],
                            [-1, 1, -1],
                            [-4, 4, -4]]]]).astype(np.float32))

    expect = np.array([[[[22, 0, -8],
                         [0, -2, 0],
                         [8, 0, 8]]]]).astype(np.float32)

    error = np.ones(shape=[1, 1, 3, 3]) * 1.0e-6

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    sigmoid_grad = NetSigmoidGrad()
    output = sigmoid_grad(y, dy)
    diff = output.asnumpy() - expect
    assert np.all(abs(diff) < error)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    sigmoid_grad = NetSigmoidGrad()
    output = sigmoid_grad(y, dy)
    diff = output.asnumpy() - expect
    assert np.all(abs(diff) < error)
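Not part of the commit: the half-precision SigmoidGrad registration above is not exercised by this st test, which only runs float32. A hedged sketch of a float16 variant, reusing the NetSigmoidGrad cell defined in test_sigmoid_grad_op.py and relaxing the tolerance to 1e-3 for the reduced precision; the input values here are illustrative.

import numpy as np
import mindspore.context as context
from mindspore import Tensor


def run_sigmoid_grad_fp16():
    # Illustrative inputs; expect follows dx = dy * y * (1 - y)
    y = Tensor(np.array([[0.25, 0.5, 0.75]]).astype(np.float16))
    dy = Tensor(np.array([[1.0, 1.0, 1.0]]).astype(np.float16))
    expect = np.array([[0.1875, 0.25, 0.1875]], dtype=np.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetSigmoidGrad()  # as defined in test_sigmoid_grad_op.py above
    output = net(y, dy)
    assert np.all(np.abs(output.asnumpy().astype(np.float32) - expect) < 1.0e-3)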
tests/st/ops/gpu/test_sigmoid_op.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P


class NetSigmoid(nn.Cell):
    def __init__(self):
        super(NetSigmoid, self).__init__()
        self.sigmoid = P.Sigmoid()

    def construct(self, x):
        return self.sigmoid(x)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sigmoid():
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))

    expect = np.array([[[[0.268941, 0.731059, 0.999955],
                         [0.731059, 0.268941, 0.731059],
                         [0.999955, 0.731059, 0.268941]]]]).astype(np.float32)

    error = np.ones(shape=[1, 1, 3, 3]) * 1.0e-6

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    sigmoid = NetSigmoid()
    output = sigmoid(x)
    diff = output.asnumpy() - expect
    assert np.all(abs(diff) < error)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    sigmoid = NetSigmoid()
    output = sigmoid(x)
    diff = output.asnumpy() - expect
    assert np.all(abs(diff) < error)
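For reference (not part of the commit): the hard-coded expect values in test_sigmoid_op.py are simply sigmoid(x) = 1 / (1 + exp(-x)) evaluated at the three distinct inputs, which a NumPy one-liner confirms:

import numpy as np

x = np.array([-1.0, 1.0, 10.0], dtype=np.float32)
# Prints [0.26894143 0.7310586  0.9999546 ], i.e. the 0.268941 / 0.731059 / 0.999955 in the test
print(1.0 / (1.0 + np.exp(-x)))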