Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
weixin_44025039
mindspore
提交
ab6f7420
M
mindspore
项目概览
weixin_44025039
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
ab6f7420
编写于
8月 04, 2020
作者:
H
hanhuifeng2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Fix some bugs and add a test case for the GPU dropout op
上级
6657adfa
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
74 additions
and
3 deletions
+74
-3
mindspore/ccsrc/backend/kernel_compiler/gpu/nn/dropout_gpu_kernel.h
...ccsrc/backend/kernel_compiler/gpu/nn/dropout_gpu_kernel.h
+9
-3
mindspore/ccsrc/runtime/device/gpu/gpu_common.h
mindspore/ccsrc/runtime/device/gpu/gpu_common.h
+11
-0
tests/st/ops/gpu/test_dropout.py
tests/st/ops/gpu/test_dropout.py
+54
-0
未找到文件。
mindspore/ccsrc/backend/kernel_compiler/gpu/nn/dropout_gpu_kernel.h
浏览文件 @
ab6f7420
...
...
@@ -54,12 +54,18 @@ class DropoutGpuFwdKernel : public GpuKernel {
float
*
mask_f
=
GetDeviceAddress
<
float
>
(
workspace
,
0
);
if
(
!
states_init_
)
{
curandCreateGenerator
(
&
mask_generator_
,
CURAND_RNG_PSEUDO_DEFAULT
);
curandSetPseudoRandomGeneratorSeed
(
mask_generator_
,
time
(
NULL
));
CHECK_CURAND_RET_WITH_EXCEPT
(
curandCreateGenerator
(
&
mask_generator_
,
CURAND_RNG_PSEUDO_DEFAULT
),
"Failed to create generator"
);
CHECK_CURAND_RET_WITH_EXCEPT
(
curandSetPseudoRandomGeneratorSeed
(
mask_generator_
,
time
(
NULL
)),
"Failed to SetPseudoRandomGeneratorSeed"
);
MS_EXCEPTION_IF_NULL
(
mask_generator_
);
states_init_
=
true
;
}
CHECK_CURAND_RET_WITH_EXCEPT
(
curandSetStream
(
mask_generator_
,
reinterpret_cast
<
cudaStream_t
>
(
stream_ptr
)),
"Failed to set stream for generator"
);
// curandGen only support float or double for mask.
curandGenerateUniform
(
mask_generator_
,
mask_f
,
num_count_
);
CHECK_CURAND_RET_WITH_EXCEPT
(
curandGenerateUniform
(
mask_generator_
,
mask_f
,
num_count_
),
"Failed to generate uniform"
);
DropoutForward
(
input
,
mask
,
output
,
mask_f
,
num_count_
,
keep_prob_
,
reinterpret_cast
<
cudaStream_t
>
(
stream_ptr
));
return
true
;
...
...
mindspore/ccsrc/runtime/device/gpu/gpu_common.h
浏览文件 @
ab6f7420
...
...
@@ -20,7 +20,9 @@
#include <iostream>
#include <vector>
#include <algorithm>
#include <map>
#include "utils/log_adapter.h"
#include "include/curand.h"
namespace
mindspore
{
namespace
device
{
...
...
@@ -131,6 +133,15 @@ inline bool CheckNullInput(std::vector<size_t> input_shape) {
return
false
;
}
#define CHECK_NULL_INPUT(input_shape) mindspore::device::gpu::CheckNullInput(input_shape)
// Evaluates a cuRAND call and throws a MindSpore exception when the call fails,
// logging the caller-supplied message together with the raw curandStatus_t code.
// `expression` is evaluated exactly once. Wrapped in do { } while (0) so the
// macro expands to a single statement and stays safe inside un-braced
// if/else bodies when the caller appends the usual trailing semicolon.
#define CHECK_CURAND_RET_WITH_EXCEPT(expression, message)                                     \
  do {                                                                                        \
    curandStatus_t status = (expression);                                                     \
    if (status != CURAND_STATUS_SUCCESS) {                                                    \
      MS_LOG(EXCEPTION) << "CUDA curand Error: " << message << " | curandStatus: " << status; \
    }                                                                                         \
  } while (0)
}
// namespace gpu
}
// namespace device
}
// namespace mindspore
...
...
tests/st/ops/gpu/test_dropout.py
0 → 100644
浏览文件 @
ab6f7420
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import
numpy
as
np
import
pytest
import
mindspore.nn
as
nn
from
mindspore
import
Tensor
from
mindspore.ops
import
operations
as
P
class Net(nn.Cell):
    """Minimal cell wrapping the P.Dropout primitive so it can be executed
    as a MindSpore graph in the GPU tests below.

    Args:
        keep_prob (float): probability that each element is kept.
    """

    def __init__(self, keep_prob):
        super(Net, self).__init__()
        # Hold the primitive as a member so construct() can apply it.
        self.drop = P.Dropout(keep_prob)

    def construct(self, x_):
        # P.Dropout returns a (output, mask) tuple; forward it unchanged.
        result = self.drop(x_)
        return result
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dropout():
    """Statistically validate the GPU Dropout op on an all-ones input:
    survival rate near keep_prob, rescaled sum near the input sum, and a
    mask that marks exactly the surviving positions.
    """
    shape = [32, 16, 2, 5]
    prob = 0.4
    data = np.ones(shape).astype(np.float32)
    net = Net(prob)
    out, mask = net(Tensor(data))

    # --- output checks ---
    out_np = out.asnumpy()
    total = data.size
    kept = np.count_nonzero(out_np)
    # Roughly keep_prob of the 5120 elements must survive (±10% tolerance,
    # far wider than the binomial standard deviation of ~35 elements).
    assert (total * (prob - 0.1)) < kept < (total * (prob + 0.1))
    # Survivors are rescaled by 1/keep_prob, so the expected output sum
    # matches the input sum; allow 10% relative error.
    in_sum = np.sum(data)
    out_sum = np.sum(out_np)
    assert abs(out_sum - in_sum) / in_sum < 0.1

    # --- mask checks ---
    mask_np = mask.asnumpy()
    # The mask is binary and nonzero exactly where the output is nonzero.
    assert np.count_nonzero(mask_np) == kept
    mask_sum = np.sum(mask_np)
    assert abs(mask_sum - kept) / kept < 0.1
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录