Commit 95048955 in magicwindyyd / mindspore (forked from MindSpore / mindspore)
Authored on Jul 13, 2020 by mindspore-ci-bot; committed by Gitee on Jul 13, 2020
Parents: d3ec05d7, cccb230f

!2925 Add random normal op at GPU

Merge pull request !2925 from peixu_ren/custom_gpu
Showing 4 changed files with 213 additions and 0 deletions (+213 -0):
mindspore/ccsrc/kernel/gpu/cuda_impl/random_op_impl.cu      +42  -0
mindspore/ccsrc/kernel/gpu/cuda_impl/random_op_impl.cuh     +26  -0
mindspore/ccsrc/kernel/gpu/math/random_op_gpu_kernel.cc     +24  -0
mindspore/ccsrc/kernel/gpu/math/random_op_gpu_kernel.h      +121 -0
mindspore/ccsrc/kernel/gpu/cuda_impl/random_op_impl.cu (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "random_op_impl.cuh"
template <typename T>
__global__ void NormalKernel(int seed, curandState *globalState, T *output, size_t count) {
  // Grid-stride loop: each element gets its own curand state, initialized with the same
  // seed but a distinct subsequence index i, then one standard-normal draw.
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
    curand_init(seed, i, 0, &globalState[i]);
    output[i] = curand_normal(&globalState[i]);
  }
  return;
}

template <typename T>
void StandardNormal(int seed, int seed2, curandState *globalState, T *output, size_t count,
                    cudaStream_t cuda_stream) {
  // Seed selection: seed2 takes precedence, then seed; otherwise fall back to wall-clock time.
  int RNG_seed = 0;
  if (seed2 != 0) {
    RNG_seed = seed2;
  } else if (seed != 0) {
    RNG_seed = seed;
  } else {
    RNG_seed = time(NULL);
  }
  NormalKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(RNG_seed, globalState, output, count);
  return;
}

template void StandardNormal<float>(int seed, int seed2, curandState *globalState, float *output, size_t count,
                                    cudaStream_t cuda_stream);
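The exported StandardNormal<float> instantiation expects a caller-provided curandState workspace with one state per output element and writes count standard-normal samples on the supplied stream. Below is a minimal, hypothetical host-side driver, not part of this commit, that exercises that contract directly; it assumes the file above is compiled and linked with it, and the buffer names and element count are illustrative only.

// Hypothetical test driver (illustration only, not part of the commit).
#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// Matches the explicit float instantiation exported above.
template <typename T>
void StandardNormal(int seed, int seed2, curandState *globalState, T *output, size_t count,
                    cudaStream_t cuda_stream);

int main() {
  const size_t count = 16;
  curandState *states = nullptr;
  float *d_out = nullptr;
  cudaMalloc(reinterpret_cast<void **>(&states), count * sizeof(curandState));  // one RNG state per element
  cudaMalloc(reinterpret_cast<void **>(&d_out), count * sizeof(float));
  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // seed2 = 0, so the non-zero seed is used by the seed-selection logic above.
  StandardNormal<float>(/*seed=*/42, /*seed2=*/0, states, d_out, count, stream);
  cudaStreamSynchronize(stream);

  float host_out[count];
  cudaMemcpy(host_out, d_out, count * sizeof(float), cudaMemcpyDeviceToHost);
  for (size_t i = 0; i < count; ++i) printf("%f\n", host_out[i]);

  cudaFree(states);
  cudaFree(d_out);
  cudaStreamDestroy(stream);
  return 0;
}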
mindspore/ccsrc/kernel/gpu/cuda_impl/random_op_impl.cuh (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_RANDOMOPIMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_RANDOMOPIMPL_H_
#include <curand_kernel.h>
#include "device/gpu/cuda_common.h"
template <typename T>
void StandardNormal(int seed, int seed2, curandState *globalState, T *output, size_t count,
                    cudaStream_t cuda_stream);
#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_RANDOMOPIMPL_H_
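The header pulls in device/gpu/cuda_common.h, which is expected to supply the GET_BLOCKS and GET_THREADS launch-geometry macros used in random_op_impl.cu. Their actual definitions are not part of this diff; the sketch below only illustrates the common pattern such helpers usually follow, and the helper names and the constants 256 and 4096 are assumptions, not MindSpore's real values.

// Illustrative stand-ins for GET_BLOCKS / GET_THREADS (assumed pattern, not the real macros).
#include <algorithm>
#include <cstddef>

constexpr int kAssumedThreadsPerBlock = 256;  // assumed threads per block
constexpr int kAssumedMaxBlocks = 4096;       // assumed cap on the grid size

inline int AssumedGetThreads() { return kAssumedThreadsPerBlock; }

inline int AssumedGetBlocks(size_t total_elements) {
  // Enough blocks to cover all elements, capped; the grid-stride loop in
  // NormalKernel picks up any remainder beyond the cap.
  int blocks = static_cast<int>((total_elements + kAssumedThreadsPerBlock - 1) / kAssumedThreadsPerBlock);
  return std::min(blocks, kAssumedMaxBlocks);
}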
mindspore/ccsrc/kernel/gpu/math/random_op_gpu_kernel.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel/gpu/math/random_op_gpu_kernel.h"
namespace mindspore {
namespace kernel {
MS_REG_GPU_KERNEL_ONE(StandardNormal,
                      KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32),
                      RandomOpGpuKernel, float)
}  // namespace kernel
}  // namespace mindspore
mindspore/ccsrc/kernel/gpu/math/random_op_gpu_kernel.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_RANDOMOP_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_RANDOMOP_GPU_KERNEL_H_
#include <curand_kernel.h>
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "kernel/gpu/gpu_kernel.h"
#include "kernel/gpu/gpu_kernel_factory.h"
#include "kernel/gpu/cuda_impl/random_op_impl.cuh"
namespace mindspore {
namespace kernel {
enum RandomOptype { RANDOM_OP_NORMAL = 0, RANDOM_OP_INVALID_TYPE = 255 };

const std::map<std::string, RandomOptype> kRandomOpTypeMap = {{"StandardNormal", RANDOM_OP_NORMAL}};

template <typename T>
class RandomOpGpuKernel : public GpuKernel {
 public:
  RandomOpGpuKernel()
      : random_op_type_(RANDOM_OP_INVALID_TYPE),
        input_size_0_(0),
        output_size_(sizeof(T)),
        workspace_size_(sizeof(curandState)) {}
  ~RandomOpGpuKernel() override = default;

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Interprets the workspace as an array of curandState (one per output element) and
  // fills the output buffer with standard-normal samples on the given stream.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    void *workspace_addr = GetDeviceAddress<void *>(workspace, 0);
    curandState *devStates = reinterpret_cast<curandState *>(workspace_addr);
    T *output_addr = GetDeviceAddress<T>(outputs, 0);

    switch (random_op_type_) {
      case RANDOM_OP_NORMAL: {
        StandardNormal(seed_, seed2_, devStates, output_addr, outputs[0]->size / sizeof(T),
                       reinterpret_cast<cudaStream_t>(stream_ptr));
        break;
      }
      default: {
        MS_LOG(EXCEPTION) << "Random operation " << random_op_type_ << " is not supported.";
      }
    }
    return true;
  }

  bool Init(const CNodePtr &kernel_node) override {
    std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node);
    auto iter = kRandomOpTypeMap.find(kernel_name);
    if (iter == kRandomOpTypeMap.end()) {
      MS_LOG(EXCEPTION) << "Random operation " << kernel_name << " is not supported.";
    } else {
      random_op_type_ = iter->second;
    }
    size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
    if (input_num != 1) {
      MS_LOG(ERROR) << "Input number is " << input_num << ", but random op needs 1 input.";
      return false;
    }
    size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
    if (output_num != 1) {
      MS_LOG(ERROR) << "Output number is " << output_num << ", but random op needs 1 output.";
      return false;
    }
    auto input_shape_0 = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
    for (size_t i = 0; i < input_shape_0.size(); i++) {
      input_size_0_ += input_shape_0[i];
    }
    input_size_0_ *= sizeof(int);
    // Output buffer and curandState workspace both scale with the number of output elements.
    auto output_shape = AnfAlgo::GetOutputInferShape(kernel_node, 0);
    for (size_t i = 0; i < output_shape.size(); i++) {
      output_size_ *= output_shape[i];
      workspace_size_ *= output_shape[i];
    }
    seed_ = GetValue<int>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("seed"));
    seed2_ = GetValue<int>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("seed2"));
    InitSizeLists();
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(input_size_0_);
    output_size_list_.push_back(output_size_);
    workspace_size_list_.push_back(workspace_size_);
  }

 private:
  RandomOptype random_op_type_;
  size_t input_size_0_;
  size_t output_size_;
  size_t workspace_size_;
  int seed_;
  int seed2_;
  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
};
}  // namespace kernel
}  // namespace mindspore
#endif // MINDSPORE_CCSRC_KERNEL_GPU_RANDOMOP_GPU_KERNEL_H_
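For reference, the size bookkeeping in Init and InitSizeLists above works out as follows for a concrete case. The snippet is a standalone worked example that mirrors that logic, meant to be built as a .cu file; the output shape {2, 3} and the float element type are assumptions chosen only for illustration.

// Worked example of the size bookkeeping in RandomOpGpuKernel::Init (illustration only).
#include <cstddef>
#include <cstdio>
#include <curand_kernel.h>

int main() {
  const size_t output_shape[] = {2, 3};         // assumed output shape for illustration
  size_t output_size = sizeof(float);           // starts at sizeof(T), T = float here
  size_t workspace_size = sizeof(curandState);  // one curand state per output element
  for (size_t dim : output_shape) {
    output_size *= dim;
    workspace_size *= dim;
  }
  // For shape {2, 3}: 6 elements, so output bytes = 6 * sizeof(float) and
  // workspace bytes = 6 * sizeof(curandState).
  printf("output bytes: %zu, workspace bytes: %zu\n", output_size, workspace_size);
  return 0;
}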