Commit ac3c35c3
Authored on Jul 04, 2020 by mindspore-ci-bot; committed via Gitee on Jul 04, 2020
!2749 GPU update argmaxwithvalue
Merge pull request !2749 from VectorSL/argmaxwithvalue
Parents: b5066e81, f49b8ad6
Showing 4 changed files with 32 additions and 27 deletions (+32, -27)
mindspore/ccsrc/kernel/gpu/arrays/argmaxwithvalue_gpu_kernel.h    +1  -1
mindspore/ccsrc/kernel/gpu/cuda_impl/argmaxwithvalue_impl.cu      +22 -24
mindspore/ccsrc/kernel/gpu/cuda_impl/argmaxwithvalue_impl.cuh     +2  -2
mindspore/ccsrc/kernel/gpu/cuda_impl/unary_op_impl.cu             +7  -0
mindspore/ccsrc/kernel/gpu/arrays/argmaxwithvalue_gpu_kernel.h

@@ -38,7 +38,7 @@ class ArgmaxWithValueGpuKernel : public GpuKernel {
     T *input = GetDeviceAddress<T>(inputs, 0);
     T *output = GetDeviceAddress<T>(outputs, 1);
     S *index = GetDeviceAddress<S>(outputs, 0);
-    CalArgmaxWithValue(input_size_ / sizeof(T), input, bound_, outerSize_, innerSize_, index, output,
+    CalArgmaxWithValue(input, bound_, outerSize_, innerSize_, index, output,
                        reinterpret_cast<cudaStream_t>(stream_ptr));
     return true;
   }
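The call site now passes only the shape descriptors bound_, outerSize_ and innerSize_, which describe the outer x bound x inner decomposition of the input around the reduced axis that the CUDA kernel indexes. As a rough illustration (the kernel's shape setup is not part of this diff, so the helper below is a hypothetical sketch, not repository code), these values would be derived from an input shape and axis roughly like this:

// Hypothetical sketch: deriving bound/outerSize/innerSize from a shape and axis,
// matching the outer * bound * inner indexing used by the ArgmaxWithValue kernel below.
#include <cstddef>
#include <vector>

struct ArgmaxDims {
  int bound;      // extent of the reduced axis
  int outerSize;  // product of the dimensions before the axis
  int innerSize;  // product of the dimensions after the axis
};

inline ArgmaxDims ComputeArgmaxDims(const std::vector<size_t> &shape, size_t axis) {
  ArgmaxDims dims{static_cast<int>(shape[axis]), 1, 1};
  for (size_t i = 0; i < axis; ++i) {
    dims.outerSize *= static_cast<int>(shape[i]);
  }
  for (size_t i = axis + 1; i < shape.size(); ++i) {
    dims.innerSize *= static_cast<int>(shape[i]);
  }
  return dims;
}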
mindspore/ccsrc/kernel/gpu/cuda_impl/argmaxwithvalue_impl.cu

@@ -18,41 +18,39 @@
 #include "device/gpu/cuda_common.h"
 #include "include/cuda_fp16.h"
 template <typename T, typename S>
-__global__ void ArgmaxWithValue(size_t size, const T *input, const int bound, int outerSize, int innerSize, S *index,
-                                T *output) {
-  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
-    for (int i = 0; i < outerSize; i++) {
-      int inputOutterOffset = i * innerSize * bound;
-      int outputOutterOffset = i * innerSize;
-      for (int j = 0; j < innerSize; j++) {
-        auto outputInnerOffset = outputOutterOffset + j;
-        S idx = 0;
-        T maxData = input[j + inputOutterOffset];
-        for (S c = 0; c < bound; c++) {
-          int offset = j + c * innerSize;
-          auto inputData = input[inputOutterOffset + offset];
-          idx = inputData > maxData ? c : idx;
-          maxData = inputData > maxData ? inputData : maxData;
-        }
-        output[outputInnerOffset] = maxData;
-        index[outputInnerOffset] = idx;
-      }
-    }
+__global__ void ArgmaxWithValue(const T *input, const int bound, int outerSize, int innerSize, S *index, T *output) {
+  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (outerSize); pos += blockDim.x * gridDim.x) {
+    int inputOutterOffset = pos * innerSize * bound;
+    int outputOutterOffset = pos * innerSize;
+    for (int j = 0; j < innerSize; j++) {
+      auto outputInnerOffset = outputOutterOffset + j;
+      S idx = 0;
+      T maxData = input[j + inputOutterOffset];
+      for (S c = 0; c < bound; c++) {
+        int offset = j + c * innerSize;
+        auto inputData = input[inputOutterOffset + offset];
+        idx = inputData > maxData ? c : idx;
+        maxData = inputData > maxData ? inputData : maxData;
+      }
+      output[outputInnerOffset] = maxData;
+      index[outputInnerOffset] = idx;
+    }
   }
   return;
 }

 template <typename T, typename S>
-void CalArgmaxWithValue(size_t size, const T *input, const int bound_, const int outerSize_, const int innerSize_,
+void CalArgmaxWithValue(const T *input, const int bound_, const int outerSize_, const int innerSize_,
                         S *index, T *output, cudaStream_t cuda_stream) {
-  ArgmaxWithValue<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, bound_, outerSize_, innerSize_,
-                                                                     index, output);
+  ArgmaxWithValue<<<GET_BLOCKS(outerSize_), GET_THREADS, 0, cuda_stream>>>(input, bound_, outerSize_, innerSize_,
+                                                                           index, output);
   return;
 }

-template void CalArgmaxWithValue<float, int>(size_t size, const float *input, const int bound_, const int outerSize_,
+template void CalArgmaxWithValue<float, int>(const float *input, const int bound_, const int outerSize_,
                                              const int innerSize_, int *index, float *output,
                                              cudaStream_t cuda_stream);
-template void CalArgmaxWithValue<half, int>(size_t size, const half *input, const int bound_, const int outerSize_,
+template void CalArgmaxWithValue<half, int>(const half *input, const int bound_, const int outerSize_,
                                             const int innerSize_, int *index, half *output, cudaStream_t cuda_stream);
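The kernel rewrite above is the substance of this merge: the old version took a flat element count `size`, and every thread's grid-stride loop then walked all outerSize slices, so the same reductions were recomputed by many threads. In the new version `pos` indexes a single outer slice, and the launch in CalArgmaxWithValue is sized with GET_BLOCKS(outerSize_) instead of GET_BLOCKS(size), so each slice is processed once. A plain host-side reference of what the kernel computes, mirroring the new indexing (an illustrative sketch, not code from the repository):

// Illustrative host reference for ArgmaxWithValue: for each (outer, inner) position,
// find the maximum value and its index along the reduced axis of length `bound`.
template <typename T, typename S>
void ArgmaxWithValueRef(const T *input, int bound, int outerSize, int innerSize, S *index, T *output) {
  for (int pos = 0; pos < outerSize; ++pos) {  // one outer slice == one unit of GPU work after this change
    int inputOutterOffset = pos * innerSize * bound;
    int outputOutterOffset = pos * innerSize;
    for (int j = 0; j < innerSize; ++j) {
      S idx = 0;
      T maxData = input[j + inputOutterOffset];
      for (S c = 0; c < bound; ++c) {
        T inputData = input[inputOutterOffset + j + c * innerSize];
        if (inputData > maxData) {
          maxData = inputData;
          idx = c;
        }
      }
      output[outputOutterOffset + j] = maxData;
      index[outputOutterOffset + j] = idx;
    }
  }
}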
mindspore/ccsrc/kernel/gpu/cuda_impl/argmaxwithvalue_impl.cuh

@@ -17,6 +17,6 @@
 #ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_ARGMAXWITHVALUE_H_
 #define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_ARGMAXWITHVALUE_H_
 template <typename T, typename S>
-void CalArgmaxWithValue(size_t size, const T *input, const int bound_, const int outerSize_, const int innerSize_,
-                        S *index, T *output, cudaStream_t cuda_stream);
+void CalArgmaxWithValue(const T *input, const int bound_, const int outerSize_, const int innerSize_, S *index,
+                        T *output, cudaStream_t cuda_stream);
 #endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_ARGMAXWITHVALUE_H_
mindspore/ccsrc/kernel/gpu/cuda_impl/unary_op_impl.cu

@@ -36,6 +36,13 @@ __global__ void LogarithmKernel(T *input, T *output, size_t count) {
   }
   return;
 }
+template <>
+__global__ void LogarithmKernel(half *input, half *output, size_t count) {
+  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
+    output[i] = hlog(input[i]);
+  }
+  return;
+}
 template <typename T>
 __global__ void NegativeKernel(T *input, T *output, size_t count) {
   T neg_one = -1;
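The last hunk adds a template<> specialization so that half inputs go through hlog, the device-side half-precision natural logarithm from cuda_fp16.h, rather than the generic LogarithmKernel path (whose body is collapsed in this view). Below is a standalone sketch that exercises a kernel of the same shape and checks it against single-precision logf on the host; all names here are hypothetical, and hlog requires a device of compute capability 5.3 or newer (e.g. nvcc -arch=sm_53):

// Standalone sketch (not repository code): half-precision log via hlog, verified against logf.
#include <cuda_fp16.h>
#include <cmath>
#include <cstdio>
#include <vector>

__global__ void HalfLogKernel(half *input, half *output, size_t count) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
    output[i] = hlog(input[i]);  // natural log on __half, same intrinsic as the new specialization
  }
}

int main() {
  const size_t n = 256;
  std::vector<half> h_in(n), h_out(n);
  for (size_t i = 0; i < n; ++i) h_in[i] = __float2half(1.0f + static_cast<float>(i));
  half *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(half));
  cudaMalloc(&d_out, n * sizeof(half));
  cudaMemcpy(d_in, h_in.data(), n * sizeof(half), cudaMemcpyHostToDevice);
  HalfLogKernel<<<1, 256>>>(d_in, d_out, n);
  cudaMemcpy(h_out.data(), d_out, n * sizeof(half), cudaMemcpyDeviceToHost);
  for (size_t i = 0; i < 4; ++i) {
    printf("hlog(%g) = %g, logf ref = %g\n", __half2float(h_in[i]), __half2float(h_out[i]),
           logf(__half2float(h_in[i])));
  }
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}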