PaddlePaddle / Paddle-Lite, commit 486d6572 (unverified)

[Code Format] Update code format. (#3890)

Authored by Wilber on Jul 07, 2020; committed via GitHub on Jul 07, 2020.
Parent: 45457074

Showing 10 changed files with 432 additions and 436 deletions (+432 / -436).
lite/kernels/cuda/assign_value_compute_test.cc    +66  -66
lite/kernels/cuda/fc_compute.cu                    +8   -8
lite/kernels/cuda/fc_compute_test.cc              +93  -93
lite/kernels/cuda/sequence_mask_compute.h          +0   -5
lite/kernels/cuda/sequence_mask_compute_test.cc   +48  -48
lite/kernels/cuda/sequence_pad_compute_test.cc    +75  -74
lite/kernels/cuda/sequence_unpad_compute_test.cc  +63  -63
lite/kernels/cuda/transpose_compute.cu             +3   -3
lite/kernels/cuda/transpose_compute.h              +1   -1
lite/kernels/cuda/transpose_compute_test.cc       +75  -75
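The renames in every file below follow one pattern: test-fixture data members gain a trailing underscore (dtype becomes dtype_, Out_gpu becomes out_gpu_), and helper methods move from snake_case to CamelCase verbs (device_init becomes InitParamAndContext, cpu_base becomes RunBaseLine). This matches the member-naming rule in the Google C++ Style Guide, which PaddlePaddle code generally follows. A minimal sketch of the convention, using hypothetical names:

#include <vector>

class FooTest {
 protected:
  // Trailing underscore marks members, so they cannot be confused with
  // locals or parameters inside method bodies.
  int dtype_{5};
  std::vector<int> shape_{1};

  // Methods are CamelCase verbs that say what they do.
  void InitParamAndContext() { /* ... */ }
  void RunBaseLine() { /* ... */ }
};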
lite/kernels/cuda/assign_value_compute_test.cc (view file @ 486d6572)

@@ -33,91 +33,91 @@ namespace cuda {

 class AssignValueTest : public ::testing::Test {
  protected:
-  AssignValueTest() : dtype(5), shape({1}) {
-    int num = std::accumulate(
-        shape.begin(), shape.end(), 1, std::multiplies<int>());
-    fp32_values.resize(num);
-    int32_values.resize(num);
-    int64_values.resize(num);
-    bool_values.resize(num);
+  AssignValueTest() : dtype_(5), shape_({1}) {
+    int num = std::accumulate(
+        shape_.begin(), shape_.end(), 1, std::multiplies<int>());
+    fp32_values_.resize(num);
+    int32_values_.resize(num);
+    int64_values_.resize(num);
+    bool_values_.resize(num);
     for (int i = 0; i < num; ++i) {
-      fp32_values[i] = i + 5;
-      int32_values[i] = i;
-      int64_values[i] = i;
-      bool_values[i] = i;
+      fp32_values_[i] = i + 5;
+      int32_values_[i] = i;
+      int64_values_[i] = i;
+      bool_values_[i] = i;
     }
-    std::vector<int64_t> out_shape(shape.size(), 0);
-    for (size_t i = 0; i < shape.size(); ++i) out_shape[i] = shape[i];
-    Out_ref.Resize(lite::DDim(out_shape));
-    Out_gpu.Resize(Out_ref.dims());
-    Out_cpu.Resize(Out_ref.dims());
+    std::vector<int64_t> out_shape(shape_.size(), 0);
+    for (size_t i = 0; i < shape_.size(); ++i) out_shape[i] = shape_[i];
+    out_ref_.Resize(lite::DDim(out_shape));
+    out_gpu_.Resize(out_ref_.dims());
+    out_cpu_.Resize(out_ref_.dims());

-    cpu_base(&Out_ref);
-    device_init();
+    RunBaseLine(&out_ref_);
+    InitParamAndContext();
   }

-  void device_init() {
-    ctx.reset(new KernelContext);
-    cudaStreamCreate(&stream);
-    auto& context = ctx->As<CUDAContext>();
-    context.SetExecStream(stream);
-    param.shape = shape;
-    param.dtype = dtype;
-    param.fp32_values = fp32_values;
-    param.int32_values = int32_values;
-    param.int64_values = int64_values;
-    param.bool_values = bool_values;
-    param.Out = &Out_gpu;
+  void InitParamAndContext() {
+    ctx_.reset(new KernelContext);
+    cudaStreamCreate(&stream_);
+    auto& context = ctx_->As<CUDAContext>();
+    context.SetExecStream(stream_);
+    param_.shape = shape_;
+    param_.dtype = dtype_;
+    param_.fp32_values = fp32_values_;
+    param_.int32_values = int32_values_;
+    param_.int64_values = int64_values_;
+    param_.bool_values = bool_values_;
+    param_.Out = &out_gpu_;
   }

-  void float_data_init() {}
+  void InitFloatInput() {}

-  void half_data_init() {}
+  void InitHalfInput() {}

-  void cpu_base(lite::Tensor* Out) {
-    if (dtype == static_cast<int>(lite::core::FluidType::INT32)) {
-      for (size_t i = 0; i < int32_values.size(); ++i) {
-        Out->mutable_data<int>()[i] = int32_values[i];
+  void RunBaseLine(lite::Tensor* out) {
+    if (dtype_ == static_cast<int>(lite::core::FluidType::INT32)) {
+      for (size_t i = 0; i < int32_values_.size(); ++i) {
+        out->mutable_data<int>()[i] = int32_values_[i];
       }
-    } else if (dtype == static_cast<int>(lite::core::FluidType::FP32)) {
-      for (size_t i = 0; i < fp32_values.size(); ++i) {
-        Out->mutable_data<float>()[i] = fp32_values[i];
+    } else if (dtype_ == static_cast<int>(lite::core::FluidType::FP32)) {
+      for (size_t i = 0; i < fp32_values_.size(); ++i) {
+        out->mutable_data<float>()[i] = fp32_values_[i];
       }
-    } else if (dtype == static_cast<int>(lite::core::FluidType::INT64)) {
-      for (size_t i = 0; i < int64_values.size(); ++i) {
-        Out->mutable_data<int64_t>()[i] = int64_values[i];
+    } else if (dtype_ == static_cast<int>(lite::core::FluidType::INT64)) {
+      for (size_t i = 0; i < int64_values_.size(); ++i) {
+        out->mutable_data<int64_t>()[i] = int64_values_[i];
       }
-    } else if (dtype == static_cast<bool>(lite::core::FluidType::BOOL)) {
-      for (size_t i = 0; i < bool_values.size(); ++i) {
-        Out->mutable_data<bool>()[i] = bool_values[i];
+    } else if (dtype_ == static_cast<bool>(lite::core::FluidType::BOOL)) {
+      for (size_t i = 0; i < bool_values_.size(); ++i) {
+        out->mutable_data<bool>()[i] = bool_values_[i];
       }
     } else {
-      LOG(FATAL) << "Unsupported dtype for assign_value_op:" << dtype;
+      LOG(FATAL) << "Unsupported dtype_ for assign_value_op:" << dtype_;
     }
   }

-  int dtype;
-  std::vector<int> shape;
-  std::vector<float> fp32_values;
-  std::vector<int> int32_values;
-  std::vector<int64_t> int64_values;
-  std::vector<int> bool_values;
+  int dtype_;
+  std::vector<int> shape_;
+  std::vector<float> fp32_values_;
+  std::vector<int> int32_values_;
+  std::vector<int64_t> int64_values_;
+  std::vector<int> bool_values_;

-  lite::Tensor Out_ref;
-  lite::Tensor Out_gpu;
-  lite::Tensor Out_cpu;
+  lite::Tensor out_ref_;
+  lite::Tensor out_gpu_;
+  lite::Tensor out_cpu_;

-  operators::AssignValueParam param;
-  std::unique_ptr<KernelContext> ctx;
-  cudaStream_t stream;
+  operators::AssignValueParam param_;
+  std::unique_ptr<KernelContext> ctx_;
+  cudaStream_t stream_;
 };

 TEST_F(AssignValueTest, fp32) {
-  float_data_init();
+  InitFloatInput();
   AssignValueCompute kernel;
-  kernel.SetParam(param);
-  kernel.SetContext(std::move(ctx));
+  kernel.SetParam(param_);
+  kernel.SetContext(std::move(ctx_));
   for (int i = 0; i < FLAGS_warmup; ++i) {
     kernel.Launch();

@@ -135,12 +135,12 @@ TEST_F(AssignValueTest, fp32) {
             << ", repeats: " << FLAGS_repeats << ", spend "
             << duration / FLAGS_repeats << " ms in average.";

-  CopySync<TARGET(kCUDA)>(Out_cpu.mutable_data<float>(),
-                          Out_gpu.data<float>(),
-                          sizeof(float) * Out_gpu.numel(),
+  CopySync<TARGET(kCUDA)>(out_cpu_.mutable_data<float>(),
+                          out_gpu_.data<float>(),
+                          sizeof(float) * out_gpu_.numel(),
                           IoDirection::DtoH);
-  for (int i = 0; i < Out_gpu.numel(); ++i) {
-    EXPECT_NEAR(Out_cpu.data<float>()[i], Out_ref.data<float>()[i], 1e-5);
+  for (int i = 0; i < out_gpu_.numel(); ++i) {
+    EXPECT_NEAR(out_cpu_.data<float>()[i], out_ref_.data<float>()[i], 1e-5);
   }
 }
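Every test file in this commit uses the same fixture shape: the constructor builds reference data on the host and calls RunBaseLine, the test launches the CUDA kernel, copies the result back with CopySync, and compares element-wise. A stripped-down, host-only sketch of that pattern (the fixture and helper here are hypothetical stand-ins; std::vector replaces lite::Tensor so the sketch runs without a GPU):

#include <gtest/gtest.h>
#include <vector>

class AssignValueSketchTest : public ::testing::Test {
 protected:
  AssignValueSketchTest() : fp32_values_(4) {
    for (size_t i = 0; i < fp32_values_.size(); ++i) fp32_values_[i] = i + 5;
    RunBaseLine(&out_ref_);  // reference result computed up front
  }

  // Baseline: assign_value simply copies the attribute values to the output.
  void RunBaseLine(std::vector<float>* out) { *out = fp32_values_; }

  std::vector<float> fp32_values_;
  std::vector<float> out_ref_;
};

TEST_F(AssignValueSketchTest, fp32) {
  std::vector<float> out_cpu = fp32_values_;  // would be CopySync'd from GPU
  for (size_t i = 0; i < out_cpu.size(); ++i) {
    EXPECT_NEAR(out_cpu[i], out_ref_[i], 1e-5);
  }
}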
lite/kernels/cuda/fc_compute.cu (view file @ 486d6572)

@@ -33,7 +33,7 @@ struct FcTypeTraits<float> {
 };

 template <typename T>
-__global__ void bias_v4(const int num, const T* bias, T* data, int K) {
+__global__ void AddBiasV4(const int num, const T* bias, T* data, int K) {
   CUDA_KERNEL_LOOP(index, num) {
     int bias_idx = index % K;
     const T bias_ptr = bias[bias_idx];

@@ -48,7 +48,7 @@ __global__ void bias_v4(const int num, const T* bias, T* data, int K) {
 }

 template <typename T>
-__global__ void bias_relu_v4(const int num, const T* bias, T* data, int K) {
+__global__ void AddBiasReluV4(const int num, const T* bias, T* data, int K) {
   CUDA_KERNEL_LOOP(index, num) {
     int bias_idx = index % K;
     const T bias_ptr = bias[bias_idx];

@@ -63,7 +63,7 @@ __global__ void bias_relu_v4(const int num, const T* bias, T* data, int K) {
 }

 template <typename T>
-__global__ void general_bias(const int num, const T* bias, T* data) {
+__global__ void AddBias(const int num, const T* bias, T* data) {
   int offset = blockIdx.x * num;

   for (int i = threadIdx.x; i < num; i += blockDim.x) {

@@ -78,7 +78,7 @@ __global__ void general_bias(const int num, const T* bias, T* data) {
 }

 template <typename T>
-__global__ void general_relu_bias(const int num, const T* bias, T* data) {
+__global__ void AddBiasRelu(const int num, const T* bias, T* data) {
   int offset = blockIdx.x * num;

   for (int i = threadIdx.x; i < num; i += blockDim.x) {

@@ -140,10 +140,10 @@ void FcCompute<T, PType>::Run() {
     const auto* bias_ptr_v4 = reinterpret_cast<const trans_type*>(b_data);
     auto* data_ptr_v4 = reinterpret_cast<trans_type*>(out_data);
     if (activation_type == "relu") {
-      bias_relu_v4<trans_type><<<blocks, threads, 0, stream>>>(
+      AddBiasReluV4<trans_type><<<blocks, threads, 0, stream>>>(
           num, bias_ptr_v4, data_ptr_v4, N / 4);
     } else if (activation_type == "") {
-      bias_v4<trans_type><<<blocks, threads, 0, stream>>>(
+      AddBiasV4<trans_type><<<blocks, threads, 0, stream>>>(
           num, bias_ptr_v4, data_ptr_v4, N / 4);
     } else {
       LOG(FATAL) << "not supported activation type: " << activation_type;

@@ -152,9 +152,9 @@ void FcCompute<T, PType>::Run() {
     const int threads = 256;
     const int blocks = M;
     if (activation_type == "relu") {
-      general_relu_bias<T><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
+      AddBiasRelu<T><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
     } else if (activation_type == "") {
-      general_bias<T><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
+      AddBias<T><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
     } else {
       LOG(FATAL) << "not supported activation type: " << activation_type;
     }
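The renamed kernels keep the two launch shapes already visible above: the V4 variants index vectorized elements with CUDA_KERNEL_LOOP (a grid-stride loop), while AddBias/AddBiasRelu give each output row its own block and stride with threadIdx.x. Below is a self-contained sketch of the grid-stride variant; the macro is written out in the common Caffe-derived form, which Lite's own CUDA_KERNEL_LOOP may differ from slightly, and AddBiasSketch is a hypothetical name:

#include <cstdio>
#include <cuda_runtime.h>

// Grid-stride loop: every thread walks the index space in steps of the
// total number of launched threads.
#define KERNEL_LOOP(i, n)                                      \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

// Adds a length-K bias to a row-major [num / K, K] buffer, like AddBias*.
template <typename T>
__global__ void AddBiasSketch(const int num, const T* bias, T* data, int K) {
  KERNEL_LOOP(index, num) { data[index] += bias[index % K]; }
}

int main() {
  const int K = 4, num = 16;
  float h[16] = {0}, b[4] = {1, 2, 3, 4};
  float *d_data, *d_bias;
  cudaMalloc(&d_data, sizeof(h));
  cudaMalloc(&d_bias, sizeof(b));
  cudaMemcpy(d_data, h, sizeof(h), cudaMemcpyHostToDevice);
  cudaMemcpy(d_bias, b, sizeof(b), cudaMemcpyHostToDevice);
  AddBiasSketch<float><<<2, 8>>>(num, d_bias, d_data, K);
  cudaMemcpy(h, d_data, sizeof(h), cudaMemcpyDeviceToHost);
  printf("%f %f\n", h[0], h[5]);  // 1.0 2.0: bias[0] and bias[5 % 4]
  cudaFree(d_data);
  cudaFree(d_bias);
  return 0;
}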
lite/kernels/cuda/fc_compute_test.cc (view file @ 486d6572)

@@ -31,101 +31,101 @@ namespace cuda {

 class FcTest : public ::testing::Test {
  protected:
   FcTest()
-      : m(128),
-        k(512),
-        n(64),
-        in_num_col_dims(1),
-        act_type("relu"),
-        x_shape({m, k}),
-        w_shape({k, n}),
-        b_shape({n}),
-        out_shape({m, n}) {
-    X_gpu.Resize(lite::DDim(x_shape));
-    X_ref.Resize(lite::DDim(x_shape));
-    W_gpu.Resize(lite::DDim(w_shape));
-    W_ref.Resize(lite::DDim(w_shape));
-    b_gpu.Resize(lite::DDim(b_shape));
-    b_ref.Resize(lite::DDim(b_shape));
-    auto x_ref_data = X_ref.mutable_data<float>();
-    auto w_ref_data = W_ref.mutable_data<float>();
-    auto b_ref_data = b_ref.mutable_data<float>();
+      : m_(128),
+        k_(512),
+        n_(64),
+        in_num_col_dims_(1),
+        act_type_("relu"),
+        x_shape_({m_, k_}),
+        w_shape_({k_, n_}),
+        b_shape_({n_}),
+        out_shape_({m_, n_}) {
+    x_ref_.Resize(lite::DDim(x_shape_));
+    x_gpu_.Resize(lite::DDim(x_shape_));
+    w_ref_.Resize(lite::DDim(w_shape_));
+    w_gpu_.Resize(lite::DDim(w_shape_));
+    b_ref_.Resize(lite::DDim(b_shape_));
+    b_gpu_.Resize(lite::DDim(b_shape_));
+    auto x_ref_data = x_ref_.mutable_data<float>();
+    auto w_ref_data = w_ref_.mutable_data<float>();
+    auto b_ref_data = b_ref_.mutable_data<float>();

     // prepare input
-    for (int64_t i = 0; i < X_ref.numel(); i++) {
+    for (int64_t i = 0; i < x_ref_.numel(); i++) {
       x_ref_data[i] = static_cast<float>(i % 10 * 0.2);
     }
-    for (int64_t i = 0; i < W_ref.numel(); i++) {
+    for (int64_t i = 0; i < w_ref_.numel(); i++) {
       w_ref_data[i] = static_cast<float>(i % 10 * 0.2);
     }
-    for (int64_t i = 0; i < b_ref.numel(); i++) {
+    for (int64_t i = 0; i < b_ref_.numel(); i++) {
       b_ref_data[i] = static_cast<float>(i % 10 * 0.2);
     }

-    Out_ref.Resize(lite::DDim(out_shape));
-    Out_cpu.Resize(Out_ref.dims());
-    Out_gpu.Resize(Out_ref.dims());
-    fc_cpu_base(&X_ref, &W_ref, &b_ref, &Out_ref);
-    device_init();
+    out_ref_.Resize(lite::DDim(out_shape_));
+    out_cpu_.Resize(out_ref_.dims());
+    out_gpu_.Resize(out_ref_.dims());
+    RunBaseLine(&x_ref_, &w_ref_, &b_ref_, &out_ref_);
+    InitParamAndContext();
   }

-  void device_init() {
-    ctx.reset(new KernelContext);
-    cudaStreamCreate(&stream);
-    auto& context = ctx->As<CUDAContext>();
-    context.SetExecStream(stream);
-    param.input = &X_gpu;
-    param.w = &W_gpu;
-    param.bias = &b_gpu;
-    param.in_num_col_dims = in_num_col_dims;
-    param.activation_type = act_type;
-    param.output = &Out_gpu;
+  void InitParamAndContext() {
+    ctx_.reset(new KernelContext);
+    cudaStreamCreate(&stream_);
+    auto& context = ctx_->As<CUDAContext>();
+    context.SetExecStream(stream_);
+    param_.input = &x_gpu_;
+    param_.w = &w_gpu_;
+    param_.bias = &b_gpu_;
+    param_.in_num_col_dims = in_num_col_dims_;
+    param_.activation_type = act_type_;
+    param_.output = &out_gpu_;
   }

-  void float_data_init() {
-    X_gpu.Assign<float, lite::DDim, TARGET(kCUDA)>(X_ref.data<float>(),
-                                                   X_gpu.dims());
-    W_gpu.Assign<float, lite::DDim, TARGET(kCUDA)>(W_ref.data<float>(),
-                                                   W_gpu.dims());
-    b_gpu.Assign<float, lite::DDim, TARGET(kCUDA)>(b_ref.data<float>(),
-                                                   b_gpu.dims());
+  void InitFloatInput() {
+    x_gpu_.Assign<float, lite::DDim, TARGET(kCUDA)>(x_ref_.data<float>(),
+                                                    x_gpu_.dims());
+    w_gpu_.Assign<float, lite::DDim, TARGET(kCUDA)>(w_ref_.data<float>(),
+                                                    w_gpu_.dims());
+    b_gpu_.Assign<float, lite::DDim, TARGET(kCUDA)>(b_ref_.data<float>(),
+                                                    b_gpu_.dims());
   }

-  void half_data_init() {
-    X_half.Resize(lite::DDim(x_shape));
-    auto x_half_data = X_half.mutable_data<half>();
-    for (int64_t i = 0; i < X_half.numel(); i++) {
-      x_half_data[i] = half(lite::float16(X_ref.data<float>()[i]));
+  void InitHalfInput() {
+    x_half_.Resize(lite::DDim(x_shape_));
+    auto x_half_data = x_half_.mutable_data<half>();
+    for (int64_t i = 0; i < x_half_.numel(); i++) {
+      x_half_data[i] = half(lite::float16(x_ref_.data<float>()[i]));
     }
-    X_gpu.Assign<half, lite::DDim, TARGET(kCUDA)>(x_half_data, X_gpu.dims());
-    W_half.Resize(W_ref.dims());
-    auto w_half_data = W_half.mutable_data<half>();
-    for (int64_t i = 0; i < W_half.numel(); i++) {
-      w_half_data[i] = half(lite::float16(W_ref.data<float>()[i]));
+    x_gpu_.Assign<half, lite::DDim, TARGET(kCUDA)>(x_half_data, x_gpu_.dims());
+    w_half_.Resize(w_ref_.dims());
+    auto w_half_data = w_half_.mutable_data<half>();
+    for (int64_t i = 0; i < w_half_.numel(); i++) {
+      w_half_data[i] = half(lite::float16(w_ref_.data<float>()[i]));
     }
-    W_gpu.Assign<half, lite::DDim, TARGET(kCUDA)>(w_half_data, W_gpu.dims());
-    b_half.Resize(b_ref.dims());
-    auto b_half_data = b_half.mutable_data<half>();
-    for (int64_t i = 0; i < b_half.numel(); i++) {
-      b_half_data[i] = half(lite::float16(b_ref.data<float>()[i]));
+    w_gpu_.Assign<half, lite::DDim, TARGET(kCUDA)>(w_half_data, w_gpu_.dims());
+    b_half_.Resize(b_ref_.dims());
+    auto b_half_data = b_half_.mutable_data<half>();
+    for (int64_t i = 0; i < b_half_.numel(); i++) {
+      b_half_data[i] = half(lite::float16(b_ref_.data<float>()[i]));
     }
-    b_gpu.Assign<half, lite::DDim, TARGET(kCUDA)>(b_half_data, b_gpu.dims());
+    b_gpu_.Assign<half, lite::DDim, TARGET(kCUDA)>(b_half_data,
+                                                   b_gpu_.dims());
   }

-  void fc_cpu_base(const lite::Tensor* X,
-                   const lite::Tensor* W,
-                   const lite::Tensor* b,
-                   lite::Tensor* Out) {
-    const float* data_in = X->data<float>();
+  void RunBaseLine(const lite::Tensor* x,
+                   const lite::Tensor* w,
+                   const lite::Tensor* b,
+                   lite::Tensor* out) {
+    const float* data_in = x->data<float>();
     const float* bias = b->data<float>();
-    const float* weights = W->data<float>();
-    float* data_out = Out->mutable_data<float>();
-    int out_rows = X->dims()[0];
-    int in_cols = X->numel() / out_rows;
-    int out_cols = W->numel() / in_cols;
+    const float* weights = w->data<float>();
+    float* data_out = out->mutable_data<float>();
+    int out_rows = x->dims()[0];
+    int in_cols = x->numel() / out_rows;
+    int out_cols = w->numel() / in_cols;
     int index_out;

     for (int i = 0; i < out_rows; i++) {
       for (int j = 0; j < out_cols; j++) {

@@ -135,31 +135,31 @@ class FcTest : public ::testing::Test {
           data_out[index_out] +=
               data_in[i * in_cols + k] * weights[k * out_cols + j];
         }
-        if (act_type == "relu") {
+        if (act_type_ == "relu") {
           data_out[index_out] *= static_cast<int>(data_out[index_out] > 0);
         }
       }
     }
   }

-  int m, k, n, in_num_col_dims;
-  std::string act_type;
-  std::vector<int64_t> x_shape, w_shape, b_shape, out_shape;
-  lite::Tensor X_ref, W_ref, b_ref, Out_ref;
-  lite::Tensor X_gpu, W_gpu, b_gpu;
-  lite::Tensor X_half, W_half, b_half;
-  lite::Tensor Out_cpu, Out_gpu;
+  int m_, k_, n_, in_num_col_dims_;
+  std::string act_type_;
+  std::vector<int64_t> x_shape_, w_shape_, b_shape_, out_shape_;
+  lite::Tensor x_ref_, w_ref_, b_ref_, out_ref_;
+  lite::Tensor x_gpu_, w_gpu_, b_gpu_;
+  lite::Tensor x_half_, w_half_, b_half_;
+  lite::Tensor out_cpu_, out_gpu_;

-  operators::FcParam param;
-  std::unique_ptr<KernelContext> ctx;
-  cudaStream_t stream;
+  operators::FcParam param_;
+  std::unique_ptr<KernelContext> ctx_;
+  cudaStream_t stream_;
 };

 TEST_F(FcTest, TestFP32) {
-  float_data_init();
+  InitFloatInput();
   FcCompute<float, PRECISION(kFloat)> kernel;
-  kernel.SetParam(param);
-  kernel.SetContext(std::move(ctx));
+  kernel.SetParam(param_);
+  kernel.SetContext(std::move(ctx_));
   for (int i = 0; i < FLAGS_warmup; ++i) {
     kernel.Launch();

@@ -177,14 +177,14 @@ TEST_F(FcTest, TestFP32) {
             << ", repeats: " << FLAGS_repeats << ", spend "
             << duration / FLAGS_repeats << " ms in average.";

-  CopySync<TARGET(kCUDA)>(Out_cpu.mutable_data<float>(),
-                          Out_gpu.data<float>(),
-                          sizeof(float) * Out_gpu.numel(),
+  CopySync<TARGET(kCUDA)>(out_cpu_.mutable_data<float>(),
+                          out_gpu_.data<float>(),
+                          sizeof(float) * out_gpu_.numel(),
                           IoDirection::DtoH);
-  for (int i = 0; i < Out_gpu.numel(); ++i) {
-    float res = Out_cpu.data<float>()[i];
-    float ref = Out_ref.data<float>()[i];
+  for (int i = 0; i < out_gpu_.numel(); ++i) {
+    float res = out_cpu_.data<float>()[i];
+    float ref = out_ref_.data<float>()[i];
     EXPECT_NEAR(fabs(res - ref) / ref, 0.f, 1e-5);
   }
 }
lite/kernels/cuda/sequence_mask_compute.h (view file @ 486d6572)

@@ -28,11 +28,6 @@ class SequenceMaskCompute : public KernelLite<TARGET(kCUDA), Ptype> {
   void Run() override;

   virtual ~SequenceMaskCompute() = default;
-
-  // private:
-  //   lite::Tensor seq_offsets_;
-  //   std::vector<int64_t> seq_len_;
-  //   std::vector<size_t> seq_offsets_vec_;
 };

 }  // namespace cuda
lite/kernels/cuda/sequence_mask_compute_test.cc (view file @ 486d6572)

@@ -32,73 +32,73 @@ namespace cuda {

 class SequenceMaskTest : public ::testing::Test {
  protected:
   SequenceMaskTest()
-      : maxlen(4),
-        out_dtype(5),
-        x_data({3, 2, 1, 0}),
-        out_shape({static_cast<int64_t>(x_data.size()), maxlen}) {
-    X_ref.Resize(lite::DDim({static_cast<int64_t>(x_data.size())}));
-    X_gpu.Resize(X_ref.dims());
+      : maxlen_(4),
+        out_dtype_(5),
+        x_data_({3, 2, 1, 0}),
+        out_shape_({static_cast<int64_t>(x_data_.size()), maxlen_}) {
+    x_ref_.Resize(lite::DDim({static_cast<int64_t>(x_data_.size())}));
+    x_gpu_.Resize(x_ref_.dims());

-    auto* x_ref_data = X_ref.mutable_data<int64_t>();
+    auto* x_ref_data = x_ref_.mutable_data<int64_t>();

     // prepare input
-    for (size_t i = 0; i < x_data.size(); i++) {
-      x_ref_data[i] = x_data[i];
+    for (size_t i = 0; i < x_data_.size(); i++) {
+      x_ref_data[i] = x_data_[i];
     }

-    Out_ref.Resize(lite::DDim(out_shape));
-    Out_gpu.Resize(Out_ref.dims());
-    Out_cpu.Resize(Out_ref.dims());
-    cpu_base(&X_ref, &Out_ref);
+    out_ref_.Resize(lite::DDim(out_shape_));
+    out_gpu_.Resize(out_ref_.dims());
+    out_cpu_.Resize(out_ref_.dims());
+    RunBaseLine(&x_ref_, &out_ref_);

-    device_init();
+    InitParamAndContext();
   }

-  void device_init() {
-    ctx.reset(new KernelContext);
-    cudaStreamCreate(&stream);
-    auto& context = ctx->As<CUDAContext>();
-    context.SetExecStream(stream);
-    param.X = &X_gpu;
-    param.Y = &Out_gpu;
-    param.maxlen = maxlen;
-    param.out_dtype = out_dtype;
+  void InitParamAndContext() {
+    ctx_.reset(new KernelContext);
+    cudaStreamCreate(&stream_);
+    auto& context = ctx_->As<CUDAContext>();
+    context.SetExecStream(stream_);
+    param_.X = &x_gpu_;
+    param_.Y = &out_gpu_;
+    param_.maxlen = maxlen_;
+    param_.out_dtype = out_dtype_;
   }

-  void float_data_init() {
-    X_gpu.Assign<int64_t, lite::DDim, TARGET(kCUDA)>(X_ref.data<int64_t>(),
-                                                     X_gpu.dims());
+  void InitFloatInput() {
+    x_gpu_.Assign<int64_t, lite::DDim, TARGET(kCUDA)>(x_ref_.data<int64_t>(),
+                                                      x_gpu_.dims());
   }

-  void half_data_init() {}
+  void InitHalfInput() {}

-  void cpu_base(const lite::Tensor* X, lite::Tensor* Out) {
-    auto* out_data = Out->mutable_data<float>();
+  void RunBaseLine(const lite::Tensor* x, lite::Tensor* out) {
+    auto* out_data = out->mutable_data<float>();

-    for (size_t i = 0; i < x_data.size(); ++i) {
-      for (int j = 0; j < maxlen; ++j) {
-        out_data[i * maxlen + j] = j < x_data[i] ? 1 : 0;
+    for (size_t i = 0; i < x_data_.size(); ++i) {
+      for (int j = 0; j < maxlen_; ++j) {
+        out_data[i * maxlen_ + j] = j < x_data_[i] ? 1 : 0;
       }
     }
   }

-  int maxlen, out_dtype;
-  std::vector<int64_t> x_data, out_shape;
-  lite::Tensor X_ref, Out_ref;
-  lite::Tensor X_gpu, Out_gpu;
-  lite::Tensor Out_cpu;
+  int maxlen_, out_dtype_;
+  std::vector<int64_t> x_data_, out_shape_;
+  lite::Tensor x_ref_, out_ref_;
+  lite::Tensor x_gpu_, out_gpu_;
+  lite::Tensor out_cpu_;

-  operators::SequenceMaskParam param;
-  std::unique_ptr<KernelContext> ctx;
-  cudaStream_t stream;
+  operators::SequenceMaskParam param_;
+  std::unique_ptr<KernelContext> ctx_;
+  cudaStream_t stream_;
 };

 TEST_F(SequenceMaskTest, fp32) {
-  float_data_init();
+  InitFloatInput();
   SequenceMaskCompute<float, PRECISION(kFloat)> kernel;
-  kernel.SetParam(param);
-  kernel.SetContext(std::move(ctx));
+  kernel.SetParam(param_);
+  kernel.SetContext(std::move(ctx_));
   for (int i = 0; i < FLAGS_warmup; ++i) {
     kernel.Launch();

@@ -116,12 +116,12 @@ TEST_F(SequenceMaskTest, fp32) {
             << ", repeats: " << FLAGS_repeats << ", spend "
             << duration / FLAGS_repeats << " ms in average.";

-  CopySync<TARGET(kCUDA)>(Out_cpu.mutable_data<float>(),
-                          Out_gpu.data<float>(),
-                          sizeof(float) * Out_gpu.numel(),
+  CopySync<TARGET(kCUDA)>(out_cpu_.mutable_data<float>(),
+                          out_gpu_.data<float>(),
+                          sizeof(float) * out_gpu_.numel(),
                           IoDirection::DtoH);
-  for (int i = 0; i < Out_gpu.numel(); ++i) {
-    EXPECT_NEAR(Out_cpu.data<float>()[i], Out_ref.data<float>()[i], 1e-5);
+  for (int i = 0; i < out_gpu_.numel(); ++i) {
+    EXPECT_NEAR(out_cpu_.data<float>()[i], out_ref_.data<float>()[i], 1e-5);
   }
 }
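For this fixture's data (x_data_ = {3, 2, 1, 0}, maxlen_ = 4), the baseline above emits one mask row per sequence: 1 while j < x[i], then 0. A tiny stand-alone sketch that prints that exact mask:

#include <cstdio>
#include <vector>

int main() {
  const std::vector<long long> x = {3, 2, 1, 0};  // sequence lengths
  const int maxlen = 4;
  for (size_t i = 0; i < x.size(); ++i) {
    for (int j = 0; j < maxlen; ++j) printf("%d ", j < x[i] ? 1 : 0);
    printf("\n");  // rows: 1 1 1 0 / 1 1 0 0 / 1 0 0 0 / 0 0 0 0
  }
  return 0;
}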
lite/kernels/cuda/sequence_pad_compute_test.cc (view file @ 486d6572)

@@ -23,7 +23,7 @@
 #include "lite/api/test_helper.h"
 #include "lite/backends/cuda/cuda_utils.h"
-// #include "lite/utils/float16.h"
+#include "lite/utils/float16.h"

 namespace paddle {
 namespace lite {

@@ -33,72 +33,73 @@ namespace cuda {

 class SequencePadTest : public ::testing::Test {
  protected:
   SequencePadTest()
-      : batch(5),
-        features(2),
-        padded_length(3),
-        x_lod({{0, 2, 5}}),
-        x_shape({batch, features}),
-        pad_value_shape({features}),
-        out_shape({static_cast<int64_t>(x_lod[0].size() - 1),
-                   padded_length,
-                   features}) {
-    X_ref.Resize(lite::DDim(x_shape));
-    X_ref.set_lod(x_lod);
-    X_gpu.Resize(X_ref.dims());
-    PadValue_ref.Resize(lite::DDim(pad_value_shape));
-    PadValue_gpu.Resize(PadValue_ref.dims());
-    Length_ref.Resize(lite::DDim({static_cast<int64_t>(x_lod[0].size() - 1)}));
-    Length_gpu.Resize(Length_ref.dims());
+      : batch_(5),
+        features_(2),
+        padded_length_(3),
+        x_lod_({{0, 2, 5}}),
+        x_shape_({batch_, features_}),
+        pad_value_shape_({features_}),
+        out_shape_({static_cast<int64_t>(x_lod_[0].size() - 1),
+                    padded_length_,
+                    features_}) {
+    x_ref_.Resize(lite::DDim(x_shape_));
+    x_ref_.set_lod(x_lod_);
+    x_gpu_.Resize(x_ref_.dims());
+    pad_value_ref_.Resize(lite::DDim(pad_value_shape_));
+    pad_value_gpu_.Resize(pad_value_ref_.dims());
+    length_ref_.Resize(
+        lite::DDim({static_cast<int64_t>(x_lod_[0].size() - 1)}));
+    length_gpu_.Resize(length_ref_.dims());

-    auto x_ref_data = X_ref.mutable_data<float>();
-    auto pad_value_ref_data = PadValue_ref.mutable_data<float>();
+    auto x_ref_data = x_ref_.mutable_data<float>();
+    auto pad_value_ref_data = pad_value_ref_.mutable_data<float>();

     // prepare input
-    for (int64_t i = 0; i < X_ref.numel(); i++) {
+    for (int64_t i = 0; i < x_ref_.numel(); i++) {
       x_ref_data[i] = static_cast<float>(i);
     }
-    for (int64_t i = 0; i < PadValue_ref.numel(); i++) {
+    for (int64_t i = 0; i < pad_value_ref_.numel(); i++) {
       pad_value_ref_data[i] = static_cast<float>(i);
     }

-    Out_ref.Resize(lite::DDim(out_shape));
-    Out_gpu.Resize(Out_ref.dims());
-    Out_cpu.Resize(Out_ref.dims());
-    cpu_base(&X_ref, &PadValue_ref, &Out_ref, &Length_ref);
+    out_ref_.Resize(lite::DDim(out_shape_));
+    out_gpu_.Resize(out_ref_.dims());
+    out_cpu_.Resize(out_ref_.dims());
+    RunBaseLine(&x_ref_, &pad_value_ref_, &out_ref_, &length_ref_);

-    device_init();
+    InitParamAndContext();
   }

-  void device_init() {
-    ctx.reset(new KernelContext);
-    cudaStreamCreate(&stream);
-    auto& context = ctx->As<CUDAContext>();
-    context.SetExecStream(stream);
-    param.X = &X_gpu;
-    param.PadValue = &PadValue_gpu;
-    param.Length = &Length_gpu;
-    param.Out = &Out_gpu;
-    param.padded_length = padded_length;
+  void InitParamAndContext() {
+    ctx_.reset(new KernelContext);
+    cudaStreamCreate(&stream_);
+    auto& context = ctx_->As<CUDAContext>();
+    context.SetExecStream(stream_);
+    param_.X = &x_gpu_;
+    param_.PadValue = &pad_value_gpu_;
+    param_.Length = &length_gpu_;
+    param_.Out = &out_gpu_;
+    param_.padded_length = padded_length_;
   }

-  void float_data_init() {
-    X_gpu.Assign<float, lite::DDim, TARGET(kCUDA)>(X_ref.data<float>(),
-                                                   X_gpu.dims());
-    X_gpu.set_lod(X_ref.lod());
-    PadValue_gpu.Assign<float, lite::DDim, TARGET(kCUDA)>(
-        PadValue_ref.data<float>(), PadValue_gpu.dims());
+  void InitFloatInput() {
+    x_gpu_.Assign<float, lite::DDim, TARGET(kCUDA)>(x_ref_.data<float>(),
+                                                    x_gpu_.dims());
+    x_gpu_.set_lod(x_ref_.lod());
+    pad_value_gpu_.Assign<float, lite::DDim, TARGET(kCUDA)>(
+        pad_value_ref_.data<float>(), pad_value_gpu_.dims());
   }

-  void half_data_init() {}
+  void InitHalfInput() {}

-  void cpu_base(const lite::Tensor* X,
-                const lite::Tensor* PadValue,
-                lite::Tensor* Out,
-                lite::Tensor* Length) {
-    auto* length_data = Length->mutable_data<int64_t>();
-    auto* out_data = Out->mutable_data<float>();
+  void RunBaseLine(const lite::Tensor* x,
+                   const lite::Tensor* pad_value,
+                   lite::Tensor* out,
+                   lite::Tensor* length) {
+    auto* length_data = length->mutable_data<int64_t>();
+    auto* out_data = out->mutable_data<float>();
     length_data[0] = 2;
     length_data[1] = 3;

@@ -112,24 +113,24 @@ class SequencePadTest : public ::testing::Test {
     }
   }

-  int batch, features, padded_length;
-  LoD x_lod;
-  std::vector<int64_t> x_shape, pad_value_shape, out_shape;
+  int batch_, features_, padded_length_;
+  LoD x_lod_;
+  std::vector<int64_t> x_shape_, pad_value_shape_, out_shape_;

-  lite::Tensor X_ref, PadValue_ref, Out_ref, Length_ref;
-  lite::Tensor X_gpu, PadValue_gpu, Out_gpu, Length_gpu;
-  lite::Tensor Out_cpu, Length_cpu;
+  lite::Tensor x_ref_, pad_value_ref_, out_ref_, length_ref_;
+  lite::Tensor x_gpu_, pad_value_gpu_, out_gpu_, length_gpu_;
+  lite::Tensor out_cpu_, length_cpu_;

-  operators::SequencePadParam param;
-  std::unique_ptr<KernelContext> ctx;
-  cudaStream_t stream;
+  operators::SequencePadParam param_;
+  std::unique_ptr<KernelContext> ctx_;
+  cudaStream_t stream_;
 };

 TEST_F(SequencePadTest, fp32) {
-  float_data_init();
+  InitFloatInput();
   SequencePadCompute<float, PRECISION(kFloat)> kernel;
-  kernel.SetParam(param);
-  kernel.SetContext(std::move(ctx));
+  kernel.SetParam(param_);
+  kernel.SetContext(std::move(ctx_));
   for (int i = 0; i < FLAGS_warmup; ++i) {
     kernel.Launch();

@@ -147,20 +148,20 @@ TEST_F(SequencePadTest, fp32) {
             << ", repeats: " << FLAGS_repeats << ", spend "
             << duration / FLAGS_repeats << " ms in average.";

-  CopySync<TARGET(kCUDA)>(Out_cpu.mutable_data<float>(),
-                          Out_gpu.data<float>(),
-                          sizeof(float) * Out_gpu.numel(),
+  CopySync<TARGET(kCUDA)>(out_cpu_.mutable_data<float>(),
+                          out_gpu_.data<float>(),
+                          sizeof(float) * out_gpu_.numel(),
                           IoDirection::DtoH);
-  CopySync<TARGET(kCUDA)>(Length_cpu.mutable_data<int64_t>(),
-                          Length_gpu.data<int64_t>(),
-                          sizeof(int64_t) * Length_gpu.numel(),
+  CopySync<TARGET(kCUDA)>(length_cpu_.mutable_data<int64_t>(),
+                          length_gpu_.data<int64_t>(),
+                          sizeof(int64_t) * length_gpu_.numel(),
                           IoDirection::DtoH);

-  for (int i = 0; i < Out_gpu.numel(); ++i) {
-    EXPECT_NEAR(Out_cpu.data<float>()[i], Out_ref.data<float>()[i], 1e-5);
+  for (int i = 0; i < out_gpu_.numel(); ++i) {
+    EXPECT_NEAR(out_cpu_.data<float>()[i], out_ref_.data<float>()[i], 1e-5);
   }
-  for (int i = 0; i < Length_gpu.numel(); ++i) {
-    EXPECT_NEAR(
-        Length_cpu.data<int64_t>()[i], Length_ref.data<int64_t>()[i], 1e-5);
+  for (int i = 0; i < length_gpu_.numel(); ++i) {
+    EXPECT_NEAR(
+        length_cpu_.data<int64_t>()[i], length_ref_.data<int64_t>()[i], 1e-5);
   }
 }
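The pad test's LoD {{0, 2, 5}} encodes two sequences as offsets into the flattened batch of 5 rows; the lengths the baseline hard-codes (2 and 3) are just adjacent offset differences, which is also exactly how the unpad test below computes length_ref_data. A small sketch of that offsets-to-lengths step:

#include <cstdio>
#include <vector>

int main() {
  // One LoD level: offsets of each sequence within the flattened batch.
  const std::vector<size_t> lod = {0, 2, 5};
  for (size_t i = 0; i + 1 < lod.size(); ++i) {
    printf("sequence %zu has length %zu\n", i, lod[i + 1] - lod[i]);
  }
  return 0;  // prints lengths 2 and 3
}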
lite/kernels/cuda/sequence_unpad_compute_test.cc (view file @ 486d6572)

@@ -23,7 +23,7 @@
 #include "lite/api/test_helper.h"
 #include "lite/backends/cuda/cuda_utils.h"
-// #include "lite/utils/float16.h"
+#include "lite/utils/float16.h"

 namespace paddle {
 namespace lite {

@@ -33,66 +33,66 @@ namespace cuda {

 class SequenceUnpadTest : public ::testing::Test {
  protected:
   SequenceUnpadTest()
-      : batch(5),
-        features(2),
-        padded_length(3),
-        out_lod({{0, 2, 5}}),
-        x_shape({static_cast<int64_t>(out_lod[0].size() - 1),
-                 padded_length,
-                 features}),
-        out_shape({batch, features}) {
-    X_ref.Resize(lite::DDim(x_shape));
-    X_gpu.Resize(X_ref.dims());
-    Length_ref.Resize(lite::DDim({static_cast<int64_t>(out_lod[0].size() - 1)}));
-    Length_gpu.Resize(Length_ref.dims());
+      : batch_(5),
+        features_(2),
+        padded_length_(3),
+        out_lod_({{0, 2, 5}}),
+        x_shape_({static_cast<int64_t>(out_lod_[0].size() - 1),
+                  padded_length_,
+                  features_}),
+        out_shape_({batch_, features_}) {
+    x_ref_.Resize(lite::DDim(x_shape_));
+    x_gpu_.Resize(x_ref_.dims());
+    length_ref_.Resize(
+        lite::DDim({static_cast<int64_t>(out_lod_[0].size() - 1)}));
+    length_gpu_.Resize(length_ref_.dims());

-    auto* x_ref_data = X_ref.mutable_data<float>();
-    auto* length_ref_data = Length_ref.mutable_data<int64_t>();
+    auto* x_ref_data = x_ref_.mutable_data<float>();
+    auto* length_ref_data = length_ref_.mutable_data<int64_t>();

     // prepare input
-    for (int64_t i = 0; i < X_ref.numel(); i++) {
+    for (int64_t i = 0; i < x_ref_.numel(); i++) {
       x_ref_data[i] = static_cast<float>(i);
     }
-    for (size_t i = 0; i < out_lod[0].size() - 1; ++i) {
-      length_ref_data[i] = out_lod[0][i + 1] - out_lod[0][i];
+    for (size_t i = 0; i < out_lod_[0].size() - 1; ++i) {
+      length_ref_data[i] = out_lod_[0][i + 1] - out_lod_[0][i];
     }

-    Out_ref.Resize(lite::DDim(out_shape));
-    Out_ref.set_lod(out_lod);
-    Out_gpu.Resize(Out_ref.dims());
-    Out_gpu.set_lod(Out_ref.lod());
-    Out_cpu.Resize(Out_ref.dims());
-    Out_cpu.set_lod(Out_ref.lod());
+    out_ref_.Resize(lite::DDim(out_shape_));
+    out_ref_.set_lod(out_lod_);
+    out_gpu_.Resize(out_ref_.dims());
+    out_gpu_.set_lod(out_ref_.lod());
+    out_cpu_.Resize(out_ref_.dims());
+    out_cpu_.set_lod(out_ref_.lod());

-    cpu_base(&X_ref, &Length_ref, &Out_ref);
-    device_init();
+    RunBaseLine(&x_ref_, &length_ref_, &out_ref_);
+    InitParamAndContext();
   }

-  void device_init() {
-    ctx.reset(new KernelContext);
-    cudaStreamCreate(&stream);
-    auto& context = ctx->As<CUDAContext>();
-    context.SetExecStream(stream);
-    param.X = &X_gpu;
-    param.Length = &Length_gpu;
-    param.Out = &Out_gpu;
+  void InitParamAndContext() {
+    ctx_.reset(new KernelContext);
+    cudaStreamCreate(&stream_);
+    auto& context = ctx_->As<CUDAContext>();
+    context.SetExecStream(stream_);
+    param_.X = &x_gpu_;
+    param_.Length = &length_gpu_;
+    param_.Out = &out_gpu_;
   }

-  void float_data_init() {
-    X_gpu.Assign<float, lite::DDim, TARGET(kCUDA)>(X_ref.data<float>(),
-                                                   X_gpu.dims());
-    Length_gpu.Assign<int64_t, lite::DDim, TARGET(kCUDA)>(
-        Length_ref.data<int64_t>(), Length_gpu.dims());
+  void InitFloatInput() {
+    x_gpu_.Assign<float, lite::DDim, TARGET(kCUDA)>(x_ref_.data<float>(),
+                                                    x_gpu_.dims());
+    length_gpu_.Assign<int64_t, lite::DDim, TARGET(kCUDA)>(
+        length_ref_.data<int64_t>(), length_gpu_.dims());
   }

-  void half_data_init() {}
+  void InitHalfInput() {}

-  void cpu_base(const lite::Tensor* X,
-                const lite::Tensor* Length,
-                lite::Tensor* Out) {
+  void RunBaseLine(const lite::Tensor* X,
+                   const lite::Tensor* Length,
+                   lite::Tensor* Out) {
     auto* out_data = Out->mutable_data<float>();
     for (size_t i = 0; i < 4; ++i) {

@@ -103,24 +103,24 @@ class SequenceUnpadTest : public ::testing::Test {
     }
   }

-  int batch, features, padded_length;
-  LoD out_lod;
-  std::vector<int64_t> x_shape, out_shape;
+  int batch_, features_, padded_length_;
+  LoD out_lod_;
+  std::vector<int64_t> x_shape_, out_shape_;

-  lite::Tensor X_ref, Out_ref, Length_ref;
-  lite::Tensor X_gpu, Out_gpu, Length_gpu;
-  lite::Tensor Out_cpu, Length_cpu;
+  lite::Tensor x_ref_, out_ref_, length_ref_;
+  lite::Tensor x_gpu_, out_gpu_, length_gpu_;
+  lite::Tensor out_cpu_, length_cpu_;

-  operators::SequencePadParam param;
-  std::unique_ptr<KernelContext> ctx;
-  cudaStream_t stream;
+  operators::SequencePadParam param_;
+  std::unique_ptr<KernelContext> ctx_;
+  cudaStream_t stream_;
 };

 TEST_F(SequenceUnpadTest, fp32) {
-  float_data_init();
+  InitFloatInput();
   SequenceUnpadCompute<float, PRECISION(kFloat)> kernel;
-  kernel.SetParam(param);
-  kernel.SetContext(std::move(ctx));
+  kernel.SetParam(param_);
+  kernel.SetContext(std::move(ctx_));
   for (int i = 0; i < FLAGS_warmup; ++i) {
     kernel.Launch();

@@ -138,12 +138,12 @@ TEST_F(SequenceUnpadTest, fp32) {
             << ", repeats: " << FLAGS_repeats << ", spend "
             << duration / FLAGS_repeats << " ms in average.";

-  CopySync<TARGET(kCUDA)>(Out_cpu.mutable_data<float>(),
-                          Out_gpu.data<float>(),
-                          sizeof(float) * Out_gpu.numel(),
+  CopySync<TARGET(kCUDA)>(out_cpu_.mutable_data<float>(),
+                          out_gpu_.data<float>(),
+                          sizeof(float) * out_gpu_.numel(),
                           IoDirection::DtoH);
-  for (int i = 0; i < Out_gpu.numel(); ++i) {
-    EXPECT_NEAR(Out_cpu.data<float>()[i], Out_ref.data<float>()[i], 1e-5);
+  for (int i = 0; i < out_gpu_.numel(); ++i) {
+    EXPECT_NEAR(out_cpu_.data<float>()[i], out_ref_.data<float>()[i], 1e-5);
   }
 }
lite/kernels/cuda/transpose_compute.cu (view file @ 486d6572)

@@ -43,7 +43,7 @@ void TransposeCompute<T, Ptype>::Run() {
   // NCHW -> NHWC
   if (axes.size() == 4 && axes[0] == 0 && axes[1] == 2 && axes[2] == 3 &&
       axes[3] == 1) {
-    trans.NCHW2NHWC(dims[0], dims[1], dims[2] * dims[3], in, out, &stream);
+    trans_.NCHW2NHWC(dims[0], dims[1], dims[2] * dims[3], in, out, &stream);
     cudaError_t error = cudaGetLastError();
     if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error);
     return;

@@ -52,13 +52,13 @@ void TransposeCompute<T, Ptype>::Run() {
   // NHWC -> NCHW
   if (axes.size() == 4 && axes[0] == 0 && axes[1] == 3 && axes[2] == 1 &&
       axes[3] == 2) {
-    trans.NHWC2NCHW(dims[0], dims[3], dims[1] * dims[2], in, out, &stream);
+    trans_.NHWC2NCHW(dims[0], dims[3], dims[1] * dims[2], in, out, &stream);
     cudaError_t error = cudaGetLastError();
     if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error);
     return;
   }

-  trans.transpose(out, in, dims, axes, &stream);
+  trans_.transpose(out, in, dims, axes, &stream);
   cudaError_t error = cudaGetLastError();
   if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error);
 }
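Both fast paths above collapse the layout change into one batched 2-D transpose: NCHW to NHWC treats each image as a C x (H*W) matrix and transposes it, which is why the call passes dims[1] and dims[2] * dims[3]. A plain CPU sketch of that view (Nchw2NhwcSketch is a hypothetical helper, not the Lite math library):

#include <cstdio>
#include <vector>

// Per batch item, transpose a rows x cols matrix; NCHW -> NHWC when
// rows = C and cols = H * W.
void Nchw2NhwcSketch(int n, int rows, int cols, const float* in, float* out) {
  for (int b = 0; b < n; ++b)
    for (int r = 0; r < rows; ++r)
      for (int c = 0; c < cols; ++c)
        out[(b * cols + c) * rows + r] = in[(b * rows + r) * cols + c];
}

int main() {
  // One 2x2 image with 3 channels: N = 1, C = 3, H * W = 4.
  std::vector<float> in(12), out(12);
  for (int i = 0; i < 12; ++i) in[i] = static_cast<float>(i);
  Nchw2NhwcSketch(1, 3, 4, in.data(), out.data());
  printf("%g %g %g\n", out[0], out[1], out[2]);  // 0 4 8: pixel 0, each channel
  return 0;
}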
lite/kernels/cuda/transpose_compute.h (view file @ 486d6572)

@@ -30,7 +30,7 @@ class TransposeCompute : public KernelLite<TARGET(kCUDA), Ptype> {
   virtual ~TransposeCompute() = default;

  private:
-  lite::cuda::math::Transpose<Dtype> trans;
+  lite::cuda::math::Transpose<Dtype> trans_;
 };

 }  // namespace cuda
lite/kernels/cuda/transpose_compute_test.cc (view file @ 486d6572)

@@ -36,9 +36,9 @@ namespace {
 #define OUT(n, c, h, w)                                    \
   output_data[w + h * output_w + c * output_h * output_w + \
               n * output_c * output_h * output_w]
-void nchw2nhwc_ref(lite::Tensor* input,
-                   lite::Tensor* output,
-                   const std::vector<int> axies) {
+void Nchw2nhwcBaseLine(lite::Tensor* input,
+                       lite::Tensor* output,
+                       const std::vector<int> axies) {
   auto* input_data = input->data<float>();
   auto* output_data = output->mutable_data<float>();

@@ -69,9 +69,9 @@ void nchw2nhwc_ref(lite::Tensor* input,
 #define OUT(n, h, w, c)                                    \
   output_data[c + w * output_c + h * output_w * output_c + \
               n * output_h * output_w * output_c]
-void nhwc2nchw_ref(lite::Tensor* input,
-                   lite::Tensor* output,
-                   const std::vector<int> axies) {
+void Nhwc2nchwBaseLine(lite::Tensor* input,
+                       lite::Tensor* output,
+                       const std::vector<int> axies) {
   auto* input_data = input->data<float>();
   auto* output_data = output->mutable_data<float>();

@@ -94,7 +94,7 @@ void nhwc2nchw_ref(lite::Tensor* input,
   }
 }

-void transpose_ref(const lite::Tensor* input,
+void TransBaseLine(const lite::Tensor* input,
                    lite::Tensor* output,
                    const std::vector<int> axes) {
   auto* input_data = input->data<float>();

@@ -173,9 +173,9 @@ TEST(transpose_nchw, normal) {
   auto* out_data = out.mutable_data<float>(TARGET(kCUDA));
   CopySync<TARGET(kCUDA)>(
       out_cpu_data, out_data, sizeof(float) * out.numel(), IoDirection::DtoH);

-  nchw2nhwc_ref(&x_ref, &out_ref, axes);
+  Nchw2nhwcBaseLine(&x_ref, &out_ref, axes);
   auto* out_ref_data = out_ref.mutable_data<float>();
-  // transpose_ref(&x_ref, &out_ref, axes);
+  // TransBaseLine(&x_ref, &out_ref, axes);
   for (int i = 0; i < out.numel(); i++) {
     EXPECT_NEAR(out_cpu_data[i], out_ref_data[i], 1e-5);
   }

@@ -225,8 +225,8 @@ TEST(transpose_nhwc, normal) {
   auto* out_data = out.mutable_data<float>(TARGET(kCUDA));
   CopySync<TARGET(kCUDA)>(
       out_cpu_data, out_data, sizeof(float) * out.numel(), IoDirection::DtoH);
-  nhwc2nchw_ref(&x_ref, &out_ref, axes);
-  // transpose_ref(&x_ref, &out_ref, axes);
+  Nhwc2nchwBaseLine(&x_ref, &out_ref, axes);
+  // TransBaseLine(&x_ref, &out_ref, axes);
   auto* out_ref_data = out_ref.mutable_data<float>();
   for (int i = 0; i < out.numel(); i++) {
     EXPECT_NEAR(out_cpu_data[i], out_ref_data[i], 1e-5);

@@ -236,77 +236,77 @@ TEST(transpose_nhwc, normal) {
 class TransposeTest : public ::testing::Test {
  protected:
   TransposeTest()
-      : C(3),
-        H(128),
-        W(64),
-        axes({1, 2, 0}),
-        x_shape({C, H, W}),
-        out_shape({H, W, C}) {
-    X_ref.Resize(lite::DDim(x_shape));
-    X_gpu.Resize(X_ref.dims());
+      : C_(3),
+        H_(128),
+        W_(64),
+        axes_({1, 2, 0}),
+        x_shape_({C_, H_, W_}),
+        out_shape_({H_, W_, C_}) {
+    x_ref_.Resize(lite::DDim(x_shape_));
+    x_gpu_.Resize(x_ref_.dims());

-    auto x_ref_data = X_ref.mutable_data<float>();
+    auto X_ref__data = x_ref_.mutable_data<float>();

     // prepare input
-    for (int64_t i = 0; i < X_ref.numel(); i++) {
-      x_ref_data[i] = static_cast<float>(i);
+    for (int64_t i = 0; i < x_ref_.numel(); i++) {
+      X_ref__data[i] = static_cast<float>(i);
     }
-    Out_ref.Resize(lite::DDim(out_shape));
-    Out_gpu.Resize(Out_ref.dims());
-    Out_cpu.Resize(Out_ref.dims());
-    cpu_base(&X_ref, &Out_ref);
+    out_ref_.Resize(lite::DDim(out_shape_));
+    out_gpu_.Resize(out_ref_.dims());
+    out_cpu_.Resize(out_ref_.dims());
+    RunBaseLine(&x_ref_, &out_ref_);

-    device_init();
+    InitParamAndContext();
   }

-  void device_init() {
-    ctx.reset(new KernelContext);
-    cudaStreamCreate(&stream);
-    auto& context = ctx->As<CUDAContext>();
-    context.SetExecStream(stream);
-    param.x = &X_gpu;
-    param.output = &Out_gpu;
-    param.axis = axes;
+  void InitParamAndContext() {
+    ctx_.reset(new KernelContext);
+    cudaStreamCreate(&stream_);
+    auto& context = ctx_->As<CUDAContext>();
+    context.SetExecStream(stream_);
+    param_.x = &x_gpu_;
+    param_.output = &out_gpu_;
+    param_.axis = axes_;
   }

-  void float_data_init() {
-    X_gpu.Assign<float, lite::DDim, TARGET(kCUDA)>(X_ref.data<float>(),
-                                                   X_gpu.dims());
+  void InitFloatInput() {
+    x_gpu_.Assign<float, lite::DDim, TARGET(kCUDA)>(x_ref_.data<float>(),
+                                                    x_gpu_.dims());
   }

-  void half_data_init() {
-    X_half.Resize(lite::DDim(X_ref.dims()));
-    auto x_half_data = X_half.mutable_data<half>();
-    for (int64_t i = 0; i < X_half.numel(); i++) {
-      x_half_data[i] = half(lite::float16(X_ref.data<float>()[i]));
+  void InitHalfInput() {
+    x_half_.Resize(lite::DDim(x_ref_.dims()));
+    auto X_half__data = x_half_.mutable_data<half>();
+    for (int64_t i = 0; i < x_half_.numel(); i++) {
+      X_half__data[i] = half(lite::float16(x_ref_.data<float>()[i]));
     }
-    X_gpu.Assign<half, lite::DDim, TARGET(kCUDA)>(x_half_data, X_gpu.dims());
+    x_gpu_.Assign<half, lite::DDim, TARGET(kCUDA)>(X_half__data,
+                                                   x_gpu_.dims());
   }

-  void cpu_base(const lite::Tensor* X, lite::Tensor* Out) {
-    transpose_ref(X, Out, axes);
+  void RunBaseLine(const lite::Tensor* x, lite::Tensor* out) {
+    TransBaseLine(x, out, axes_);
   }

-  int C, H, W;
-  std::vector<int> axes;
-  std::vector<int64_t> x_shape, out_shape;
+  int C_, H_, W_;
+  std::vector<int> axes_;
+  std::vector<int64_t> x_shape_, out_shape_;

-  lite::Tensor X_ref, Out_ref;
-  lite::Tensor X_gpu, Out_gpu;
-  lite::Tensor X_half;
-  lite::Tensor Out_cpu;
+  lite::Tensor x_ref_, out_ref_;
+  lite::Tensor x_gpu_, out_gpu_;
+  lite::Tensor x_half_;
+  lite::Tensor out_cpu_;

-  operators::TransposeParam param;
-  std::unique_ptr<KernelContext> ctx;
-  cudaStream_t stream;
+  operators::TransposeParam param_;
+  std::unique_ptr<KernelContext> ctx_;
+  cudaStream_t stream_;
 };

 TEST_F(TransposeTest, fp32) {
-  float_data_init();
+  InitFloatInput();
   TransposeCompute<float, PRECISION(kFloat)> kernel;
-  kernel.SetParam(param);
-  kernel.SetContext(std::move(ctx));
+  kernel.SetParam(param_);
+  kernel.SetContext(std::move(ctx_));
   for (int i = 0; i < FLAGS_warmup; ++i) {
     kernel.Launch();

@@ -324,20 +324,20 @@ TEST_F(TransposeTest, fp32) {
             << ", repeats: " << FLAGS_repeats << ", spend "
             << duration / FLAGS_repeats << " ms in average.";

-  CopySync<TARGET(kCUDA)>(Out_cpu.mutable_data<float>(),
-                          Out_gpu.data<float>(),
-                          sizeof(float) * Out_gpu.numel(),
+  CopySync<TARGET(kCUDA)>(out_cpu_.mutable_data<float>(),
+                          out_gpu_.data<float>(),
+                          sizeof(float) * out_gpu_.numel(),
                           IoDirection::DtoH);
-  for (int i = 0; i < Out_gpu.numel(); ++i) {
-    EXPECT_NEAR(Out_cpu.data<float>()[i], Out_ref.data<float>()[i], 1e-5);
+  for (int i = 0; i < out_gpu_.numel(); ++i) {
+    EXPECT_NEAR(out_cpu_.data<float>()[i], out_ref_.data<float>()[i], 1e-5);
   }
 }

 TEST_F(TransposeTest, TestFP16) {
-  half_data_init();
+  InitHalfInput();
   TransposeCompute<half, PRECISION(kFP16)> kernel;
-  kernel.SetParam(param);
-  kernel.SetContext(std::move(ctx));
+  kernel.SetParam(param_);
+  kernel.SetContext(std::move(ctx_));
   for (int i = 0; i < FLAGS_warmup; ++i) {
     kernel.Launch();

@@ -355,16 +355,16 @@ TEST_F(TransposeTest, TestFP16) {
             << ", repeats: " << FLAGS_repeats << ", spend "
             << duration / FLAGS_repeats << " ms in average.";

-  const half* out_gpu_data = Out_gpu.data<half>();
-  half* out_cpu_data = Out_cpu.mutable_data<half>();
-  CopySync<TARGET(kCUDA)>(out_cpu_data,
-                          out_gpu_data,
-                          sizeof(half) * Out_gpu.numel(),
+  const half* Out_gpu__data = out_gpu_.data<half>();
+  half* Out_cpu__data = out_cpu_.mutable_data<half>();
+  CopySync<TARGET(kCUDA)>(Out_cpu__data,
+                          Out_gpu__data,
+                          sizeof(half) * out_gpu_.numel(),
                           IoDirection::DtoH);
-  for (int i = 0; i < Out_cpu.numel(); ++i) {
-    float res = static_cast<float>(lite::float16(out_cpu_data[i]));
-    float ref = Out_ref.data<float>()[i];
+  for (int i = 0; i < out_cpu_.numel(); ++i) {
+    float res = static_cast<float>(lite::float16(Out_cpu__data[i]));
+    float ref = out_ref_.data<float>()[i];
     EXPECT_NEAR(fabs(res - ref) / (ref + 1e-5), 0., 1e-2);
   }
 }