PaddlePaddle / Paddle-Lite

Commit 86044671
Authored on Oct 16, 2018 by liuruilong
update conv kernel
Parent: e81515d3
Showing 17 changed files with 133 additions and 92 deletions (+133, -92)
CMakeLists.txt (+0, -1)
src/framework/cl/cl_half.cpp (+6, -0)
src/framework/cl/cl_half.h (+6, -0)
src/framework/cl/cl_image.cpp (+12, -11)
src/framework/cl/cl_image.h (+30, -26)
src/framework/executor.cpp (+7, -5)
src/framework/operator.cpp (+2, -2)
src/operators/kernel/cl/batchnorm_kernel.cpp (+4, -2)
src/operators/kernel/cl/cl_kernel/conv_kernel.cl (+1, -0)
src/operators/kernel/cl/conv_add_bn_relu_kernel.cpp (+8, -4)
src/operators/kernel/cl/conv_add_kernel.cpp (+4, -2)
src/operators/kernel/cl/conv_kernel.cpp (+13, -1)
src/operators/kernel/cl/depthwise_conv_kernel.cpp (+2, -1)
src/operators/kernel/cl/feed_kernel.cpp (+1, -1)
src/operators/kernel/cl/fetch_kernel.cpp (+34, -33)
src/operators/kernel/cl/reshape_kernel.cpp (+2, -2)
tools/pre-commit.hooks/clang-format.hook (+1, -1)
CMakeLists.txt

@@ -16,7 +16,6 @@ file(GLOB_RECURSE PADDLE_MOBILE_CC src/*.cc src/*.cpp src/*.c src/*.mm)
file(GLOB_RECURSE PADDLE_MOBILE_H src/*.h)
include_directories(src/)

if(IS_IOS)
  set(CMAKE_CXX_FLAGS "-mfpu=neon -marm -fobjc-abi-version=2 -fobjc-arc -std=gnu++11 -stdlib=libc++ -O3 -s -isysroot ${CMAKE_OSX_SYSROOT} ${CMAKE_CXX_FLAGS}")
else()
...
src/framework/cl/cl_half.cpp

@@ -16,6 +16,9 @@ limitations under the License. */
#include "framework/cl/cl_half.h"

namespace paddle_mobile {
namespace framework {

static const uint32_t mantissatable[2048] = {
    0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34a00000,
    0x34c00000, 0x34e00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000,
    ...

@@ -510,3 +513,6 @@ void HalfArray2FloatArray(half_t *h_array, float *f_array, int count) {
    f_array[i] = Half2Float(h_array[i]);
  }
}

}  // namespace framework
}  // namespace paddle_mobile
src/framework/cl/cl_half.h

@@ -15,6 +15,9 @@ limitations under the License. */
#pragma once

#include <cstdint>

namespace paddle_mobile {
namespace framework {

typedef uint16_t half_t;

half_t Float2Half(float f);
...

@@ -24,3 +27,6 @@ float Half2Float(half_t h);
void FloatArray2HalfArray(float *f_array, half_t *h_array, int count);

void HalfArray2FloatArray(half_t *h_array, float *f_array, int count);

}  // namespace framework
}  // namespace paddle_mobile
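These are the host-side FP16 helpers used to pack tensor data into CL_HALF_FLOAT images. A minimal usage sketch, based only on the signatures declared above; the sample values and buffer sizes are illustrative:

#include <vector>

#include "framework/cl/cl_half.h"

using paddle_mobile::framework::half_t;
using paddle_mobile::framework::Float2Half;
using paddle_mobile::framework::Half2Float;
using paddle_mobile::framework::FloatArray2HalfArray;

int main() {
  // Round-trip a single value; the result matches the input up to FP16 precision.
  half_t h = Float2Half(3.14159f);
  float back = Half2Float(h);
  (void)back;

  // Bulk conversion, as used when filling a half-float image from float tensor data.
  std::vector<float> src = {0.0f, 1.0f, -2.5f, 65504.0f};
  std::vector<half_t> dst(src.size());
  FloatArray2HalfArray(src.data(), dst.data(), static_cast<int>(src.size()));

  return 0;
}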
src/framework/cl/cl_image.cpp

@@ -12,7 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "cl_image.h"
+#include "framework/cl/cl_image.h"

namespace paddle_mobile {
namespace framework {

void CLImageToTensor(CLImage *cl_image, Tensor *tensor,
...

@@ -63,7 +64,7 @@ void CLImageToTensor(CLImage *cl_image, Tensor *tensor,
  }
  if (err != CL_SUCCESS) {
    // TODO: error handling
    CL_CHECK_ERRORS(err);
  }
}

void TensorToCLImage(const Tensor *tensor, CLImage *cl_image,
...

@@ -97,7 +98,7 @@ void TensorToCLImage(const Tensor *tensor, CLImage *cl_image,
  err = clEnqueueReadImage(commandQueue, image, CL_TRUE, origin, region, 0, 0,
                           imageData, 0, NULL, NULL);
  if (err != CL_SUCCESS) {
    // TODO: error handling
    CL_CHECK_ERRORS(err);
  }
  size_t i0 = 0;
  for (int n = 0; n < N; n++) {
...

@@ -117,7 +118,7 @@ void TensorToCLImage(const Tensor *tensor, CLImage *cl_image,
  }
}

#ifdef PADDLE_MOBILE_DEBUG
-Print &operator<<(Print &printer, const CLImage &cl_image){
+Print &operator<<(Print &printer, const CLImage &cl_image) {
  printer << " dims: " << cl_image.dims() << "\n";
  int stride = cl_image.numel() / 20;
  stride = stride > 0 ? stride : 1;
...

@@ -148,8 +149,8 @@ Print &operator<<(Print &printer, const CLImage &cl_image){
  cl_mem image = cl_image.GetCLImage();
  size_t origin[3] = {0, 0, 0};
  size_t region[3] = {width, height, 1};
  err = clEnqueueReadImage(cl_image.CommandQueue(), image, CL_TRUE, origin,
                           region, 0, 0, imageData, 0, NULL, NULL);
  size_t i0 = 0;
  for (int n = 0; n < N; n++) {
    for (int c = 0; c < C; c++) {
...

@@ -168,13 +169,13 @@ Print &operator<<(Print &printer, const CLImage &cl_image){
    }
    if (err != CL_SUCCESS) {
      // TODO: error handling
      CL_CHECK_ERRORS(err);
    }
    for (int i = 0; i < cl_image.numel(); i += stride) {
      printer << data[i] << " ";
    }
    return printer;
  }
}
#endif
}  // namespace framework
}  // namespace paddle_mobile
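The debug operator<< above pulls the image contents back to the host with a blocking clEnqueueReadImage before printing. In isolation, that readback pattern looks like the sketch below; it uses only standard OpenCL calls, and the queue, image and extents are assumed to come from a CLImage as in the code above:

#include <cstdint>
#include <vector>

#include <CL/cl.h>

// Read a width x height CL_RGBA / CL_HALF_FLOAT image2d back into host memory.
// Each pixel carries 4 half-float channels, stored here as raw uint16_t bits.
std::vector<uint16_t> ReadImageRGBAHalf(cl_command_queue queue, cl_mem image,
                                        size_t width, size_t height) {
  std::vector<uint16_t> host(width * height * 4);
  size_t origin[3] = {0, 0, 0};
  size_t region[3] = {width, height, 1};
  cl_int err = clEnqueueReadImage(queue, image, CL_TRUE /* blocking */, origin,
                                  region, 0 /* row_pitch */, 0 /* slice_pitch */,
                                  host.data(), 0, NULL, NULL);
  if (err != CL_SUCCESS) {
    host.clear();  // signal failure to the caller with an empty buffer
  }
  return host;
}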
src/framework/cl/cl_image.h

@@ -46,27 +46,28 @@ class CLImage {
  /*
   * need call SetTensorData first
   * */
  void InitCLImage(cl_context context, cl_command_queue command_queue) {
    if (tensor_data_ == nullptr) {
      PADDLE_MOBILE_THROW_EXCEPTION(" need call SetTensorData first");
    }
    if (tensor_dims_.size() <= 2) {
      InitCLImage2C(context, command_queue, tensor_data_, tensor_dims_);
    } else {
      InitCLImage(context, command_queue, tensor_data_, tensor_dims_);
    }
    delete[](tensor_data_);
    tensor_data_ = nullptr;
    initialized_ = true;
  }

  void InitEmptyImage(cl_context context, cl_command_queue command_queue,
                      const DDim &dim) {
    if (tensor_data_ != nullptr) {
      PADDLE_MOBILE_THROW_EXCEPTION(
          " empty image tensor data shouldn't have value");
    }
    DLOG << " init empty image ";
    InitCLImage(context, command_queue, nullptr, dim);
    initialized_ = true;
  }
...

@@ -93,7 +94,7 @@ class CLImage {
   * */
  inline size_t HeightOfOneBlock() const { return height_of_one_block_; }

  inline cl_command_queue CommandQueue() const { return command_queue_; }

  /*
   * resize original tensor dim
...

@@ -124,7 +125,8 @@ class CLImage {
  const DDim &dims() const { return tensor_dims_; }

 private:
  void InitCLImage2C(cl_context context, cl_command_queue command_queue,
                     float *tensor_data, const DDim &dim) {
    command_queue_ = command_queue;
    assert(dim.size() <= 2);
    int tdim[2] = {1, 1};
...

@@ -141,14 +143,15 @@ class CLImage {
    imageData.reset(new half_t[width * height * 4]);
    for (int h = 0; h < tdim[0]; h++) {
      for (int w = 0; w < tdim[1]; w++) {
        imageData[(h * width + w / 4) * 4 + (w % 4)] =
            Float2Half(tensor_data[h * tdim[1] + w]);
      }
    }
    InitCLImage(context, width, height, imageData.get());
  }

  void InitCLImage(cl_context context, int width, int height, void *data) {
    cl_image_format cf = {.image_channel_order = CL_RGBA,
                          .image_channel_data_type = CL_HALF_FLOAT};
    cl_image_desc cid = {
...

@@ -170,14 +173,14 @@ class CLImage {
        &cf,    // const cl_image_format *image_format
        &cid,   // const cl_image_desc *image_desc
        data,   // void *host_ptr
        &err);
    if (err != CL_SUCCESS) {
      CL_CHECK_ERRORS(err);
      PADDLE_MOBILE_THROW_EXCEPTION(" create image 2d error ");
    }
  }

  void InitCLImage(cl_context context, cl_command_queue command_queue,
                   float *tensor_data, const DDim &dim) {
    DLOG << " tensor dim: " << dim;
    // NCHW -> [W * (C+3)/4, H * N]
    tensor_dims_ = dim;
...

@@ -207,6 +210,7 @@ class CLImage {
    image_width_ = width;
    image_height_ = height;
    image_dims_ = make_ddim({image_width_, image_height_});
    c_block_ = W / width;

    std::unique_ptr<half_t[]> imageData{};
    int count = 0;
...
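The "NCHW -> [W * (C+3)/4, H * N]" comment above is the key to the image layout: channels are packed four at a time into the RGBA components of each pixel, which multiplies the image width by (C+3)/4, while the batch dimension is stacked along the image height. A small sketch of that extent computation, assuming the tensor is exactly rank-4 NCHW (the real class also handles lower ranks, as the InitCLImage2C path shows):

#include <cstdint>
#include <cstdio>

// Image extent for an NCHW tensor packed as RGBA half floats,
// following the layout comment: width = W * ceil(C / 4), height = H * N.
void ImageExtentForNCHW(int64_t N, int64_t C, int64_t H, int64_t W,
                        int64_t *image_width, int64_t *image_height) {
  int64_t c_block = (C + 3) / 4;  // number of RGBA blocks needed for C channels
  *image_width = W * c_block;     // channel blocks laid out side by side along x
  *image_height = H * N;          // batch entries stacked along y
}

int main() {
  int64_t w = 0, h = 0;
  ImageExtentForNCHW(/*N=*/1, /*C=*/32, /*H=*/112, /*W=*/112, &w, &h);
  std::printf("image extent: %lld x %lld\n", (long long)w, (long long)h);  // 896 x 112
  return 0;
}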
src/framework/executor.cpp

@@ -37,7 +37,7 @@ limitations under the License. */
#include "framework/cl/cl_image.h"
#endif

-int debug_to = 115;
+int debug_to = 3;

namespace paddle_mobile {
namespace framework {
...

@@ -953,13 +953,14 @@ void Executor<GPU_CL, Precision::FP32>::InitMemory() {
      if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
        auto cl_image = var->template GetMutable<framework::CLImage>();
        cl_context context = program_.scope->GetCLScpoe()->Context();
        cl_command_queue command_queue =
            program_.scope->GetCLScpoe()->CommandQueue();
        const framework::TensorDesc &desc = var_desc->Tensor_desc();
        //  framework::DDim ddim = framework::make_ddim(desc.Dims());
        framework::DDim ddim = cl_image->dims();
        DLOG << var_desc->Name();
        cl_image->InitEmptyImage(context, command_queue, ddim);
      }
    }
  }
...

@@ -1011,11 +1012,12 @@ void Executor<GPU_CL, Precision::FP32>::InitCombineMemory() {
      } else {
        auto cl_image = var->template GetMutable<framework::CLImage>();
        cl_context context = program_.scope->GetCLScpoe()->Context();
        cl_command_queue command_queue =
            program_.scope->GetCLScpoe()->CommandQueue();
        const framework::TensorDesc &desc = var_desc->Tensor_desc();
        framework::DDim ddim = cl_image->dims();
        //  framework::DDim ddim = framework::make_ddim(desc.Dims());
        cl_image->InitEmptyImage(context, command_queue, ddim);
      }
    }
  }
...
src/framework/operator.cpp

@@ -73,7 +73,7 @@ void OperatorBase<Dtype>::Run() {
      } else {
        CLImage *cl_image = vari->template GetMutable<framework::CLImage>();
        if (cl_image) {
          DLOG << type_ << " input- " << key << "=" << *cl_image;
        }
      }
...

@@ -98,7 +98,7 @@ void OperatorBase<Dtype>::Run() {
      } else {
        CLImage *cl_image = vari->template GetMutable<framework::CLImage>();
        if (cl_image) {
-          DLOG << type_ << " output- " << key << "=" <<*cl_image;
+          DLOG << type_ << " output- " << key << "=" << *cl_image;
        }
      }
...
src/operators/kernel/cl/batchnorm_kernel.cpp

@@ -49,11 +49,13 @@ bool BatchNormKernel<GPU_CL, float>::Init(BatchNormParam<GPU_CL> *param) {
  framework::CLImage *new_scale = new framework::CLImage();
  new_scale->SetTensorData(new_scale_ptr, variance->dims());
  new_scale->InitCLImage(this->cl_helper_.CLContext(),
                         this->cl_helper_.CLCommandQueue());

  framework::CLImage *new_bias = new framework::CLImage();
  new_bias->SetTensorData(new_bias_ptr, variance->dims());
  new_bias->InitCLImage(this->cl_helper_.CLContext(),
                        this->cl_helper_.CLCommandQueue());

  param->SetNewScale(new_scale);
  param->SetNewBias(new_bias);
...
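The new_scale_ptr and new_bias_ptr buffers wrapped into these CLImages hold the batch-norm statistics folded into a single per-channel scale and bias. That computation is not part of this hunk, so the following is only a sketch of the standard folding formula (new_scale = scale / sqrt(variance + epsilon), new_bias = bias - mean * new_scale); all names here are illustrative, not the commit's own code:

#include <cmath>
#include <vector>

// Fold batch-norm statistics so that y = new_scale * x + new_bias reproduces
// y = scale * (x - mean) / sqrt(variance + eps) + bias for each channel.
void FoldBatchNorm(const std::vector<float> &scale, const std::vector<float> &bias,
                   const std::vector<float> &mean, const std::vector<float> &variance,
                   float epsilon, std::vector<float> *new_scale,
                   std::vector<float> *new_bias) {
  const size_t channels = scale.size();
  new_scale->resize(channels);
  new_bias->resize(channels);
  for (size_t i = 0; i < channels; ++i) {
    const float inv_std = 1.0f / std::sqrt(variance[i] + epsilon);
    (*new_scale)[i] = scale[i] * inv_std;
    (*new_bias)[i] = bias[i] - mean[i] * (*new_scale)[i];
  }
}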
src/operators/kernel/cl/cl_kernel/conv_kernel.cl

@@ -19,6 +19,7 @@ __kernel void conv_3x3(__private const int global_size_dim0,
                       __private const int global_size_dim2,
                       __read_only image2d_t input_image,
                       __read_only image2d_t filter,
#ifdef BIASE
                       __read_only image2d_t bias,
#endif
...
src/operators/kernel/cl/conv_add_bn_relu_kernel.cpp

@@ -29,8 +29,10 @@ bool ConvAddBNReluKernel<GPU_CL, float>::Init(
      param->Paddings()[0] == param->Paddings()[1],
      "need equal");

  param->Filter()->InitCLImage(cl_helper_.CLContext(),
                               cl_helper_.CLCommandQueue());
  param->Bias()->InitCLImage(cl_helper_.CLContext(),
                             cl_helper_.CLCommandQueue());

  //  const CL *mean = param->InputMean();
  const framework::CLImage *mean = param->InputMean();
...

@@ -62,12 +64,14 @@ bool ConvAddBNReluKernel<GPU_CL, float>::Init(
  framework::CLImage *new_scale = new framework::CLImage();
  new_scale->SetTensorData(new_scale_ptr, variance->dims());
  new_scale->InitCLImage(this->cl_helper_.CLContext(),
                         cl_helper_.CLCommandQueue());

  framework::CLImage *new_bias = new framework::CLImage();
  new_bias->SetTensorData(new_bias_ptr, variance->dims());
  new_bias->InitCLImage(this->cl_helper_.CLContext(),
                        cl_helper_.CLCommandQueue());

  param->SetNewScale(new_scale);
  param->SetNewBias(new_bias);
...
src/operators/kernel/cl/conv_add_kernel.cpp

@@ -25,8 +25,10 @@ bool ConvAddKernel<GPU_CL, float>::Init(FusionConvAddParam<GPU_CL> *param) {
      param->Filter()->dims()[2] == param->Filter()->dims()[3] &&
          param->Paddings()[0] == param->Paddings()[1],
      "need equal");

  param->Filter()->InitCLImage(cl_helper_.CLContext(),
                               this->cl_helper_.CLCommandQueue());
  param->Bias()->InitCLImage(cl_helper_.CLContext(),
                             this->cl_helper_.CLCommandQueue());

  int offset = static_cast<int>(param->Filter()->dims()[2]) / 2 -
               static_cast<int>(param->Paddings()[1]);
...
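The offset computed above is simply filter_size / 2 minus the padding, so a 3x3 filter with padding 1 yields 0 while a 3x3 filter with no padding yields 1. A tiny worked check of that arithmetic (the helper name is only for illustration):

#include <cassert>

// offset = floor(filter_size / 2) - padding, as computed in the conv kernels above.
int ConvOffset(int filter_size, int padding) {
  return filter_size / 2 - padding;
}

int main() {
  assert(ConvOffset(3, 1) == 0);  // "same"-style 3x3 convolution
  assert(ConvOffset(3, 0) == 1);  // unpadded 3x3 convolution
  assert(ConvOffset(1, 0) == 0);  // 1x1 convolution
  return 0;
}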
src/operators/kernel/cl/conv_kernel.cpp

@@ -26,7 +26,8 @@ bool ConvKernel<GPU_CL, float>::Init(ConvParam<GPU_CL> *param) {
      param->Paddings()[0] == param->Paddings()[1],
      "need equal");

  param->Filter()->InitCLImage(cl_helper_.CLContext(),
                               this->cl_helper_.CLCommandQueue());

  int offset = static_cast<int>(param->Filter()->dims()[2]) / 2 -
               static_cast<int>(param->Paddings()[1]);
...

@@ -95,6 +96,17 @@ void ConvKernel<GPU_CL, float>::Compute(const ConvParam<GPU_CL> &param) {
  cl_int status;

  DLOG << " begin set kernel arg ";
  DLOG << " c block " << c_block;
  DLOG << " w " << w;
  DLOG << " nh " << nh;
  DLOG << " stride " << stride;
  DLOG << " offset " << offset;
  DLOG << " input_c " << input_c;
  DLOG << " dilation " << dilation;
  DLOG << " input width " << input_width;
  DLOG << " input height " << input_height;
  DLOG << " output width " << output_width;
  DLOG << " output height " << output_height;

  status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
  CL_CHECK_ERRORS(status);
...
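Only the first clSetKernelArg call is visible in this hunk; the DLOG lines list the scalar parameters that Compute() prepares for conv_3x3. As a hedged sketch, pushing those scalars to consecutive argument slots would look roughly like this; the slot numbers beyond 0 and the image arguments (input, filter, bias, output) are assumptions, not shown in the diff:

#include <CL/cl.h>

// Illustrative only: set the scalar conv parameters on consecutive kernel
// argument slots and stop at the first failure. The real kernel also takes
// image2d_t arguments that this sketch leaves out.
cl_int SetConvScalarArgs(cl_kernel kernel, int c_block, int w, int nh,
                         int stride, int offset, int input_c, int dilation,
                         int input_width, int input_height, int output_width,
                         int output_height) {
  const int args[] = {c_block, w, nh, stride, offset, input_c, dilation,
                      input_width, input_height, output_width, output_height};
  for (cl_uint i = 0; i < sizeof(args) / sizeof(args[0]); ++i) {
    cl_int status = clSetKernelArg(kernel, i, sizeof(int), &args[i]);
    if (status != CL_SUCCESS) {
      return status;  // caller logs the failing index
    }
  }
  return CL_SUCCESS;
}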
src/operators/kernel/cl/depthwise_conv_kernel.cpp

@@ -27,7 +27,8 @@ bool DepthwiseConvKernel<GPU_CL, float>::Init(ConvParam<GPU_CL> *param) {
      param->Filter()->dims()[2] == param->Filter()->dims()[3] &&
          param->Paddings()[0] == param->Paddings()[1],
      "need equal");

  param->Filter()->InitCLImage(cl_helper_.CLContext(),
                               this->cl_helper_.CLCommandQueue());

  int offset = static_cast<int>(param->Filter()->dims()[2]) / 2 -
               static_cast<int>(param->Paddings()[1]);
  param->SetOffset(offset);
...
src/operators/kernel/cl/feed_kernel.cpp

@@ -30,7 +30,7 @@ void FeedKernel<GPU_CL, float>::Compute(const FeedParam<GPU_CL> &param) {
  cl_int status;
  auto output = param.Out();
  const Tensor *input = param.InputX();
  DLOG << *input;
  const float *input_data = input->data<float>();
  int numel = input->numel();
  cl_mem cl_image = output->GetCLImage();
...
src/operators/kernel/cl/fetch_kernel.cpp

@@ -19,44 +19,45 @@ namespace operators {
template <>
bool FetchKernel<GPU_CL, float>::Init(FetchParam<GPU_CL> *param) {
  //  this->cl_helper_.AddKernel("fetch", "fetch_kernel.cl");
  return true;
}

template <>
void FetchKernel<GPU_CL, float>::Compute(const FetchParam<GPU_CL> &param) {
  //  auto kernel = this->cl_helper_.KernelAt(0);
  //  auto default_work_size =
  //  this->cl_helper_.DefaultWorkSize(*param.InputX());
  //
  //  auto input = param.InputX()->GetCLImage();
  //  auto *out = param.Out();
  //
  //  const auto &dims = param.InputX()->dims();
  //  const int N = dims[0];
  //  const int C = dims[1];
  //  const int in_height = dims[2];
  //  const int in_width = dims[3];
  //
  //  int size_ch = in_height * in_width;
  //  int size_block = size_ch * 4;
  //  int size_batch = size_ch * C;
  //
  //  // need create outputBuffer
  //  cl_image_format imageFormat;
  //  imageFormat.image_channel_order = CL_RGBA;
  //  imageFormat.image_channel_data_type = CL_FLOAT;
  //  cl_mem outputBuffer;
  //
  //  clSetKernelArg(kernel, 0, sizeof(int), &in_height);
  //  clSetKernelArg(kernel, 1, sizeof(int), &in_width);
  //  clSetKernelArg(kernel, 2, sizeof(int), &size_ch);
  //  clSetKernelArg(kernel, 3, sizeof(int), &size_block);
  //  clSetKernelArg(kernel, 4, sizeof(int), &size_batch);
  //  clSetKernelArg(kernel, 5, sizeof(cl_mem), &input);
  //  clSetKernelArg(kernel, 6, sizeof(cl_mem), &outputBuffer);
  //
  //  clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel, 3, NULL,
  //                         default_work_size.data(), NULL, 0, NULL, NULL);
}

template class FetchKernel<GPU_CL, float>;
...
src/operators/kernel/cl/reshape_kernel.cpp

@@ -37,10 +37,10 @@ void ReshapeKernel<GPU_CL, float>::Compute(const ReshapeParam<GPU_CL> &param) {
  int dims[4] = {1, 1, 1, 1};
  int odims[4] = {1, 1, 1, 1};
  for (int i = 0; i < inputDim.size(); i++) {
    dims[4 - inputDim.size() + i] = inputDim[i];
  }
  for (int i = 0; i < outputDim.size(); i++) {
    odims[4 - outputDim.size() + i] = outputDim[i];
  }
  clSetKernelArg(kernel, 2, sizeof(cl_int), &dims);
  clSetKernelArg(kernel, 3, sizeof(cl_int), &dims[1]);
...
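The two loops above right-align a shape of arbitrary rank into a fixed 4-element array, padding the leading entries with 1. A standalone sketch of that padding step (names are illustrative):

#include <cassert>
#include <vector>

// Right-align 'shape' into a length-4 array padded with leading 1s,
// mirroring the dims/odims loops in ReshapeKernel::Compute above.
void PadShapeTo4D(const std::vector<int> &shape, int out[4]) {
  for (int i = 0; i < 4; ++i) out[i] = 1;
  const int rank = static_cast<int>(shape.size());
  for (int i = 0; i < rank; ++i) {
    out[4 - rank + i] = shape[i];
  }
}

int main() {
  int d[4];
  PadShapeTo4D({32, 64}, d);  // a rank-2 shape becomes 1 x 1 x 32 x 64
  assert(d[0] == 1 && d[1] == 1 && d[2] == 32 && d[3] == 64);
  return 0;
}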
tools/pre-commit.hooks/clang-format.hook

@@ -17,7 +17,7 @@ shift
perl -i -pe 's|^\s+#pragma\s+omp|// <TRICKY-CLANG-FORMAT-PRAGMA-FIX> #pragma omp|' "$@"
(
# remove clang format ios_io folder
-flist=$(echo "$@" | perl -pe 's|src/ios_io/[^ ]*||')
+flist=$(echo "$@" | perl -pe 's|src/io/ios_io/[^ ]*||')
clang-format -i $flist
)
perl -i -pe 's|// <TRICKY-CLANG-FORMAT-PRAGMA-FIX> ||' "$@"