Commit f15275d1 authored by yangfei

imp add op kernel

Parent 25ce72b7
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma OPENCL EXTENSION cl_khr_fp16 : enable

__kernel void channel_add(__global image2d_t input, __global image2d_t bias,
                          __write_only image2d_t outputImage, int w) {
  int x = get_global_id(0);
  int y = get_global_id(1);
  const sampler_t sampler =
      CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
  int2 coords;
  coords.x = x;
  coords.y = y;
  // The bias image has a single row, so it is read at row 0; x / w selects the
  // channel block this output column belongs to.
  int2 coords_bias;
  coords_bias.x = x / w;
  coords_bias.y = 0;
  half4 in = read_imageh(input, sampler, coords);
  half4 bias_val = read_imageh(bias, sampler, coords_bias);
  half4 output = in + bias_val;
  write_imageh(outputImage, coords, output);
}
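For reference, channel_add is a per-channel bias add: every element of an NCHW input is offset by the bias value of its channel. A minimal CPU sketch follows; the NCHW-to-image folding mentioned in the comment (four consecutive channels per texel, image x = (c / 4) * W + w) is an assumption about the default CLImage converter, not something this commit defines.

#include <vector>

// CPU reference for channel_add. Under the assumed folding, x / W in the
// kernel recovers the channel block c / 4, which is exactly the column used
// to index the single-row bias image.
void ChannelAddReference(const std::vector<float> &in,    // N*C*H*W, NCHW order
                         const std::vector<float> &bias,  // C values
                         std::vector<float> *out, int N, int C, int H, int W) {
  for (int n = 0; n < N; ++n) {
    for (int c = 0; c < C; ++c) {
      for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
          int idx = ((n * C + c) * H + h) * W + w;
          (*out)[idx] = in[idx] + bias[c];
        }
      }
    }
  }
}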
@@ -11,8 +11,16 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-__kernel void elementwise_add(__global float* in, __global float* out) {
-  int num = get_global_id(0);
-  out[num] = in[num] * 0.1 + 102;
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+__kernel void elementwise_add(__global image2d_t input, __global image2d_t bias, __write_only image2d_t outputImage) {
+  int x = get_global_id(0);
+  int y = get_global_id(1);
+  const sampler_t sampler = CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
+  int2 coords;
+  coords.x = x;
+  coords.y = y;
+  half4 in = read_imageh(input, sampler, coords);
+  half4 bias_val = read_imageh(bias, sampler, coords);
+  half4 output = in + bias_val;
+  write_imageh(outputImage, coords, output);
 }
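Both kernels rely on the cl_khr_fp16 extension for the half4 image reads and writes. A minimal host-side sketch for checking that the target device actually advertises the extension before these programs are built is shown below; this helper is illustrative and not part of the commit.

#include <CL/cl.h>
#include <cstring>
#include <vector>

// Returns true if the device advertises half-precision (cl_khr_fp16) support.
bool DeviceSupportsFp16(cl_device_id device) {
  size_t ext_size = 0;
  clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, 0, NULL, &ext_size);
  std::vector<char> extensions(ext_size, 0);
  clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, ext_size, extensions.data(), NULL);
  return std::strstr(extensions.data(), "cl_khr_fp16") != NULL;
}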
 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
 __kernel void feed(__global float *in, __write_only image2d_t outputImage, int h, int w)
 {
-  int j = get_global_id(0);
-  int i = get_global_id(1);
+  int i = get_global_id(0);
+  int j = get_global_id(1);
   half4 pixel;
   pixel.x = convert_half(in[(i * w + j)]);
   pixel.y = convert_half(in[h * w + (i * w + j)]);
...
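The feed kernel converts a planar float buffer into a half4 image, packing four consecutive channel planes into one texel. A CPU sketch of the addressing visible above follows (only the .x and .y components appear in the hunk; the remaining components presumably follow the same pattern). The helper name is illustrative only.

#include <vector>

// CPU sketch of the texel packing used by the feed kernel: component k of the
// texel at row i, column j comes from channel plane k of the planar input.
std::vector<float> PackFeedTexel(const std::vector<float> &in, int h, int w,
                                 int i, int j) {
  std::vector<float> texel(4, 0.0f);
  for (int k = 0; k < 4; ++k) {
    size_t idx = static_cast<size_t>(k) * h * w + i * w + j;
    if (idx < in.size()) texel[k] = in[idx];  // pixel.x / .y / .z / .w
  }
  return texel;
}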
@@ -22,14 +22,68 @@ namespace operators {
 template <>
 bool ElementwiseAddKernel<GPU_CL, float>::Init(
     ElementwiseAddParam<GPU_CL> *param) {
-  // this->cl_helper_.AddKernel("elementwise_add",
-  // "elementwise_add_kernel.cl");
+  CLImage *bias = (CLImage *)param->InputY();
+  bias->InitCLImage(cl_helper_.CLContext());
+  if (bias->dims().size() == 4) {
+    this->cl_helper_.AddKernel("elementwise_add", "elementwise_add_kernel.cl");
+  } else if (param->InputY()->dims().size() == 1) {
+    DLOG << "----- init channel_add -----";
+    this->cl_helper_.AddKernel("channel_add", "channel_add_kernel.cl");
+  } else {
+    DLOG << "error: unsupported bias dims";
+  }
   return true;
 }
 template <>
 void ElementwiseAddKernel<GPU_CL, float>::Compute(
-    const ElementwiseAddParam<GPU_CL> &param) {}
+    const ElementwiseAddParam<GPU_CL> &param) {
+  auto input = param.InputX();
+  auto bias = param.InputY();
+  auto output = param.Out();
+  cl_int status;
+  auto kernel = this->cl_helper_.KernelAt(0);
+  if (bias->dims().size() == 4) {
+    // Same-shape add: one work-item per output texel.
+    cl_mem input_image = input->GetCLImage();
+    cl_mem bias_image = bias->GetCLImage();
+    cl_mem output_image = output->GetCLImage();
+    status = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&input_image);
+    CL_CHECK_ERRORS(status);
+    status = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&bias_image);
+    CL_CHECK_ERRORS(status);
+    status = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&output_image);
+    CL_CHECK_ERRORS(status);
+    int width = input->ImageWidth();
+    int height = input->ImageHeight();
+    size_t global_work_size[2] = {static_cast<size_t>(width),
+                                  static_cast<size_t>(height)};
+    status = clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel,
+                                    2, NULL, global_work_size, NULL, 0, NULL,
+                                    NULL);
+    CL_CHECK_ERRORS(status);
+  } else if (bias->dims().size() == 1) {
+    // Per-channel bias add: the kernel additionally needs the tensor width to
+    // map an image column back to its channel block.
+    cl_mem input_image = input->GetCLImage();
+    cl_mem bias_image = bias->GetCLImage();
+    cl_mem output_image = output->GetCLImage();
+    int tensor_w = input->dims()[3];  // W, the last dim of the NCHW input
+    status = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&input_image);
+    CL_CHECK_ERRORS(status);
+    status = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&bias_image);
+    CL_CHECK_ERRORS(status);
+    status = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&output_image);
+    CL_CHECK_ERRORS(status);
+    status = clSetKernelArg(kernel, 3, sizeof(cl_int), (void *)&tensor_w);
+    CL_CHECK_ERRORS(status);
+    int width = input->ImageWidth();
+    int height = input->ImageHeight();
+    size_t global_work_size[2] = {static_cast<size_t>(width),
+                                  static_cast<size_t>(height)};
+    status = clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel,
+                                    2, NULL, global_work_size, NULL, 0, NULL,
+                                    NULL);
+    CL_CHECK_ERRORS(status);
+  } else {
+    DLOG << "error: unsupported bias dims";
+  }
+}
 template class ElementwiseAddKernel<GPU_CL, float>;
...
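The work sizes above come from ImageWidth() and ImageHeight() on the input CLImage. As a hedged illustration of why one work-item per texel covers the whole tensor, and of what tensor_w is for: assuming the default image converter folds an NCHW tensor as ImageWidth = ((C + 3) / 4) * W and ImageHeight = N * H (an assumption, not defined in this commit), the extents would be computed as in the sketch below.

#include <cstdio>

// Hypothetical helper mirroring the assumed NCHW-to-image2d folding. Dividing
// an image column by the tensor width W (tensor_w above) then recovers the
// channel block that channel_add uses to index the bias image.
struct ImageDims {
  int width;
  int height;
};

ImageDims FoldedImageDims(int N, int C, int H, int W) {
  ImageDims dims;
  dims.width = ((C + 3) / 4) * W;  // four channels packed into one half4 texel
  dims.height = N * H;
  return dims;
}

int main() {
  ImageDims d = FoldedImageDims(1, 8, 16, 16);  // example NCHW shape
  std::printf("image extents: %d x %d\n", d.width, d.height);  // 32 x 16
  return 0;
}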
@@ -49,7 +49,7 @@ void FeedKernel<GPU_CL, float>::Compute(const FeedParam<GPU_CL> &param) {
   status = clSetKernelArg(kernel, 3, sizeof(cl_int), (void *)&height);
   CL_CHECK_ERRORS(status);
-  size_t global_work_size[2] = {height, width};
+  size_t global_work_size[2] = {width, height};
   status = clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel, 2,
                                   NULL, global_work_size, NULL, 0, NULL, NULL);
   CL_CHECK_ERRORS(status);
@@ -61,7 +61,6 @@ void FeedKernel<GPU_CL, float>::Compute(const FeedParam<GPU_CL> &param) {
   size_t region[3] = {height, width, 1};
   clEnqueueReadImage(commandQueue, cl_image, CL_TRUE, origin, region, 0, 0, out,
                      0, NULL, NULL);
 }
 template class FeedKernel<GPU_CL, float>;
...
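For completeness, a minimal host-side read-back of an image2d into host memory is sketched below; the names are illustrative and not from this commit. Per the OpenCL specification, the region argument of clEnqueueReadImage is ordered {width, height, depth} in pixels.

#include <CL/cl.h>
#include <vector>

// Illustrative blocking read-back of a half4 image2d into host memory.
std::vector<cl_half> ReadImageToHost(cl_command_queue queue, cl_mem image,
                                     size_t width, size_t height) {
  std::vector<cl_half> host(width * height * 4);  // four half values per texel
  size_t origin[3] = {0, 0, 0};
  size_t region[3] = {width, height, 1};  // spec order: {width, height, depth}
  clEnqueueReadImage(queue, image, CL_TRUE, origin, region, 0, 0, host.data(),
                     0, NULL, NULL);
  return host;
}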