Commit 5214ff03 authored by Ray Liu, committed by GitHub

Merge pull request #1507 from PaddlePaddle/develop_cl_increase1x1_package

GPU 1x1 convolution optimisation; closes #1506
......@@ -187,7 +187,7 @@ else()
set(NET "default" CACHE STRING "select net type")
endif()
set_property(CACHE NET PROPERTY STRINGS "default" "googlenet" "mobilenet" "yolo" "squeezenet" "FPGA_NET_V1" "FPGA_NET_V2" "NLP")
set_property(CACHE NET PROPERTY STRINGS "default" "googlenet" "mobilenet" "yolo" "squeezenet" "FPGA_NET_V1" "FPGA_NET_V2" "NLP" "op")
include("${CMAKE_CURRENT_LIST_DIR}/tools/op.cmake")
# build library
......
......@@ -27,9 +27,9 @@ bool CLEngine::Init() {
return true;
}
cl_int status;
SetPlatform();
SetClDeviceId();
bool is_setplatform_success = SetPlatform();
bool is_setcldeviceid_success = SetClDeviceId();
is_init_success_ = is_setplatform_success && is_setcldeviceid_success;
initialized_ = true;
return initialized_;
// setClCommandQueue();
......@@ -44,11 +44,14 @@ CLEngine *CLEngine::Instance() {
return &cl_engine_;
}
bool CLEngine::isInitSuccess() { return is_init_success_; }
bool CLEngine::SetPlatform() {
platform_ = NULL; // the chosen platform
cl_uint numPlatforms; // the NO. of platforms
cl_int status = clGetPlatformIDs(0, NULL, &numPlatforms);
if (status != CL_SUCCESS) {
return false;
}
/**For clarity, choose the first available platform. */
if (numPlatforms > 0) {
cl_platform_id *platforms = reinterpret_cast<cl_platform_id *>(
......@@ -56,10 +59,10 @@ bool CLEngine::SetPlatform() {
status = clGetPlatformIDs(numPlatforms, platforms, NULL);
platform_ = platforms[0];
free(platforms);
return true;
} else {
return false;
return status == CL_SUCCESS;
}
return false;
}
bool CLEngine::SetClDeviceId() {
......@@ -67,13 +70,15 @@ bool CLEngine::SetClDeviceId() {
devices_ = NULL;
cl_int status =
clGetDeviceIDs(platform_, CL_DEVICE_TYPE_GPU, 0, NULL, &numDevices);
if (status != CL_SUCCESS) {
return false;
}
if (numDevices > 0) {
devices_ = reinterpret_cast<cl_device_id *>(
malloc(numDevices * sizeof(cl_device_id)));
status = clGetDeviceIDs(platform_, CL_DEVICE_TYPE_GPU, numDevices, devices_,
NULL);
return true;
return status == CL_SUCCESS;
}
return false;
}
......
......@@ -31,7 +31,7 @@ class CLEngine {
static CLEngine *Instance();
bool Init();
bool isInitSuccess();
std::unique_ptr<_cl_context, CLContextDeleter> CreateContext() {
cl_int status;
cl_context c = clCreateContext(NULL, 1, devices_, NULL, NULL, &status);
......@@ -51,6 +51,20 @@ class CLEngine {
return std::move(command_queue_ptr);
}
cl_context getContext() {
if (context_ == nullptr) {
context_ = CreateContext();
}
return context_.get();
}
cl_command_queue getClCommandQueue() {
if (command_queue_ == nullptr) {
command_queue_ = CreateClCommandQueue(getContext());
}
return command_queue_.get();
}
std::unique_ptr<_cl_program, CLProgramDeleter> CreateProgramWith(
cl_context context, std::string file_name) {
FILE *file = fopen(file_name.c_str(), "rb");
......@@ -137,6 +151,11 @@ class CLEngine {
std::string cl_path_;
std::unique_ptr<_cl_program, CLProgramDeleter> program_;
std::unique_ptr<_cl_context, CLContextDeleter> context_ = nullptr;
std::unique_ptr<_cl_command_queue, CLCommQueueDeleter> command_queue_ =
nullptr;
// bool SetClContext();
// bool SetClCommandQueue();
......@@ -144,6 +163,7 @@ class CLEngine {
// bool LoadKernelFromFile(const char *kernel_file);
// bool BuildProgram();
bool is_init_success_ = false;
};
} // namespace framework
......
......@@ -29,12 +29,12 @@ namespace framework {
class CLScope {
public:
CLScope() {
CLEngine *engin = CLEngine::Instance();
context_ = engin->CreateContext();
command_queue_ = engin->CreateClCommandQueue(context_.get());
CLEngine *engine = CLEngine::Instance();
context_ = engine->getContext();
command_queue_ = engine->getClCommandQueue();
}
cl_command_queue CommandQueue() { return command_queue_.get(); }
cl_command_queue CommandQueue() { return command_queue_; }
std::unique_ptr<_cl_kernel, CLKernelDeleter> GetKernel(
const std::string &kernel_name, const std::string &file_name) {
......@@ -49,7 +49,7 @@ class CLScope {
return std::move(kernel);
}
cl_context Context() { return context_.get(); }
cl_context Context() { return context_; }
cl_program Program(const std::string &file_name) {
auto it = programs_.find(file_name);
......@@ -58,7 +58,7 @@ class CLScope {
}
auto program = CLEngine::Instance()->CreateProgramWith(
context_.get(),
context_,
CLEngine::Instance()->GetCLPath() + "/cl_kernel/" + file_name);
DLOG << " --- begin build program -> " << file_name << " --- ";
......@@ -72,8 +72,8 @@ class CLScope {
private:
cl_int status_;
std::unique_ptr<_cl_context, CLContextDeleter> context_;
std::unique_ptr<_cl_command_queue, CLCommQueueDeleter> command_queue_;
cl_context context_;
cl_command_queue command_queue_;
std::unordered_map<std::string,
std::unique_ptr<_cl_program, CLProgramDeleter>>
programs_;
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_MOBILE_CL
#include "io/opencl_interface.h"
#include "framework/cl/cl_engine.h"
#include "framework/cl/cl_scope.h"
namespace paddle_mobile {
cl_context getContext() {
return framework::CLEngine::Instance()->getContext();
}
cl_command_queue getClCommandQueue() {
return framework::CLEngine::Instance()->getClCommandQueue();
}
bool isInitSuccess() {
return framework::CLEngine::Instance()->isInitSuccess();
}
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifdef PADDLE_MOBILE_CL
#include "CL/cl.h"
namespace paddle_mobile {
cl_context getContext();
cl_command_queue getClCommandQueue();
bool isInitSuccess();
#endif
} // namespace paddle_mobile
......@@ -20,6 +20,8 @@ limitations under the License. */
#endif // _OPENMP
#ifdef PADDLE_MOBILE_CL
#include <CL/cl.h>
#include <mutex>
#include "framework/cl/cl_engine.h"
#include "framework/cl/cl_tensor.h"
#endif
#include "operators/math/gemm.h"
......@@ -202,11 +204,15 @@ double PaddleMobile<CPU, float>::GetPredictTime() {
operators::math::Gemm gemm;
auto time1 = paddle_mobile::time();
gemm.Sgemm(m, n, k, static_cast<float>(1), a, lda, b, ldb,
static_cast<float>(0), c, ldc, false,
static_cast<float *>(nullptr));
int times = 4;
for (int j = 0; j < times; ++j) {
gemm.Sgemm(m, n, k, static_cast<float>(1), a, lda, b, ldb,
static_cast<float>(0), c, ldc, false,
static_cast<float *>(nullptr));
}
auto time2 = paddle_mobile::time();
double cost = paddle_mobile::time_diff(time1, time2);
double cost = paddle_mobile::time_diff(time1, time2) / times;
paddle_mobile::memory::Free(a);
paddle_mobile::memory::Free(b);
paddle_mobile::memory::Free(c);
......@@ -282,21 +288,11 @@ void PaddleMobile<Device, T>::SetCLPath(std::string path) {
template <>
double PaddleMobile<GPU_CL, float>::GetPredictTime() {
cl_int status;
cl_uint nPlatform;
clGetPlatformIDs(0, NULL, &nPlatform);
cl_platform_id *listPlatform = reinterpret_cast<cl_platform_id *>(
malloc(nPlatform * sizeof(cl_platform_id)));
clGetPlatformIDs(nPlatform, listPlatform, NULL);
cl_uint nDevice = 0;
clGetDeviceIDs(listPlatform[0], CL_DEVICE_TYPE_GPU, 0, NULL, &nDevice);
cl_device_id *listDevice =
reinterpret_cast<cl_device_id *>(malloc(nDevice * sizeof(cl_device_id)));
clGetDeviceIDs(listPlatform[0], CL_DEVICE_TYPE_GPU, nDevice, listDevice,
NULL);
cl_context context =
clCreateContext(NULL, nDevice, listDevice, NULL, NULL, &status);
cl_command_queue queue =
clCreateCommandQueue(context, listDevice[0], 0, &status);
if (!framework::CLEngine::Instance()->isInitSuccess()) {
return -1;
}
cl_context context = framework::CLEngine::Instance()->getContext();
cl_command_queue queue = framework::CLEngine::Instance()->getClCommandQueue();
int n = 1;
int c = 3;
......@@ -410,7 +406,7 @@ double PaddleMobile<GPU_CL, float>::GetPredictTime() {
CL_CHECK_ERRORS(status);
clFinish(queue);
queue = clCreateCommandQueue(context, listDevice[0], 0, &status);
// queue = clCreateCommandQueue(context, listDevice[0], 0, &status);
path = framework::CLEngine::Instance()->GetCLPath() +
"/cl_kernel/conv_kernel.cl";
......@@ -465,15 +461,18 @@ double PaddleMobile<GPU_CL, float>::GetPredictTime() {
// cl_event wait_event = param.Input()->GetClEvent();
size_t global_work_size2[3] = {8, 224, 224};
auto time1 = paddle_mobile::time();
status = clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size2,
NULL, 0, NULL, NULL);
int times = 10;
for (int i = 0; i < times; ++i) {
status = clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size2,
NULL, 0, NULL, NULL);
}
CL_CHECK_ERRORS(status);
clFinish(queue);
auto time2 = paddle_mobile::time();
paddle_mobile::memory::Free(input);
paddle_mobile::memory::Free(filter);
if (status == CL_SUCCESS) {
return paddle_mobile::time_diff(time1, time2);
return paddle_mobile::time_diff(time1, time2) / times;
} else {
return -1;
}
......
......@@ -66,6 +66,9 @@ REGISTER_OPERATOR_CL(relu, ops::ReluOp);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(sigmoid, ops::SigmoidOp);
#endif
#ifdef PADDLE_MOBILE_CL
REGISTER_OPERATOR_CL(sigmoid, ops::SigmoidOp);
#endif
#endif // SIGMOID_OP
#ifdef TANH_OP
......
......@@ -561,7 +561,858 @@ __kernel void conv_1x1(__private const int global_size_dim0,
write_imageh(output_image, output_pos, output);
}
__kernel void conv_1x1_spl(
__private const int global_size_dim0, __private const int global_size_dim1,
__private const int global_size_dim2, __read_only image2d_t input_image,
__read_only image2d_t filter,
#ifdef BIASE
__read_only image2d_t bias,
#endif
#ifdef BATCH_NORM
__read_only image2d_t new_scale, __read_only image2d_t new_biase,
#endif
__write_only image2d_t output_image, __private const int stride,
__private const int offset, __private const int input_c,
__private const int dilation,
__private const int input_width, /* of one block */
__private const int input_height, /* of one block */
__private const int output_width,
__private const int output_height,
__private const int old_w
) {
const int out_c = get_global_id(0);
const int out_w = get_global_id(1);
const int out_nh = get_global_id(2);
int out_w0 = out_w;
int out_w1 = out_w + global_size_dim1;
int out_w2 = out_w + global_size_dim1 * 2;
int out_w3 = out_w + global_size_dim1 * 3;
// int out_w1 = out_w + global_size_dim1;
// int out_w2 = out_w + global_size_dim1 * 2;
// int out_w3 = out_w + global_size_dim1 * 3;
const sampler_t sampler =
CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
int2 stride_xy = (int2)(stride, stride);
int2 ouput_pos_in_one_block0 = (int2)(out_w0, out_nh);
int2 in_pos_in_one_block0 =
ouput_pos_in_one_block0 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block1 = (int2)(out_w1, out_nh);
int2 in_pos_in_one_block1 =
ouput_pos_in_one_block1 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block2 = (int2)(out_w2, out_nh);
int2 in_pos_in_one_block2 =
ouput_pos_in_one_block2 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block3 = (int2)(out_w3, out_nh);
int2 in_pos_in_one_block3 =
ouput_pos_in_one_block3 * stride_xy + (int2)(offset, offset);
#ifdef BIASE
half4 output0= read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output1 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output2 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output3 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output0 = 0.0f;
// half4 output1 = 0.0f;
// half4 output2 = 0.0f;
// half4 output3 = 0.0f;
#else
half4 output0 = 0.0f;
half4 output1 = 0.0f;
half4 output2 = 0.0f;
half4 output3 = 0.0f;
#endif
for (int i = 0; i < input_c; ++i) {
// ------------0---------------
int2 pos_in = (int2)(i * input_width + in_pos_in_one_block0.x, in_pos_in_one_block0.y);
half4 input0 = read_imageh(input_image, sampler, pos_in);
half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 0));
half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 1));
half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 2));
half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 3));
output0 = mad(input0.x, weight0, output0);
output0 = mad(input0.y, weight1, output0);
output0 = mad(input0.z, weight2, output0);
output0 = mad(input0.w, weight3, output0);
// -------------1--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block1.x, in_pos_in_one_block1.y);
half4 input1 = read_imageh(input_image, sampler, pos_in);
//
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output1 = mad(input1.x, weight0, output1);
output1 = mad(input1.y, weight1, output1);
output1 = mad(input1.z, weight2, output1);
output1 = mad(input1.w, weight3, output1);
// -------------2--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block2.x, in_pos_in_one_block2.y);
half4 input2 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output2 = mad(input2.x, weight0, output2);
output2 = mad(input2.y, weight1, output2);
output2 = mad(input2.z, weight2, output2);
output2 = mad(input2.w, weight3, output2);
// -------------3--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block3.x, in_pos_in_one_block3.y);
half4 input3 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output3 = mad(input3.x, weight0, output3);
output3 = mad(input3.y, weight1, output3);
output3 = mad(input3.z, weight2, output3);
output3 = mad(input3.w, weight3, output3);
}
#ifdef BATCH_NORM
output0 = output0 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output1 = output1 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output2 = output2 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output3 = output3 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
#endif
#ifdef RELU
output0 = activation(output0);
output1 = activation(output1);
output2 = activation(output2);
output3 = activation(output3);
#endif
int outpos_main = mul24(out_c , old_w);
int2 output_pos0 = (int2)(outpos_main + out_w0, out_nh);
if (out_w0 < old_w) {
write_imageh(output_image, output_pos0, output0);
}
int2 output_pos1 = (int2)(outpos_main + out_w1, out_nh);
if (out_w1 < old_w){
write_imageh(output_image, output_pos1, output1);
}
int2 output_pos2 = (int2)(outpos_main + out_w2, out_nh);
if (out_w2 < old_w){
write_imageh(output_image, output_pos2, output2);
}
int2 output_pos3 = (int2)(outpos_main + out_w3, out_nh);
if (out_w3 < old_w){
write_imageh(output_image, output_pos3, output3);
}
}
__kernel void conv_1x1_spl2(
__private const int global_size_dim0, __private const int global_size_dim1,
__private const int global_size_dim2, __read_only image2d_t input_image,
__read_only image2d_t filter,
#ifdef BIASE
__read_only image2d_t bias,
#endif
#ifdef BATCH_NORM
__read_only image2d_t new_scale, __read_only image2d_t new_biase,
#endif
__write_only image2d_t output_image, __private const int stride,
__private const int offset, __private const int input_c,
__private const int dilation,
__private const int input_width, /* of one block */
__private const int input_height, /* of one block */
__private const int output_width,
__private const int output_height,
__private const int old_w
) {
const int out_c = get_global_id(0);
const int out_w = get_global_id(1);
const int out_nh = get_global_id(2);
int out_w0 = out_w;
int out_w1 = out_w + global_size_dim1;
int out_w2 = out_w + global_size_dim1 * 2;
int out_w3 = out_w + global_size_dim1 * 3;
int out_w4 = out_w + global_size_dim1 * 4;
int out_w5 = out_w + global_size_dim1 * 5;
int out_w6 = out_w + global_size_dim1 * 6;
int out_w7 = out_w + global_size_dim1 * 7;
// int out_w1 = out_w + global_size_dim1;
// int out_w2 = out_w + global_size_dim1 * 2;
// int out_w3 = out_w + global_size_dim1 * 3;
const sampler_t sampler =
CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
int2 stride_xy = (int2)(stride, stride);
int2 ouput_pos_in_one_block0 = (int2)(out_w0, out_nh);
int2 in_pos_in_one_block0 =
ouput_pos_in_one_block0 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block1 = (int2)(out_w1, out_nh);
int2 in_pos_in_one_block1 =
ouput_pos_in_one_block1 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block2 = (int2)(out_w2, out_nh);
int2 in_pos_in_one_block2 =
ouput_pos_in_one_block2 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block3 = (int2)(out_w3, out_nh);
int2 in_pos_in_one_block3 =
ouput_pos_in_one_block3 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block4 = (int2)(out_w4, out_nh);
int2 in_pos_in_one_block4 =
ouput_pos_in_one_block4 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block5 = (int2)(out_w5, out_nh);
int2 in_pos_in_one_block5 =
ouput_pos_in_one_block5 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block6 = (int2)(out_w6, out_nh);
int2 in_pos_in_one_block6 =
ouput_pos_in_one_block6 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block7 = (int2)(out_w7, out_nh);
int2 in_pos_in_one_block7 =
ouput_pos_in_one_block7 * stride_xy + (int2)(offset, offset);
#ifdef BIASE
half4 output0 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output1 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output2 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output3 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output4 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output5 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output6 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output7 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output0 = 0.0f;
// half4 output1 = 0.0f;
// half4 output2 = 0.0f;
// half4 output3 = 0.0f;
#else
half4 output0 = 0.0f;
half4 output1 = 0.0f;
half4 output2 = 0.0f;
half4 output3 = 0.0f;
half4 output4 = 0.0f;
half4 output5 = 0.0f;
half4 output6 = 0.0f;
half4 output7 = 0.0f;
#endif
for (int i = 0; i < input_c; ++i) {
// ------------0---------------
int2 pos_in = (int2)(i * input_width + in_pos_in_one_block0.x, in_pos_in_one_block0.y);
half4 input0 = read_imageh(input_image, sampler, pos_in);
half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 0));
half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 1));
half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 2));
half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 3));
output0 = mad(input0.x, weight0, output0);
output0 = mad(input0.y, weight1, output0);
output0 = mad(input0.z, weight2, output0);
output0 = mad(input0.w, weight3, output0);
// -------------1--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block1.x, in_pos_in_one_block1.y);
half4 input1 = read_imageh(input_image, sampler, pos_in);
//
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output1 = mad(input1.x, weight0, output1);
output1 = mad(input1.y, weight1, output1);
output1 = mad(input1.z, weight2, output1);
output1 = mad(input1.w, weight3, output1);
// -------------2--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block2.x, in_pos_in_one_block2.y);
half4 input2 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output2 = mad(input2.x, weight0, output2);
output2 = mad(input2.y, weight1, output2);
output2 = mad(input2.z, weight2, output2);
output2 = mad(input2.w, weight3, output2);
// -------------3--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block3.x, in_pos_in_one_block3.y);
half4 input3 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output3 = mad(input3.x, weight0, output3);
output3 = mad(input3.y, weight1, output3);
output3 = mad(input3.z, weight2, output3);
output3 = mad(input3.w, weight3, output3);
// -------------4--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block4.x, in_pos_in_one_block4.y);
half4 input4 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output4 = mad(input4.x, weight0, output4);
output4 = mad(input4.y, weight1, output4);
output4 = mad(input4.z, weight2, output4);
output4 = mad(input4.w, weight3, output4);
// -------------5--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block5.x, in_pos_in_one_block5.y);
half4 input5 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output5= mad(input5.x, weight0, output5);
output5 = mad(input5.y, weight1, output5);
output5 = mad(input5.z, weight2, output5);
output5 = mad(input5.w, weight3, output5);
// -------------6--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block6.x, in_pos_in_one_block6.y);
half4 input6 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output6 = mad(input6.x, weight0, output6);
output6 = mad(input6.y, weight1, output6);
output6 = mad(input6.z, weight2, output6);
output6 = mad(input6.w, weight3, output6);
// -------------7--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block7.x, in_pos_in_one_block7.y);
half4 input7 = read_imageh(input_image, sampler, pos_in);
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output7 = mad(input7.x, weight0, output7);
output7 = mad(input7.y, weight1, output7);
output7 = mad(input7.z, weight2, output7);
output7 = mad(input7.w, weight3, output7);
}
#ifdef BATCH_NORM
output0 = output0 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output1 = output1 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output2 = output2 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output3 = output3 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output4 = output4 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output5 = output5 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output6 = output6 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output7 = output7 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
#endif
#ifdef RELU
output0 = activation(output0);
output1 = activation(output1);
output2 = activation(output2);
output3 = activation(output3);
output4 = activation(output4);
output5 = activation(output5);
output6 = activation(output6);
output7 = activation(output7);
#endif
int outpos_main = mul24(out_c , old_w);
int2 output_pos0 = (int2)(outpos_main + out_w0, out_nh);
if (out_w0 < old_w) {
write_imageh(output_image, output_pos0, output0);
}
int2 output_pos1 = (int2)(outpos_main + out_w1, out_nh);
if (out_w1 < old_w){
write_imageh(output_image, output_pos1, output1);
}
int2 output_pos2 = (int2)(outpos_main + out_w2, out_nh);
if (out_w2 < old_w){
write_imageh(output_image, output_pos2, output2);
}
int2 output_pos3 = (int2)(outpos_main + out_w3, out_nh);
if (out_w3 < old_w){
write_imageh(output_image, output_pos3, output3);
}
int2 output_pos4 = (int2)(outpos_main + out_w4, out_nh);
if (out_w4 < old_w){
write_imageh(output_image, output_pos4, output4);
}
int2 output_pos5 = (int2)(outpos_main + out_w5, out_nh);
if (out_w5 < old_w){
write_imageh(output_image, output_pos5, output5);
}
int2 output_pos6 = (int2)(outpos_main + out_w6, out_nh);
if (out_w6 < old_w){
write_imageh(output_image, output_pos6, output6);
}
int2 output_pos7 = (int2)(outpos_main + out_w7, out_nh);
if (out_w7 < old_w){
write_imageh(output_image, output_pos7, output7);
}
}
__kernel void conv_1x1_spl3(
__private const int global_size_dim0, __private const int global_size_dim1,
__private const int global_size_dim2, __read_only image2d_t input_image,
__read_only image2d_t filter,
#ifdef BIASE
__read_only image2d_t bias,
#endif
#ifdef BATCH_NORM
__read_only image2d_t new_scale, __read_only image2d_t new_biase,
#endif
__write_only image2d_t output_image, __private const int stride,
__private const int offset, __private const int input_c,
__private const int dilation,
__private const int input_width, /* of one block */
__private const int input_height, /* of one block */
__private const int output_width,
__private const int output_height,
__private const int old_w
) {
const int out_c = get_global_id(0);
const int out_w = get_global_id(1);
const int out_nh = get_global_id(2);
int out_w0 = out_w;
int out_w1 = out_w + global_size_dim1;
int out_w2 = out_w + global_size_dim1 * 2;
// int out_w3 = out_w + global_size_dim1 * 3;
// int out_w4 = out_w + global_size_dim1 * 4;
// int out_w5 = out_w + global_size_dim1 * 5;
// int out_w6 = out_w + global_size_dim1 * 6;
// int out_w7 = out_w + global_size_dim1 * 7;
// int out_w1 = out_w + global_size_dim1;
// int out_w2 = out_w + global_size_dim1 * 2;
// int out_w3 = out_w + global_size_dim1 * 3;
const sampler_t sampler =
CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
int2 stride_xy = (int2)(stride, stride);
int2 ouput_pos_in_one_block0 = (int2)(out_w0, out_nh);
int2 in_pos_in_one_block0 =
ouput_pos_in_one_block0 * stride_xy + (int2)(offset, offset);
int2 ouput_pos_in_one_block1 = (int2)(out_w1, out_nh);
int2 in_pos_in_one_block1 =
ouput_pos_in_one_block1 * stride_xy + (int2)(offset, offset);
// int2 ouput_pos_in_one_block2 = (int2)(out_w2, out_nh);
// int2 in_pos_in_one_block2 =
// ouput_pos_in_one_block2 * stride_xy + (int2)(offset, offset);
//
// int2 ouput_pos_in_one_block3 = (int2)(out_w3, out_nh);
// int2 in_pos_in_one_block3 =
// ouput_pos_in_one_block3 * stride_xy + (int2)(offset, offset);
//
// int2 ouput_pos_in_one_block4 = (int2)(out_w4, out_nh);
// int2 in_pos_in_one_block4 =
// ouput_pos_in_one_block4 * stride_xy + (int2)(offset, offset);
//
// int2 ouput_pos_in_one_block5 = (int2)(out_w5, out_nh);
// int2 in_pos_in_one_block5 =
// ouput_pos_in_one_block5 * stride_xy + (int2)(offset, offset);
//
// int2 ouput_pos_in_one_block6 = (int2)(out_w6, out_nh);
// int2 in_pos_in_one_block6 =
// ouput_pos_in_one_block6 * stride_xy + (int2)(offset, offset);
//
// int2 ouput_pos_in_one_block7 = (int2)(out_w7, out_nh);
// int2 in_pos_in_one_block7 =
// ouput_pos_in_one_block7 * stride_xy + (int2)(offset, offset);
#ifdef BIASE
half4 output0 = read_imageh(bias, sampler, (int2)(out_c, 0));
half4 output1 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output2 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output3 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output4 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output5 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output6 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output7 = read_imageh(bias, sampler, (int2)(out_c, 0));
// half4 output0 = 0.0f;
// half4 output1 = 0.0f;
// half4 output2 = 0.0f;
// half4 output3 = 0.0f;
#else
half4 output0 = 0.0f;
half4 output1 = 0.0f;
// half4 output2 = 0.0f;
// half4 output3 = 0.0f;
// half4 output4 = 0.0f;
// half4 output5 = 0.0f;
// half4 output6 = 0.0f;
// half4 output7 = 0.0f;
#endif
for (int i = 0; i < input_c; ++i) {
// ------------0---------------
int2 pos_in = (int2)(i * input_width + in_pos_in_one_block0.x, in_pos_in_one_block0.y);
half4 input0 = read_imageh(input_image, sampler, pos_in);
half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 0));
half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 1));
half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 2));
half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 3));
output0 = mad(input0.x, weight0, output0);
output0 = mad(input0.y, weight1, output0);
output0 = mad(input0.z, weight2, output0);
output0 = mad(input0.w, weight3, output0);
// -------------1--------------
pos_in = (int2)(i * input_width + in_pos_in_one_block1.x, in_pos_in_one_block1.y);
half4 input1 = read_imageh(input_image, sampler, pos_in);
//
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// * 4 + 3));
output1 = mad(input1.x, weight0, output1);
output1 = mad(input1.y, weight1, output1);
output1 = mad(input1.z, weight2, output1);
output1 = mad(input1.w, weight3, output1);
//
// // -------------2--------------
// pos_in = (int2)(i * input_width + in_pos_in_one_block2.x, in_pos_in_one_block2.y);
// half4 input2 = read_imageh(input_image, sampler, pos_in);
//
// // half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// // 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// // + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// // 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// // * 4 + 3));
//
// output2 = mad(input2.x, weight0, output2);
// output2 = mad(input2.y, weight1, output2);
// output2 = mad(input2.z, weight2, output2);
// output2 = mad(input2.w, weight3, output2);
//
// // -------------3--------------
// pos_in = (int2)(i * input_width + in_pos_in_one_block3.x, in_pos_in_one_block3.y);
// half4 input3 = read_imageh(input_image, sampler, pos_in);
//
// // half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// // 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// // + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// // 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// // * 4 + 3));
//
// output3 = mad(input3.x, weight0, output3);
// output3 = mad(input3.y, weight1, output3);
// output3 = mad(input3.z, weight2, output3);
// output3 = mad(input3.w, weight3, output3);
//
//
// // -------------4--------------
// pos_in = (int2)(i * input_width + in_pos_in_one_block4.x, in_pos_in_one_block4.y);
// half4 input4 = read_imageh(input_image, sampler, pos_in);
//
// // half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// // 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// // + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// // 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// // * 4 + 3));
//
// output4 = mad(input4.x, weight0, output4);
// output4 = mad(input4.y, weight1, output4);
// output4 = mad(input4.z, weight2, output4);
// output4 = mad(input4.w, weight3, output4);
//
//
//
// // -------------5--------------
// pos_in = (int2)(i * input_width + in_pos_in_one_block5.x, in_pos_in_one_block5.y);
// half4 input5 = read_imageh(input_image, sampler, pos_in);
//
// // half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// // 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// // + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// // 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// // * 4 + 3));
//
// output5= mad(input5.x, weight0, output5);
// output5 = mad(input5.y, weight1, output5);
// output5 = mad(input5.z, weight2, output5);
// output5 = mad(input5.w, weight3, output5);
//
//
// // -------------6--------------
// pos_in = (int2)(i * input_width + in_pos_in_one_block6.x, in_pos_in_one_block6.y);
// half4 input6 = read_imageh(input_image, sampler, pos_in);
//
// // half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// // 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// // + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// // 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// // * 4 + 3));
//
// output6 = mad(input6.x, weight0, output6);
// output6 = mad(input6.y, weight1, output6);
// output6 = mad(input6.z, weight2, output6);
// output6 = mad(input6.w, weight3, output6);
//
//
// // -------------7--------------
// pos_in = (int2)(i * input_width + in_pos_in_one_block7.x, in_pos_in_one_block7.y);
// half4 input7 = read_imageh(input_image, sampler, pos_in);
//
// // half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 +
// // 0)); half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4
// // + 1)); half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i *
// // 4 + 2)); half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i
// // * 4 + 3));
//
// output7 = mad(input7.x, weight0, output7);
// output7 = mad(input7.y, weight1, output7);
// output7 = mad(input7.z, weight2, output7);
// output7 = mad(input7.w, weight3, output7);
}
#ifdef BATCH_NORM
output0 = output0 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
output1 = output1 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
read_imageh(new_biase, sampler, (int2)(out_c, 0));
//
// output2 = output2 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
// read_imageh(new_biase, sampler, (int2)(out_c, 0));
//
// output3 = output3 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
// read_imageh(new_biase, sampler, (int2)(out_c, 0));
//
// output4 = output4 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
// read_imageh(new_biase, sampler, (int2)(out_c, 0));
//
// output5 = output5 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
// read_imageh(new_biase, sampler, (int2)(out_c, 0));
//
// output6 = output6 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
// read_imageh(new_biase, sampler, (int2)(out_c, 0));
//
// output7 = output7 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) +
// read_imageh(new_biase, sampler, (int2)(out_c, 0));
#endif
#ifdef RELU
output0 = activation(output0);
output1 = activation(output1);
// output2 = activation(output2);
// output3 = activation(output3);
// output4 = activation(output4);
// output5 = activation(output5);
// output6 = activation(output6);
// output7 = activation(output7);
#endif
int outpos_main = mul24(out_c , old_w);
int2 output_pos0 = (int2)(outpos_main + out_w0, out_nh);
if (out_w0 < old_w) {
write_imageh(output_image, output_pos0, output0);
}
int2 output_pos1 = (int2)(outpos_main + out_w1, out_nh);
if (out_w1 < old_w){
write_imageh(output_image, output_pos1, output1);
}
//
// int2 output_pos2 = (int2)(outpos_main + out_w2, out_nh);
// if (out_w2 < old_w){
// write_imageh(output_image, output_pos2, output2);
// }
//
// int2 output_pos3 = (int2)(outpos_main + out_w3, out_nh);
// if (out_w3 < old_w){
// write_imageh(output_image, output_pos3, output3);
// }
//
// int2 output_pos4 = (int2)(outpos_main + out_w4, out_nh);
// if (out_w4 < old_w){
// write_imageh(output_image, output_pos4, output4);
// }
//
// int2 output_pos5 = (int2)(outpos_main + out_w5, out_nh);
// if (out_w5 < old_w){
// write_imageh(output_image, output_pos5, output5);
//
// }
// int2 output_pos6 = (int2)(outpos_main + out_w6, out_nh);
// if (out_w6 < old_w){
// write_imageh(output_image, output_pos6, output6);
// }
//
// int2 output_pos7 = (int2)(outpos_main + out_w7, out_nh);
// if (out_w7 < old_w){
// write_imageh(output_image, output_pos7, output7);
// }
}
//__kernel void conv_1x1_c(
// __private const int global_size_dim0,
// __private const int global_size_dim1,
// __private const int global_size_dim2,
// __read_only image2d_t input_image,
// __read_only image2d_t filter,
//#ifdef BIASE
// __read_only image2d_t bias,
//#endif
//#ifdef BATCH_NORM
// __read_only image2d_t new_scale,
// __read_only image2d_t new_biase,
//#endif
// __write_only image2d_t output_image,
// __private const int stride,
// __private const int offset,
// __private const int input_c,
// __private const int dilation,
// __private const int input_width, /* of one block */
// __private const int input_height, /* of one block */
// __private const int output_width,
// __private const int output_height,
// __private const int old_w) {
//
// const int out_c = get_global_id(0);
// const int out_w = get_global_id(1);
// const int out_nh = get_global_id(2);
//
// const sampler_t sampler =
// CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
// const int2 stride_xy = (int2)(stride, stride);
//
// for (int i = 0; i < input_c; ++i) {
// half4 weight0 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 0));
// half4 weight1 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 1));
// half4 weight2 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 2));
// half4 weight3 = read_imageh(filter, sampler, (int2)(out_c, i * 4 + 3));
//
//#pragma unroll
// for (int j = 0; j < 4; ++j) {
// int out_w0 = out_w + global_size_dim1 * j;
// int2 ouput_pos_in_one_block0 = (int2)(out_w0, out_nh);
// int2 in_pos_in_one_block0 = ouput_pos_in_one_block0 * stride_xy + (int2)(offset, offset);
//
//#ifdef BIASE
// half4 output0 = read_imageh(bias, sampler, (int2)(out_c, 0));
//#else
// half4 output0 = 0.0f;
//#endif
// int2 pos_in = (int2)(i * input_width + in_pos_in_one_block0.x, in_pos_in_one_block0.y);
// half4 input0 = read_imageh(input_image, sampler, pos_in);
//
// output0 = mad(input0.x, weight0, output0);
// output0 = mad(input0.y, weight1, output0);
// output0 = mad(input0.z, weight2, output0);
// output0 = mad(input0.w, weight3, output0);
//
//#ifdef BATCH_NORM
// output0 = output0 * read_imageh(new_scale, sampler, (int2)(out_c, 0)) + read_imageh(new_biase, sampler, (int2)(out_c, 0));
//#endif
//
//#ifdef RELU
// output0 = activation(output0);
//#endif
// int outpos_main = mul24(out_c, old_w);
// int2 output_pos0 = (int2)(outpos_main + out_w0, out_nh);
//
// if (out_w0 < old_w) {
// write_imageh(output_image, output_pos0, output0);
// }
// }
// }
//}
/*
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
// Element-wise logistic sigmoid: out(x, y) = 1 / (1 + exp(-in(x, y))),
// applied per half4 texel. One work-item handles one pixel.
__kernel void sigmoid(__read_only image2d_t input,
                      __write_only image2d_t output) {
  const sampler_t sampler = CLK_NORMALIZED_COORDS_TRUE |
                            CLK_ADDRESS_CLAMP |
                            CLK_FILTER_NEAREST;
  const int2 coord = (int2)(get_global_id(0), get_global_id(1));
  half4 value = read_imageh(input, sampler, coord);
  value = 1.0f / (1.0f + exp(-value));
  write_imageh(output, coord, value);
}
\ No newline at end of file
......@@ -21,7 +21,7 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
bool optimise = true;
template <>
bool ConvAddBNReluKernel<GPU_CL, float>::Init(
FusionConvAddBNReluParam<GPU_CL> *param) {
......@@ -139,7 +139,12 @@ bool ConvAddBNReluKernel<GPU_CL, float>::Init(
if (param->Filter()->dims()[2] == 1 && param->Filter()->dims()[3] == 1) {
param->Filter()->InitNImage(cl_helper_.CLContext(),
cl_helper_.CLCommandQueue());
this->cl_helper_.AddKernel("conv_1x1", "conv_add_bn_relu_kernel.cl");
if (optimise) {
this->cl_helper_.AddKernel("conv_1x1_spl", "conv_add_bn_relu_kernel.cl");
} else {
this->cl_helper_.AddKernel("conv_1x1", "conv_add_bn_relu_kernel.cl");
}
DLOG << " conv add bn relu conv 1x1";
} else if (param->Filter()->dims()[1] == 1 &&
param->Input()->dims()[1] == param->Output()->dims()[1] &&
......@@ -205,81 +210,186 @@ void ConvAddBNReluKernel<GPU_CL, float>::Compute(
cl_int status;
status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
CL_CHECK_ERRORS(status);
if (optimise) {
if (param.Filter()->dims()[2] == 1 && param.Filter()->dims()[3] == 1) {
status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 1, sizeof(int), &w);
CL_CHECK_ERRORS(status);
int maped_w = maptofactor(w, 4);
status = clSetKernelArg(kernel, 1, sizeof(int), &maped_w);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &new_scale);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &new_scale);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 7, sizeof(cl_mem), &new_bias);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 7, sizeof(cl_mem), &new_bias);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 8, sizeof(cl_mem), &output);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 8, sizeof(cl_mem), &output);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 9, sizeof(int), &stride);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 9, sizeof(int), &stride);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 10, sizeof(int), &offset);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 10, sizeof(int), &offset);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 11, sizeof(int), &input_c);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 11, sizeof(int), &input_c);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 12, sizeof(int), &dilation);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 12, sizeof(int), &dilation);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 13, sizeof(int), &input_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 13, sizeof(int), &input_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 14, sizeof(int), &input_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 14, sizeof(int), &input_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 15, sizeof(int), &output_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 15, sizeof(int), &output_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 16, sizeof(int), &output_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 16, sizeof(int), &output_height);
CL_CHECK_ERRORS(status);
// cl_event out_event = param.Output()->GetClEvent();
// cl_event wait_event = param.Input()->GetClEvent();
status = clSetKernelArg(kernel, 17, sizeof(int), &w);
CL_CHECK_ERRORS(status);
/*
if (param.Filter()->dims()[2] == 1 &&
param.Filter()->dims()[3] == 1 &&
param.Filter()->dims()[0] % 16 == 0) {
DLOG << " before modifi work size: " << default_work_size;
const size_t work_size[3] = {
static_cast<const uint32_t>(default_work_size.data()[0]),
static_cast<const uint32_t>(maped_w),
static_cast<const uint32_t>(default_work_size.data()[2])};
default_work_size[0] = default_work_size[0] / 4;
status = clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel,
default_work_size.size(), NULL, work_size,
NULL, 0, NULL, NULL);
CL_CHECK_ERRORS(status);
} else {
status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
CL_CHECK_ERRORS(status);
DLOG << " modification work size: " << default_work_size;
DLOG << " input dims " << param.Input()->dims();
DLOG << " output dims " << param.Output()->dims();
DLOG << " filter dims: " << param.Filter()->dims();
DLOG << " biase dims : " << param.Bias()->dims();
status = clSetKernelArg(kernel, 1, sizeof(int), &w);
CL_CHECK_ERRORS(status);
}
*/
status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &new_scale);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 7, sizeof(cl_mem), &new_bias);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 8, sizeof(cl_mem), &output);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 9, sizeof(int), &stride);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 10, sizeof(int), &offset);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 11, sizeof(int), &input_c);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 12, sizeof(int), &dilation);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 13, sizeof(int), &input_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 14, sizeof(int), &input_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 15, sizeof(int), &output_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 16, sizeof(int), &output_height);
CL_CHECK_ERRORS(status);
status = clEnqueueNDRangeKernel(
this->cl_helper_.CLCommandQueue(), kernel, default_work_size.size(), NULL,
default_work_size.data(), NULL, 0, NULL, NULL);
CL_CHECK_ERRORS(status);
status = clEnqueueNDRangeKernel(
this->cl_helper_.CLCommandQueue(), kernel, default_work_size.size(),
NULL, default_work_size.data(), NULL, 0, NULL, NULL);
CL_CHECK_ERRORS(status);
}
} else {
status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 1, sizeof(int), &w);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &new_scale);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 7, sizeof(cl_mem), &new_bias);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 8, sizeof(cl_mem), &output);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 9, sizeof(int), &stride);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 10, sizeof(int), &offset);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 11, sizeof(int), &input_c);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 12, sizeof(int), &dilation);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 13, sizeof(int), &input_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 14, sizeof(int), &input_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 15, sizeof(int), &output_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 16, sizeof(int), &output_height);
CL_CHECK_ERRORS(status);
status = clEnqueueNDRangeKernel(
this->cl_helper_.CLCommandQueue(), kernel, default_work_size.size(),
NULL, default_work_size.data(), NULL, 0, NULL, NULL);
CL_CHECK_ERRORS(status);
}
}
template class ConvAddBNReluKernel<GPU_CL, float>;
......
......@@ -18,6 +18,7 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
bool optimise_convadd = true;
template <>
bool ConvAddKernel<GPU_CL, float>::Init(FusionConvAddParam<GPU_CL> *param) {
......@@ -35,8 +36,11 @@ bool ConvAddKernel<GPU_CL, float>::Init(FusionConvAddParam<GPU_CL> *param) {
if (param->Filter()->dims()[2] == 1 && param->Filter()->dims()[3] == 1) {
param->Filter()->InitNImage(cl_helper_.CLContext(),
cl_helper_.CLCommandQueue());
this->cl_helper_.AddKernel("conv_1x1", "conv_add_kernel.cl");
if (optimise_convadd) {
this->cl_helper_.AddKernel("conv_1x1_spl", "conv_add_kernel.cl");
} else {
this->cl_helper_.AddKernel("conv_1x1", "conv_add_kernel.cl");
}
} else if (param->Filter()->dims()[1] == 1 &&
param->Input()->dims()[1] == param->Output()->dims()[1] &&
param->Filter()->dims()[2] == 3) {
......@@ -95,58 +99,117 @@ void ConvAddKernel<GPU_CL, float>::Compute(
cl_int status;
status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
CL_CHECK_ERRORS(status);
if (optimise_convadd && param.Filter()->dims()[2] == 1 &&
param.Filter()->dims()[3] == 1) {
status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
CL_CHECK_ERRORS(status);
int maped_w = maptofactor(w, 4);
status = clSetKernelArg(kernel, 1, sizeof(int), &maped_w);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &output);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 7, sizeof(int), &stride);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 8, sizeof(int), &offset);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 1, sizeof(int), &w);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 9, sizeof(int), &input_c);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 10, sizeof(int), &dilation);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 11, sizeof(int), &input_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 12, sizeof(int), &input_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 13, sizeof(int), &output_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &output);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 14, sizeof(int), &output_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 7, sizeof(int), &stride);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 15, sizeof(int), &w);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 8, sizeof(int), &offset);
CL_CHECK_ERRORS(status);
const size_t work_size[3] = {
static_cast<const uint32_t>(default_work_size.data()[0]),
static_cast<const uint32_t>(maped_w),
static_cast<const uint32_t>(default_work_size.data()[2])};
status = clSetKernelArg(kernel, 9, sizeof(int), &input_c);
CL_CHECK_ERRORS(status);
status = clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel,
default_work_size.size(), NULL, work_size,
NULL, 0, NULL, NULL);
CL_CHECK_ERRORS(status);
} else {
status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 10, sizeof(int), &dilation);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 1, sizeof(int), &w);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 11, sizeof(int), &input_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 12, sizeof(int), &input_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 13, sizeof(int), &output_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 14, sizeof(int), &output_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
CL_CHECK_ERRORS(status);
// cl_event out_event = param.Output()->GetClEvent();
// cl_event wait_event = param.Input()->GetClEvent();
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &output);
CL_CHECK_ERRORS(status);
status = clEnqueueNDRangeKernel(
this->cl_helper_.CLCommandQueue(), kernel, default_work_size.size(), NULL,
default_work_size.data(), NULL, 0, NULL, NULL);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 7, sizeof(int), &stride);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 8, sizeof(int), &offset);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 9, sizeof(int), &input_c);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 10, sizeof(int), &dilation);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 11, sizeof(int), &input_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 12, sizeof(int), &input_height);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 13, sizeof(int), &output_width);
CL_CHECK_ERRORS(status);
status = clSetKernelArg(kernel, 14, sizeof(int), &output_height);
CL_CHECK_ERRORS(status);
status = clEnqueueNDRangeKernel(
this->cl_helper_.CLCommandQueue(), kernel, default_work_size.size(),
NULL, default_work_size.data(), NULL, 0, NULL, NULL);
CL_CHECK_ERRORS(status);
}
}
template class ConvAddKernel<GPU_CL, float>;
......
......@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef RESHAPE_OP
#include "operators/kernel/reshape_kernel.h"
......@@ -102,3 +103,4 @@ template class ReshapeKernel<GPU_CL, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef SIGMOID_OP
#include "operators/kernel/activation_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
bool SigmoidKernel<GPU_CL, float>::Init(SigmoidParam<GPU_CL>* sigmoid_param) {
  // Build and register the element-wise "sigmoid" OpenCL kernel once;
  // Compute() later fetches it via KernelAt(0).
  this->cl_helper_.AddKernel("sigmoid", "sigmoid.cl");
  return true;
}
template <>
void SigmoidKernel<GPU_CL, float>::Compute(const SigmoidParam<GPU_CL>& param) {
  // Launches the element-wise sigmoid kernel registered in Init():
  // reads the input CL image, writes sigmoid(input) into the output CL image.
  auto kernel = this->cl_helper_.KernelAt(0);
  const auto* input = param.InputX();
  auto* output = param.Out();
  auto inputImage = input->GetCLImage();
  auto outputImage = output->GetCLImage();
  // NOTE(review): return statuses of clSetKernelArg/clEnqueueNDRangeKernel
  // are ignored here, unlike sibling GPU_CL kernels that wrap each call in
  // CL_CHECK_ERRORS — consider adding the same checks.
  clSetKernelArg(kernel, 0, sizeof(cl_mem), &inputImage);
  clSetKernelArg(kernel, 1, sizeof(cl_mem), &outputImage);
  // One work-item per texel of the 2-D image layout; the previously computed
  // DefaultWorkSize(*output) was never used, so the dead call was removed.
  const size_t work_size[2] = {input->ImageWidth(), input->ImageHeight()};
  clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel, 2, NULL,
                         work_size, NULL, 0, NULL, NULL);
}
template class SigmoidKernel<GPU_CL, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
......@@ -36,6 +36,9 @@ class ConvAddBNReluKernel
public:
void Compute(const FusionConvAddBNReluParam<DeviceType> &param);
bool Init(FusionConvAddBNReluParam<DeviceType> *param);
inline int maptofactor(int i, int factor) {
return (i + factor - 1) / factor;
}
};
} // namespace operators
......
......@@ -41,6 +41,9 @@ class ConvAddKernel
public:
void Compute(const FusionConvAddParam<DeviceType> &param);
bool Init(FusionConvAddParam<DeviceType> *param);
inline int maptofactor(int i, int factor) {
return (i + factor - 1) / factor;
}
};
} // namespace operators
......
......@@ -44,12 +44,17 @@ template class ConvParam<FPGA>;
template class ConvParam<GPU_MALI>;
#endif
#ifdef ELEMENTWISEADD_OP
template class ElementwiseAddParam<CPU>;
template class ElementwiseAddParam<FPGA>;
template class ElementwiseAddParam<GPU_MALI>;
#endif
#ifdef ELEMENTWISEMUL_OP
template class ElementwiseMulParam<CPU>;
template class ElementwiseMulParam<FPGA>;
template class ElementwiseMulParam<GPU_MALI>;
#endif
#ifdef MUL_OP
template class MulParam<CPU>;
......
......@@ -154,6 +154,15 @@ if (CON GREATER -1)
endif ()
list(FIND NET "op" CON)
if (CON GREATER -1)
# gen test
ADD_EXECUTABLE(test-sigmoid operators/test_sigmoid_op.cpp test_include.h)
target_link_libraries(test-sigmoid paddle-mobile)
set(FOUND_MATCH ON)
endif ()
if (NOT FOUND_MATCH)
# gen test
ADD_EXECUTABLE(test-resnet net/test_resnet.cpp test_helper.h test_include.h executor_for_test.h)
......
......@@ -25,11 +25,11 @@ int main() {
paddle_mobile.SetCLPath("/data/local/tmp/bin");
#endif
auto isok = paddle_mobile.Load(
std::string(g_mobilenet_vision) + "/vision_mobilenet_model",
std::string(g_mobilenet_vision) + "/vision_mobilenet_params", true);
// auto isok = paddle_mobile.Load(
// std::string(g_mobilenet_vision) + "/vision_mobilenet_model",
// std::string(g_mobilenet_vision) + "/vision_mobilenet_params", true);
// auto isok = paddle_mobile.Load(std::string(g_mobilenet), true);
auto isok = paddle_mobile.Load(std::string(g_mobilenet), true);
if (isok) {
auto time2 = paddle_mobile::time();
std::cout << "load cost :" << paddle_mobile::time_diff(time1, time2) << "ms"
......@@ -37,8 +37,7 @@ int main() {
std::vector<float> input;
std::vector<int64_t> dims{1, 3, 224, 224};
GetInput<float>(g_test_image_1x3x224x224_vision_mobilenet_input, &input,
dims);
GetInput<float>(g_test_image_1x3x224x224_banana, &input, dims);
std::vector<float> vec_result = paddle_mobile.Predict(input, dims);
......
#!/usr/bin/env bash
NETS=""
declare -a supportedNets=("googlenet" "mobilenet" "yolo" "squeezenet" "resnet" "mobilenetssd" "nlp" "mobilenetfssd" "genet" "super")
declare -a supportedNets=("googlenet" "mobilenet" "yolo" "squeezenet" "resnet" "mobilenetssd" "nlp" "mobilenetfssd" "genet" "super" "op")
build_for_mac() {
if [ ! `which brew` ]; then
......
......@@ -228,6 +228,12 @@ if (CON GREATER -1)
set(FOUND_MATCH ON)
endif()
list(FIND NET "op" CON)
if (CON GREATER -1)
message("op enabled")
set(SIGMOID_OP ON)
set(FOUND_MATCH ON)
endif()
if(NOT FOUND_MATCH)
message("--default--")
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册