diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl
index 26b65e626fb22970d5c9bdcdb1bd96c3c88b29e0..7698a0228acc8b33180782b7fb0899a15a44893b 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl
+++ b/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl
@@ -5,63 +5,71 @@
 #define MIN(X, Y) (X < Y ? X : Y)
 __constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
 
-__kernel void LeakyRelu(__read_only image2d_t input, __write_only image2d_t output, const int4 input_shape,
-                        __global FLT *alpha) {
-  int C = input_shape.w;     // channel size
-  int Y = get_global_id(0);  // height id
-  int X = get_global_id(1);  // weight id
-  for (int num = 0; num < UP_DIV(C, SLICES); ++num) {
-    FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X * UP_DIV(C, SLICES) + num, Y));  // NHWC4: H WC
-    FLT4 tmp;
-    tmp.x = in_c4.x > 0.0f ? in_c4.x : in_c4.x * alpha[0];
-    tmp.y = in_c4.y > 0.0f ? in_c4.y : in_c4.y * alpha[0];
-    tmp.z = in_c4.z > 0.0f ? in_c4.z : in_c4.z * alpha[0];
-    tmp.w = in_c4.w > 0.0f ? in_c4.w : in_c4.w * alpha[0];
-    WRITE_IMAGE(output, (int2)(X * UP_DIV(C, SLICES) + num, Y), tmp);  // NHWC4: H WC
-  }
+__kernel void LeakyRelu_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const int4 img_shape,
+                              __global FLT4 *alpha, const int4 input_shape) {
+  int Y = get_global_id(0);  // H
+  int X = get_global_id(1);  // W C4
+  if (X >= img_shape.z || Y >= img_shape.y) return;
+  int C = X % UP_DIV(input_shape.w, SLICES);
+  FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X, Y));
+  FLT4 tmp;
+  tmp.x = in_c4.x > 0.0f ? in_c4.x : in_c4.x * alpha[C].x;
+  tmp.y = in_c4.y > 0.0f ? in_c4.y : in_c4.y * alpha[C].y;
+  tmp.z = in_c4.z > 0.0f ? in_c4.z : in_c4.z * alpha[C].z;
+  tmp.w = in_c4.w > 0.0f ? in_c4.w : in_c4.w * alpha[C].w;
+  WRITE_IMAGE(output, (int2)(X, Y), tmp);
+}
+
+__kernel void LeakyRelu_NC4HW4(__read_only image2d_t input, __write_only image2d_t output, const int4 img_shape,
+                               __global FLT4 *alpha, const int4 input_shape) {
+  int Y = get_global_id(0);  // C4 H
+  int X = get_global_id(1);  // W
+  if (X >= img_shape.z || Y >= img_shape.y) return;
+  int C = Y / input_shape.y;
+  FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X, Y));
+  FLT4 tmp;
+  tmp.x = in_c4.x > 0.0f ? in_c4.x : in_c4.x * alpha[C].x;
+  tmp.y = in_c4.y > 0.0f ? in_c4.y : in_c4.y * alpha[C].y;
+  tmp.z = in_c4.z > 0.0f ? in_c4.z : in_c4.z * alpha[C].z;
+  tmp.w = in_c4.w > 0.0f ? in_c4.w : in_c4.w * alpha[C].w;
+  WRITE_IMAGE(output, (int2)(X, Y), tmp);
 }
 
 __kernel void Relu(__read_only image2d_t input, __write_only image2d_t output, const int4 input_shape) {
-  int C = input_shape.w;     // channel size
-  int Y = get_global_id(0);  // height id
-  int X = get_global_id(1);  // weight id
-  for (int num = 0; num < UP_DIV(C, SLICES); ++num) {
-    FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X * UP_DIV(C, SLICES) + num, Y));  // NHWC4: H WC
-    FLT4 tmp;
-    tmp.x = in_c4.x > 0.0f ? in_c4.x : 0.0f;
-    tmp.y = in_c4.y > 0.0f ? in_c4.y : 0.0f;
-    tmp.z = in_c4.z > 0.0f ? in_c4.z : 0.0f;
-    tmp.w = in_c4.w > 0.0f ? in_c4.w : 0.0f;
-    WRITE_IMAGE(output, (int2)(X * UP_DIV(C, SLICES) + num, Y), tmp);  // NHWC4: H WC
-  }
+  int Y = get_global_id(0);
+  int X = get_global_id(1);
+  if (X >= input_shape.z || Y >= input_shape.y) return;
+  FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X, Y));
+  FLT4 tmp;
+  tmp.x = in_c4.x > 0.0f ? in_c4.x : 0.0f;
+  tmp.y = in_c4.y > 0.0f ? in_c4.y : 0.0f;
+  tmp.z = in_c4.z > 0.0f ? in_c4.z : 0.0f;
+  tmp.w = in_c4.w > 0.0f ? in_c4.w : 0.0f;
+  WRITE_IMAGE(output, (int2)(X, Y), tmp);
 }
 
 __kernel void Relu6(__read_only image2d_t input, __write_only image2d_t output, const int4 input_shape) {
-  int C = input_shape.w;     // channel size
-  int Y = get_global_id(0);  // height id
-  int X = get_global_id(1);  // weight id
-  for (int num = 0; num < UP_DIV(C, SLICES); ++num) {
-    FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X * UP_DIV(C, SLICES) + num, Y));  // NHWC4: H WC
-    FLT4 tmp;
-    tmp.x = in_c4.x > 0.0f ? MIN(in_c4.x, 6.0f) : 0.0f;
-    tmp.y = in_c4.y > 0.0f ? MIN(in_c4.y, 6.0f) : 0.0f;
-    tmp.z = in_c4.z > 0.0f ? MIN(in_c4.z, 6.0f) : 0.0f;
-    tmp.w = in_c4.w > 0.0f ? MIN(in_c4.w, 6.0f) : 0.0f;
-    WRITE_IMAGE(output, (int2)(X * UP_DIV(C, SLICES) + num, Y), tmp);  // NHWC4: H WC
-  }
+  int Y = get_global_id(0);
+  int X = get_global_id(1);
+  if (X >= input_shape.z || Y >= input_shape.y) return;
+  FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X, Y));
+  FLT4 tmp;
+  tmp.x = in_c4.x > 0.0f ? MIN(in_c4.x, 6.0f) : 0.0f;
+  tmp.y = in_c4.y > 0.0f ? MIN(in_c4.y, 6.0f) : 0.0f;
+  tmp.z = in_c4.z > 0.0f ? MIN(in_c4.z, 6.0f) : 0.0f;
+  tmp.w = in_c4.w > 0.0f ? MIN(in_c4.w, 6.0f) : 0.0f;
+  WRITE_IMAGE(output, (int2)(X, Y), tmp);
 }
 
 __kernel void Sigmoid(__read_only image2d_t input, __write_only image2d_t output, const int4 input_shape) {
-  int C = input_shape.w;     // channel size
-  int Y = get_global_id(0);  // height id
-  int X = get_global_id(1);  // weight id
-  for (int num = 0; num < UP_DIV(C, SLICES); ++num) {
-    FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X * UP_DIV(C, SLICES) + num, Y));  // NHWC4: H WC
-    FLT4 tmp;
-    tmp.x = 1.0f / (1.0f + exp(-in_c4.x));
-    tmp.y = 1.0f / (1.0f + exp(-in_c4.y));
-    tmp.z = 1.0f / (1.0f + exp(-in_c4.z));
-    tmp.w = 1.0f / (1.0f + exp(-in_c4.w));
-    WRITE_IMAGE(output, (int2)(X * UP_DIV(C, SLICES) + num, Y), tmp);  // NHWC4: H WC
-  }
+  int Y = get_global_id(0);
+  int X = get_global_id(1);
+  if (X >= input_shape.z || Y >= input_shape.y) return;
+  FLT4 in_c4 = READ_IMAGE(input, smp_zero, (int2)(X, Y));
+  FLT4 tmp;
+  tmp.x = 1.0f / (1.0f + exp(-in_c4.x));
+  tmp.y = 1.0f / (1.0f + exp(-in_c4.y));
+  tmp.z = 1.0f / (1.0f + exp(-in_c4.z));
+  tmp.w = 1.0f / (1.0f + exp(-in_c4.w));
+  WRITE_IMAGE(output, (int2)(X, Y), tmp);
 }
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc
index 19685cec4f1d72f9c5fb39738be755f7c07f7b83..4030148870f37d572776838e96064df24fb09bdd 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc
@@ -24,6 +24,7 @@
 #include "src/kernel_registry.h"
 #include "src/runtime/runtime_api.h"
 #include "include/errorcode.h"
+#include "nnacl/fp32/common_func.h"
 #include "src/runtime/kernel/opencl/cl/activation.cl.inc"
 
 using mindspore::kernel::KERNEL_ARCH::kGPU;
@@ -40,14 +41,41 @@ namespace mindspore::kernel {
 
 void ActivationOpenClKernel::InitBuffer() {
   auto allocator = lite::opencl::OpenCLRuntime::GetInstance()->GetAllocator();
-  alpha_buff_ = allocator->Malloc(fp_size);
+  int elem_num = UP_ROUND(nhwc_shape_[3], C4NUM);
+  alpha_buff_ = allocator->Malloc(elem_num * fp_size);
   alpha_buff_ = allocator->MapBuffer(alpha_buff_, CL_MAP_WRITE, nullptr, true);
-  memset(alpha_buff_, 0x00, fp_size);
-  if (enable_fp16_) {
-    auto fp16 = (int16_t)alpha_;
-    memcpy(alpha_buff_, &fp16, fp_size);
+  memset(alpha_buff_, 0x00, elem_num * fp_size);
+  if (in_tensors_.size() == 1) {
+    if (enable_fp16_) {
+      uint16_t alpha_fp16 = Float32ToShort(alpha_);
+      auto alpha_buff_fp16 = reinterpret_cast<uint16_t *>(alpha_buff_);
+      for (int i = 0; i < nhwc_shape_[3]; i++) {
+        alpha_buff_fp16[i] = alpha_fp16;
+      }
+    } else {
+      auto alpha_buff_fp16 = reinterpret_cast<float *>(alpha_buff_);
+      for (int i = 0; i < nhwc_shape_[3]; i++) {
+        alpha_buff_fp16[i] = alpha_;
+      }
+    }
   } else {
-    memcpy(alpha_buff_, &alpha_, fp_size);
+    if (enable_fp16_) {
+      if (in_tensors_[1]->data_type() == kNumberTypeFloat32) {
+        auto alpha_buff_fp16 = reinterpret_cast<uint16_t *>(alpha_buff_);
+        for (int i = 0; i < nhwc_shape_[3]; i++) {
+          alpha_buff_fp16[i] = Float32ToShort(reinterpret_cast<float *>(in_tensors_[0]->Data())[i]);
+        }
+      } else {
+        memcpy(alpha_buff_, in_tensors_[0]->Data(), nhwc_shape_[3] * fp_size);
+      }
+    } else {
+      if (in_tensors_[1]->data_type() == kNumberTypeFloat16) {
+        MS_LOG(WARNING) << "fp16 model run in fp32 mode not support.";
+        memcpy(alpha_buff_, in_tensors_[0]->Data(), nhwc_shape_[3] * fp_size);
+      } else {
+        memcpy(alpha_buff_, in_tensors_[0]->Data(), nhwc_shape_[3] * fp_size);
+      }
+    }
   }
   allocator->UnmapBuffer(alpha_buff_);
 }
@@ -55,6 +83,18 @@ void ActivationOpenClKernel::InitBuffer() {
 int ActivationOpenClKernel::Init() {
   in_size_ = in_tensors_[0]->shape().size();
   out_size_ = out_tensors_[0]->shape().size();
+  size_t n, h, w, c;
+  if (in_size_ == 2) {
+    n = in_tensors_[0]->shape()[0];
+    c = in_tensors_[0]->shape()[1];
+    h = w = 1;
+  } else {
+    n = in_tensors_[0]->shape()[0];
+    h = in_tensors_[0]->shape()[1];
+    w = in_tensors_[0]->shape()[2];
+    c = in_tensors_[0]->shape()[3];
+  }
+  nhwc_shape_ = {n, h, w, c};
   auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
   enable_fp16_ = ocl_runtime->GetFp16Enable();
   fp_size = enable_fp16_ ? sizeof(uint16_t) : sizeof(float);
@@ -62,7 +102,9 @@ int ActivationOpenClKernel::Init() {
     MS_LOG(ERROR) << "Activate fun only support dim=4 or 2, but your dim=" << in_size_;
     return RET_ERROR;
   }
-  InitBuffer();
+  if (type_ == ActivationType_LEAKY_RELU) {
+    InitBuffer();
+  }
   std::map<int, std::vector<std::string>> Program_Kernel{
     {ActivationType_LEAKY_RELU, std::vector<std::string>{"LEAKY_RELU", "LeakyRelu"}},
     {ActivationType_RELU, std::vector<std::string>{"RELU", "Relu"}},
@@ -76,7 +118,11 @@ int ActivationOpenClKernel::Init() {
   std::string source = activation_source;
   std::set<std::string> build_options;
   ocl_runtime->LoadSource(Program_Kernel[type_][0], source);
-  ocl_runtime->BuildKernel(kernel_, Program_Kernel[type_][0], Program_Kernel[type_][1], build_options);
+  std::string kernel_name = Program_Kernel[type_][1];
+  if (type_ == ActivationType_LEAKY_RELU) {
+    kernel_name += "_" + std::string(EnumNameFormat(op_format_));
+  }
+  ocl_runtime->BuildKernel(kernel_, Program_Kernel[type_][0], kernel_name, build_options);
   in_ori_format_ = in_tensors_[0]->GetFormat();
   out_ori_format_ = out_tensors_[0]->GetFormat();
   in_tensors_[0]->SetFormat(op_format_);
@@ -95,8 +141,11 @@
   ocl_runtime->SetKernelArg(kernel_, arg_idx++, img2d_shape);
   if (type_ == ActivationType_LEAKY_RELU) {
     ocl_runtime->SetKernelArg(kernel_, arg_idx++, alpha_buff_, lite::opencl::MemType::BUF);
+    cl_int4 input_shape = {static_cast<int>(nhwc_shape_[0]), static_cast<int>(nhwc_shape_[1]),
+                           static_cast<int>(nhwc_shape_[2]), static_cast<int>(nhwc_shape_[3])};
+    ocl_runtime->SetKernelArg(kernel_, arg_idx++, input_shape);
   }
-  std::vector<size_t> local = {1, 1};
+  std::vector<size_t> local = {};
   std::vector<size_t> global = {static_cast<size_t>(img2d_shape.s[1]), static_cast<size_t>(img2d_shape.s[2])};
   auto ret = ocl_runtime->RunKernel(kernel_, global, local, nullptr);
   if (ret != RET_OK) {
@@ -107,17 +156,15 @@
 }
 
 cl_int4 ActivationOpenClKernel::GetImg2dShape() {
-  cl_int4 img2d_shape = {0, 0, 0, 0};
-  for (int i = 0; i < in_size_; ++i) {
-    img2d_shape.s[i + 4 - in_size_] = in_tensors_[0]->shape()[i];
-  }
-  if (op_format_ == schema::Format_NC4) {
-    img2d_shape.s[1] = img2d_shape.s[2];
-    img2d_shape.s[2] = UP_DIV(img2d_shape.s[3], C4NUM);
+  cl_int4 img2d_shape = {1, 1, 1, 1};
+  if (op_format_ == schema::Format_NHWC4) {
+    img2d_shape.s[1] = nhwc_shape_[1];
+    img2d_shape.s[2] = nhwc_shape_[2] * UP_DIV(nhwc_shape_[3], C4NUM);
     img2d_shape.s[3] = C4NUM;
   }
   if (op_format_ == schema::Format_NC4HW4) {
-    img2d_shape.s[1] = UP_DIV(img2d_shape.s[3], C4NUM) * img2d_shape.s[1];  // UP(c / 4) * H
+    img2d_shape.s[1] = UP_DIV(nhwc_shape_[3], C4NUM) * nhwc_shape_[1];
+    img2d_shape.s[2] = nhwc_shape_[2];
     img2d_shape.s[3] = C4NUM;
   }
   return img2d_shape;
@@ -130,7 +177,7 @@ int ActivationOpenClKernel::GetImageSize(size_t idx, std::vector<size_t> *img_si
     img_dtype = CL_HALF_FLOAT;
   }
   img_size->clear();
-  img_size->push_back(img_shape.s[2] * UP_DIV(img_shape.s[3], C4NUM));
+  img_size->push_back(img_shape.s[2]);
   img_size->push_back(img_shape.s[1]);
   img_size->push_back(img_dtype);
   return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h
index 039a4a419cbb434b948c1c202514bc8977282bd7..72ed7a0f8e6b57102e978f5ce89a99a45fee1f16 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h
@@ -50,6 +50,7 @@ class ActivationOpenClKernel : public OpenCLKernel {
   int out_size_;
   size_t fp_size;
   bool enable_fp16_{false};
+  std::vector<size_t> nhwc_shape_;
 };
 
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/test/models_tflite_gpu.cfg b/mindspore/lite/test/models_tflite_gpu.cfg
index ac008b9b1829e4e986f8f35d0d83e4ebf8f467cf..39becae923b3fe0b6cf0b06695430ea07fdbf76e 100644
--- a/mindspore/lite/test/models_tflite_gpu.cfg
+++ b/mindspore/lite/test/models_tflite_gpu.cfg
@@ -3,3 +3,4 @@ mobilenet_v2_1.0_224.tflite
 resnet.tflite
 hiai_cn_recognize_modify_padv2.tflite
 hiai_cv_focusShootOCRModel_08.tflite
+hiai_model_normalize_object_scene_ps_20200519.tflite