Commit 6728b40a authored by Alexander Alekhin

Merge pull request #10602 from pengli:dnn

@@ -12,6 +12,7 @@ Implementation of Batch Normalization layer.
#include "../precomp.hpp"
#include "op_halide.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include "opencl_kernels_dnn.hpp"
namespace cv
{
@@ -22,7 +23,7 @@ class BatchNormLayerImpl : public BatchNormLayer
{
public:
Mat weights_, bias_;
- Mat weightMat, biasMat;
+ UMat umat_weight, umat_bias;
BatchNormLayerImpl(const LayerParams& params)
{
@@ -80,6 +81,9 @@ public:
dstWeightsData[i] = w;
dstBiasData[i] = (hasBias ? biasData[i] : 0.0f) - w * meanData[i] * varMeanScale;
}
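// Keep UMat versions of the fused weight/bias vectors so the OpenCL path below can bind them directly as kernel arguments.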
umat_weight = weights_.getUMat(ACCESS_READ);
umat_bias = bias_.getUMat(ACCESS_READ);
}
void getScaleShift(Mat& scale, Mat& shift) const
@@ -97,25 +101,6 @@ public:
return true;
}
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
- {
- if (inputs[0]->dims == 4)
- {
- int groups = inputs[0]->size[0];
- int channels = inputs[0]->size[1];
- int rows = inputs[0]->size[2];
- int cols = inputs[0]->size[3];
- MatShape s = shape(groups * channels, rows * cols);
- weightMat = Mat(s[0], s[1], CV_32FC1);
- biasMat = Mat(s[0], s[1], CV_32FC1);
- for (int n = 0; n < s[0]; n++)
- {
- weightMat.row(n).setTo(weights_.at<float>(n % channels));
- biasMat.row(n).setTo(bias_.at<float>(n % channels));
- }
- }
- }
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
@@ -155,8 +140,23 @@ public:
MatShape s = shape(groups * channels, rows * cols);
UMat src = inputs[ii].reshape(1, s.size(), &s[0]);
UMat dst = outputs[ii].reshape(1, s.size(), &s[0]);
- multiply(src, weightMat, dst);
- add(dst, biasMat, dst);
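// Pick the widest vector size (8, 4, or 1 floats) that evenly divides the flattened row length and build the matching batch_norm<N> kernel variant.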
int number = (s[1] % 8 == 0) ? 8 : ((s[1] % 4 == 0) ? 4 : 1);
String buildopt = format("-DNUM=%d ", number);
String kname = format("batch_norm%d", number);
ocl::Kernel kernel(kname.c_str(), ocl::dnn::batchnorm_oclsrc, buildopt);
if (kernel.empty())
return false;
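// 2-D launch over the flattened (groups*channels) x (rows*cols) view; each work-item handles NUM consecutive values of one row.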
size_t global[] = { (size_t)s[0], (size_t)(s[1] / number) };
kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
kernel.set(1, (int)s[0]);
kernel.set(2, (int)s[1]);
kernel.set(3, (int)channels);
kernel.set(4, ocl::KernelArg::PtrReadOnly(umat_weight));
kernel.set(5, ocl::KernelArg::PtrReadOnly(umat_bias));
kernel.set(6, ocl::KernelArg::PtrWriteOnly(dst));
bool ret = kernel.run(2, global, NULL, false);
if (!ret)
return false;
}
}
return true;
......
@@ -267,7 +267,6 @@ struct ReLUFunctor
bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
- size_t wgSize = ocl::Device::getDefault().maxWorkGroupSize();
std::vector<UMat> inputs;
std::vector<UMat> outputs;
@@ -287,7 +286,7 @@ struct ReLUFunctor
kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
size_t gSize = src.total();
- CV_Assert(kernel.run(1, &gSize, &wgSize, false));
+ CV_Assert(kernel.run(1, &gSize, NULL, false));
}
return true;
@@ -395,8 +394,28 @@ struct TanHFunctor
#ifdef HAVE_OPENCL
bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
- // TODO: implement OCL version
- return false;
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inps.getUMatVector(inputs);
outs.getUMatVector(outputs);
String buildopt = oclGetTMacro(inputs[0]);
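// Element-wise tanh on the GPU: one 1-D TanHForward launch per input blob, one work-item per element.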
for (size_t i = 0; i < inputs.size(); i++)
{
UMat& src = inputs[i];
UMat& dst = outputs[i];
ocl::Kernel kernel("TanHForward", ocl::dnn::activations_oclsrc, buildopt);
kernel.set(0, (int)src.total());
kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
size_t gSize = src.total();
CV_Assert(kernel.run(1, &gSize, NULL, false));
}
return true;
}
#endif
@@ -594,8 +613,31 @@ struct PowerFunctor
#ifdef HAVE_OPENCL
bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
- // TODO: implement OCL version
- return false;
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inps.getUMatVector(inputs);
outs.getUMatVector(outputs);
String buildopt = oclGetTMacro(inputs[0]);
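// Power layer computes f(x) = (shift + scale*x)^power; the three coefficients are passed to PowForward as scalar arguments.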
for (size_t i = 0; i < inputs.size(); i++)
{
UMat& src = inputs[i];
UMat& dst = outputs[i];
ocl::Kernel kernel("PowForward", ocl::dnn::activations_oclsrc, buildopt);
kernel.set(0, (int)src.total());
kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
kernel.set(3, (float)power);
kernel.set(4, (float)scale);
kernel.set(5, (float)shift);
size_t gSize = src.total();
CV_Assert(kernel.run(1, &gSize, NULL, false));
}
return true;
}
#endif
@@ -624,9 +666,11 @@ struct ChannelsPReLUFunctor
{
typedef ChannelsPReLULayer Layer;
Mat scale;
UMat scale_umat;
explicit ChannelsPReLUFunctor(const Mat& scale_=Mat()) : scale(scale_)
{
scale_umat = scale.getUMat(ACCESS_READ);
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@@ -669,8 +713,31 @@ struct ChannelsPReLUFunctor
#ifdef HAVE_OPENCL
bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
- // TODO: implement OCL version
- return false;
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inps.getUMatVector(inputs);
outs.getUMatVector(outputs);
String buildopt = oclGetTMacro(inputs[0]);
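// PReLUForward needs the channel count and plane size (H*W) so it can map a flat element index back to its channel and apply that channel's learned slope.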
for (size_t i = 0; i < inputs.size(); i++)
{
UMat& src = inputs[i];
UMat& dst = outputs[i];
ocl::Kernel kernel("PReLUForward", ocl::dnn::activations_oclsrc, buildopt);
kernel.set(0, (int)src.total());
kernel.set(1, (int)src.size[1]);
kernel.set(2, (int)total(shape(src), 2));
kernel.set(3, ocl::KernelArg::PtrReadOnly(src));
kernel.set(4, ocl::KernelArg::PtrWriteOnly(dst));
kernel.set(5, ocl::KernelArg::PtrReadOnly(scale_umat));
size_t gSize = src.total();
CV_Assert(kernel.run(1, &gSize, NULL, false));
}
return true;
}
#endif
......
@@ -43,6 +43,8 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include "math_functions.hpp"
#include "opencl_kernels_dnn.hpp"
namespace cv
{
@@ -60,11 +62,93 @@ public:
eps = params.get<double>("eps", 1e-9);
}
#ifdef HAVE_OPENCL
bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
{
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inputs_.getUMatVector(inputs);
outputs_.getUMatVector(outputs);
for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
{
UMat &inpBlob = inputs[inpIdx];
UMat &outBlob = outputs[inpIdx];
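// Flatten each blob to a 2-D view with one row per normalization group: N rows when normalizing across channels, N*C rows otherwise.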
int splitDim = (acrossChannels) ? 1 : 2;
int i, newRows = 1;
for( i = 0; i < splitDim; i++ )
newRows *= inpBlob.size[i];
MatShape s = shape(newRows, inpBlob.total() / newRows);
UMat& inpMat = inpBlob;
UMat& outMat = outBlob;
UMat oneMat = UMat::ones(s[1], 1, CV_32F);
UMat meanMat = UMat(s[0], 1, CV_32F);
UMat devMat = UMat(s[0], 1, CV_32F);
UMat tmpMat = UMat(s[0], s[1], CV_32F);
float alpha = 1.0f / s[1];
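// Row means: a GEMV against a vector of ones with alpha = 1/cols reduces each row of inpMat to its average.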
bool ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,
inpMat, 0, oneMat, 0, 0.0f, meanMat, 0);
if (!ret)
return false;
int number = (s[1] % 8 == 0) ? 8 : ((s[1] % 4 == 0) ? 4 : 1);
String buildopt = format("-DNUM=%d ", number);
String kname = format("calc_mean%d", number);
ocl::Kernel kernel(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);
if (kernel.empty())
return false;
size_t global[] = { (size_t)s[0], (size_t)(s[1] / number) };
kernel.set(0, ocl::KernelArg::PtrReadOnly(inpMat));
kernel.set(1, (int)s[0]);
kernel.set(2, (int)s[1]);
kernel.set(3, ocl::KernelArg::PtrReadOnly(meanMat));
kernel.set(4, ocl::KernelArg::PtrWriteOnly(tmpMat));
ret = kernel.run(2, global, NULL, false);
if (!ret)
return false;
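// Average the squared deviations written by calc_mean<N> to obtain the per-row variance.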
if (normVariance)
{
ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,
tmpMat, 0, oneMat, 0, 0.0f, devMat, 0);
if (!ret)
return false;
}
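// mvn<N> subtracts the row mean and, when compiled with -DNORM_VARIANCE, divides by (eps + sqrt(variance)).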
kname = format("mvn%d", number);
if (normVariance)
buildopt += "-DNORM_VARIANCE";
ocl::Kernel kernel1(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);
if (kernel1.empty())
return false;
kernel1.set(0, ocl::KernelArg::PtrReadOnly(inpMat));
kernel1.set(1, (int)s[0]);
kernel1.set(2, (int)s[1]);
kernel1.set(3, (float)eps);
kernel1.set(4, ocl::KernelArg::PtrReadOnly(meanMat));
kernel1.set(5, ocl::KernelArg::PtrReadOnly(devMat));
kernel1.set(6, ocl::KernelArg::PtrWriteOnly(outMat));
ret = kernel1.run(2, global, NULL, false);
if (!ret)
return false;
}
return true;
}
#endif
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
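// Use the OpenCL path when the preferable target is OpenCL (and, with OCL_PERFORMANCE_CHECK, the device is Intel); otherwise fall through to the CPU fallback.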
CV_OCL_RUN((preferableTarget == DNN_TARGET_OPENCL) &&
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
......
@@ -54,6 +54,15 @@ __kernel void ReLUForward(const int count, __global const T* in, __global T* out
#endif
}
__kernel void PReLUForward(const int count, const int channels, const int plane_size,
__global const T* in, __global T* out, __global const T* slope_data)
{
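// count = total element count, plane_size = H*W; each element's channel index selects its learned negative slope.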
int index = get_global_id(0);
int c = (index / plane_size) % channels;
if(index < count)
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
__kernel void TanHForward(const int count, __global T* in, __global T* out) {
int index = get_global_id(0);
if(index < count)
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Copyright (c) 2016-2017 Fabian David Tschopp, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
- __kernel void batchnorm(__global const T *src, int src_offset,
- __global const float *meanMat,
- float varMeanScale,
- __global const float *invStdMat,
- __global const float *weight,
- __global const float *bias,
- int hasWeight, int hasBias,
- int width, int height, int channel,
- __global T *dst, int dst_offset)
#define Dtype float
#define Dtype4 float4
#define Dtype8 float8
#if NUM == 8
#define load(src, index) vload8(0, src + index)
#define store(vec, dst, index) vstore8(vec, 0, dst + index)
#define vec_type Dtype8
#define BATCH_NORM batch_norm8
#elif NUM == 4
#define load(src, index) vload4(0, src + index)
#define store(vec, dst, index) vstore4(vec, 0, dst + index)
#define vec_type Dtype4
#define BATCH_NORM batch_norm4
#elif NUM == 1
#define load(src, index) src[index]
#define store(vec, dst, index) dst[index] = vec
#define vec_type Dtype
#define BATCH_NORM batch_norm1
#endif
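// Fused batch-norm affine transform: each work-item loads NUM consecutive values from one row of the flattened (N*C) x (H*W) matrix and computes y = w*x + b, with w and b indexed by the row's channel (row % channels).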
__kernel void BATCH_NORM(__global const Dtype* src,
const int rows,
const int cols,
const int channels,
__global const Dtype* weight,
__global const Dtype* bias,
__global Dtype* dst)
{
int x = get_global_id(0);
- int y = get_global_id(1);
- int c = get_global_id(2);
+ int y = get_global_id(1) * NUM;
+ int index = x * cols + y;
- if (x >= width || y >= height || c >= channel)
+ if (x >= rows || y >= cols)
return;
- float mean = meanMat[c] * varMeanScale;
- float invstd = invStdMat[c];
- float w = hasWeight ? weight[c] : 1;
- float b = hasBias ? bias[c] : 0;
- int index = y * width + x + c * width * height;
- T val = (src[index + src_offset] - mean) * w * invstd + b;
- dst[index + dst_offset] = val;
+ Dtype w = weight[x % channels];
+ Dtype b = bias[x % channels];
+ vec_type src_vec = load(src, index);
+ vec_type dst_vec = src_vec * w + (vec_type)b;
+ store(dst_vec, dst, index);
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Copyright (c) 2016-2017 Fabian David Tschopp, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#define Dtype float
#define Dtype4 float4
#define Dtype8 float8
#if NUM == 8
#define load(src, index) vload8(0, src + index)
#define store(vec, dst, index) vstore8(vec, 0, dst + index)
#define vec_type Dtype8
#define CALC_MEAN calc_mean8
#define MVN mvn8
#elif NUM == 4
#define load(src, index) vload4(0, src + index)
#define store(vec, dst, index) vstore4(vec, 0, dst + index)
#define vec_type Dtype4
#define CALC_MEAN calc_mean4
#define MVN mvn4
#elif NUM == 1
#define load(src, index) src[index]
#define store(vec, dst, index) dst[index] = vec
#define vec_type Dtype
#define CALC_MEAN calc_mean1
#define MVN mvn1
#endif
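// Despite its name, calc_mean<N> consumes the precomputed row means and writes the squared deviation (x - mean)^2 of every element; the host then averages these with a GEMV to get the variance.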
__kernel void CALC_MEAN(__global const Dtype* src,
const int rows,
const int cols,
__global Dtype* mean,
__global Dtype* dst)
{
int x = get_global_id(0);
int y = get_global_id(1) * NUM;
int index = x * cols + y;
if (x >= rows || y >= cols)
return;
Dtype mean_val = mean[x];
vec_type src_vec = load(src, index);
vec_type dst_vec = pow(src_vec - (vec_type)mean_val, 2);
store(dst_vec, dst, index);
}
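// mvn<N> performs the final mean subtraction and optional variance normalization.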
__kernel void MVN(__global const Dtype* src,
const int rows,
const int cols,
const Dtype eps,
__global const Dtype* mean,
__global const Dtype* dev,
__global Dtype* dst)
{
int x = get_global_id(0);
int y = get_global_id(1) * NUM;
int index = x * cols + y;
if (x >= rows || y >= cols)
return;
Dtype mean_val = mean[x];
Dtype dev_val = sqrt(dev[x]);
Dtype alpha;
#ifdef NORM_VARIANCE
alpha = 1 / (eps + dev_val);
#else
alpha = 1;
#endif
vec_type src_vec = load(src, index) - (vec_type)mean_val;
vec_type dst_vec = src_vec * alpha;
store(dst_vec, dst, index);
}
@@ -202,6 +202,11 @@ TEST(Layer_Test_MVN, Accuracy)
testLayerUsingCaffeModels("layer_mvn");
}
OCL_TEST(Layer_Test_MVN, Accuracy)
{
testLayerUsingCaffeModels("layer_mvn", DNN_TARGET_OPENCL);
}
void testReshape(const MatShape& inputShape, const MatShape& targetShape,
int axis = 0, int num_axes = -1,
MatShape mask = MatShape())
@@ -331,6 +336,12 @@ TEST(Layer_Test_PReLU, Accuracy)
testLayerUsingCaffeModels("layer_prelu_fc", DNN_TARGET_CPU, true, false);
}
OCL_TEST(Layer_Test_PReLU, Accuracy)
{
testLayerUsingCaffeModels("layer_prelu", DNN_TARGET_OPENCL, true);
testLayerUsingCaffeModels("layer_prelu_fc", DNN_TARGET_OPENCL, true, false);
}
//template<typename XMat>
//static void test_Layer_Concat()
//{
......