/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "opencv2/opencv_modules.hpp"

#ifndef HAVE_OPENCV_CUDEV

#error "opencv_cudev is required"

#else

#include "opencv2/core/cuda.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/cuda/utility.hpp"

using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;

device::ThrustAllocator::~ThrustAllocator()
{
}

namespace
{
    // Default Thrust allocator: forwards to cudaMalloc/cudaFree on the host side;
    // the device-side paths are stubs and never allocate.
    class DefaultThrustAllocator : public cv::cuda::device::ThrustAllocator
    {
    public:
        __device__ __host__ uchar* allocate(size_t numBytes)
        {
#ifndef __CUDA_ARCH__
            uchar* ptr;
            CV_CUDEV_SAFE_CALL(cudaMalloc(&ptr, numBytes));
            return ptr;
#else
            return NULL;
#endif
        }

        __device__ __host__ void deallocate(uchar* ptr, size_t numBytes)
        {
            (void)numBytes;
#ifndef __CUDA_ARCH__
            CV_CUDEV_SAFE_CALL(cudaFree(ptr));
#endif
        }
    };

    DefaultThrustAllocator defaultThrustAllocator;
    cv::cuda::device::ThrustAllocator* g_thrustAllocator = &defaultThrustAllocator;
}

cv::cuda::device::ThrustAllocator& cv::cuda::device::ThrustAllocator::getAllocator()
{
    return *g_thrustAllocator;
}

void cv::cuda::device::ThrustAllocator::setAllocator(cv::cuda::device::ThrustAllocator* allocator)
{
    if (allocator == NULL)
        g_thrustAllocator = &defaultThrustAllocator;
    else
        g_thrustAllocator = allocator;
}

namespace
{
    class DefaultAllocator : public GpuMat::Allocator
    {
    public:
        bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize);
        void free(GpuMat* mat);
    };

    bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize)
    {
        if (rows > 1 && cols > 1)
        {
            CV_CUDEV_SAFE_CALL( cudaMallocPitch(&mat->data, &mat->step, elemSize * cols, rows) );
        }
        else
        {
            // Single row or single column must be continuous
            CV_CUDEV_SAFE_CALL( cudaMalloc(&mat->data, elemSize * cols * rows) );
            mat->step = elemSize * cols;
        }

        mat->refcount = (int*) fastMalloc(sizeof(int));

        return true;
    }

    void DefaultAllocator::free(GpuMat* mat)
    {
        cudaFree(mat->datastart);
        fastFree(mat->refcount);
    }

    DefaultAllocator cudaDefaultAllocator;
    GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator;
}

GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator()
{
    return g_defaultAllocator;
}

void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator)
{
    CV_Assert( allocator != 0 );
    g_defaultAllocator = allocator;
}

/////////////////////////////////////////////////////
/// create

void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
    CV_DbgAssert( _rows >= 0 && _cols >= 0 );

    _type &= Mat::TYPE_MASK;

    if (rows == _rows && cols == _cols && type() == _type && data)
        return;

    if (data)
        release();

    if (_rows > 0 && _cols > 0)
    {
        flags = Mat::MAGIC_VAL + _type;
        rows = _rows;
        cols = _cols;

        const size_t esz = elemSize();

        bool allocSuccess = allocator->allocate(this, rows, cols, esz);

        if (!allocSuccess)
        {
            // custom allocator fails, try default allocator
            allocator = defaultAllocator();
            allocSuccess = allocator->allocate(this, rows, cols, esz);
            CV_Assert( allocSuccess );
        }

        if (esz * cols == step)
            flags |= Mat::CONTINUOUS_FLAG;

        int64 _nettosize = static_cast<int64>(step) * rows;
        size_t nettosize = static_cast<size_t>(_nettosize);

        datastart = data;
        dataend = data + nettosize;

        if (refcount)
            *refcount = 1;
    }
}

/////////////////////////////////////////////////////
/// release

void cv::cuda::GpuMat::release()
{
    CV_DbgAssert( allocator != 0 );

    if (refcount && CV_XADD(refcount, -1) == 1)
        allocator->free(this);

    dataend = data = datastart = 0;
    step = rows = cols = 0;
    refcount = 0;
}
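/*
   Usage sketch (illustrative, not part of this file): GpuMat memory can be
   routed through a user-defined allocator. The AlignedAllocator class below
   is a hypothetical example that pads every row pitch to a 512-byte multiple;
   the Allocator interface and setDefaultAllocator() are the ones defined
   above. Note that create() falls back to the default allocator when a
   custom allocate() returns false.

   class AlignedAllocator : public cv::cuda::GpuMat::Allocator
   {
   public:
       bool allocate(cv::cuda::GpuMat* mat, int rows, int cols, size_t elemSize)
       {
           // round the row size up to the next 512-byte boundary
           mat->step = (elemSize * cols + 511) & ~size_t(511);
           if (cudaMalloc(&mat->data, mat->step * rows) != cudaSuccess)
               return false; // create() will retry with the default allocator
           mat->refcount = (int*) cv::fastMalloc(sizeof(int));
           return true;
       }
       void free(cv::cuda::GpuMat* mat)
       {
           cudaFree(mat->datastart);
           cv::fastFree(mat->refcount);
       }
   };

   // e.g. early in main(); the allocator must outlive every GpuMat using it
   static AlignedAllocator alignedAllocator;
   cv::cuda::GpuMat::setDefaultAllocator(&alignedAllocator);
*/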
/////////////////////////////////////////////////////
/// upload

void cv::cuda::GpuMat::upload(InputArray arr)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) );
}

void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice, stream) );
}

/////////////////////////////////////////////////////
/// download

void cv::cuda::GpuMat::download(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) );
}

void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost, stream) );
}

/////////////////////////////////////////////////////
/// copyTo

void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) );
}

void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice, stream) );
}

namespace
{
    template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct CopyToPolicy<4> : DefaultTransformPolicy
    {
        enum { shift = 2 };
    };
    template <> struct CopyToPolicy<8> : DefaultTransformPolicy
    {
        enum { shift = 1 };
    };

    template <typename T>
    void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
    {
        gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
    }
}

void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );

    uchar* data0 = _dst.getGpuMat().data;

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    // do not leave dst uninitialized
    if (dst.data != data0)
        dst.setTo(Scalar::all(0), stream);

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
    static const func_t funcs[9][4] =
    {
        {0,0,0,0},
        {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
        {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
        {0,0,0,0},
        {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
        {0,0,0,0},
        {0,0,0,0},
        {0,0,0,0},
        {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
    };

    if (mask.channels() == channels())
    {
        const func_t func = funcs[elemSize1()][0];
        CV_DbgAssert( func != 0 );

        func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
    }
    else
    {
        const func_t func = funcs[elemSize1()][channels() - 1];
        CV_DbgAssert( func != 0 );

        func(*this, dst, mask, stream);
    }
}
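/*
   Usage sketch (illustrative, not part of this file): the Stream overloads
   above issue cudaMemcpy2DAsync, which overlaps with other GPU work only when
   the host buffer is page-locked. Allocating the host side through
   cv::cuda::HostMem keeps the transfer genuinely asynchronous:

   cv::cuda::Stream stream;

   cv::cuda::HostMem h_src(480, 640, CV_8UC3, cv::cuda::HostMem::PAGE_LOCKED);
   // ... fill h_src.createMatHeader() with data ...

   cv::cuda::GpuMat d_img, d_copy;
   d_img.upload(h_src, stream);     // asynchronous host-to-device copy
   d_img.copyTo(d_copy, stream);    // device-to-device copy on the same stream

   cv::cuda::HostMem h_dst;
   d_copy.download(h_dst, stream);  // asynchronous device-to-host copy

   stream.waitForCompletion();      // h_dst is valid only after this returns
*/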
/////////////////////////////////////////////////////
/// setTo

namespace
{
    template <typename T>
    void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
    }

    template <typename T>
    void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
    }
}

GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
    {
        // Zero fill

        if (stream)
            CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
        else
            CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, 0, cols * elemSize(), rows) );

        return *this;
    }

    if (depth() == CV_8U)
    {
        const int cn = channels();

        if (cn == 1
            || (cn == 2 && value[0] == value[1])
            || (cn == 3 && value[0] == value[1] && value[0] == value[2])
            || (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
        {
            const int val = cv::saturate_cast<uchar>(value[0]);

            if (stream)
                CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
            else
                CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, val, cols * elemSize(), rows) );

            return *this;
        }
    }

    typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithOutMask<uchar>, setToWithOutMask<uchar2>, setToWithOutMask<uchar3>, setToWithOutMask<uchar4>},
        {setToWithOutMask<schar>, setToWithOutMask<char2>, setToWithOutMask<char3>, setToWithOutMask<char4>},
        {setToWithOutMask<ushort>, setToWithOutMask<ushort2>, setToWithOutMask<ushort3>, setToWithOutMask<ushort4>},
        {setToWithOutMask<short>, setToWithOutMask<short2>, setToWithOutMask<short3>, setToWithOutMask<short4>},
        {setToWithOutMask<int>, setToWithOutMask<int2>, setToWithOutMask<int3>, setToWithOutMask<int4>},
        {setToWithOutMask<float>, setToWithOutMask<float2>, setToWithOutMask<float3>, setToWithOutMask<float4>},
        {setToWithOutMask<double>, setToWithOutMask<double2>, setToWithOutMask<double3>, setToWithOutMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, value, stream);

    return *this;
}

GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();

    if (mask.empty())
    {
        return setTo(value, stream);
    }

    CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );

    typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithMask<uchar>, setToWithMask<uchar2>, setToWithMask<uchar3>, setToWithMask<uchar4>},
        {setToWithMask<schar>, setToWithMask<char2>, setToWithMask<char3>, setToWithMask<char4>},
        {setToWithMask<ushort>, setToWithMask<ushort2>, setToWithMask<ushort3>, setToWithMask<ushort4>},
        {setToWithMask<short>, setToWithMask<short2>, setToWithMask<short3>, setToWithMask<short4>},
        {setToWithMask<int>, setToWithMask<int2>, setToWithMask<int3>, setToWithMask<int4>},
        {setToWithMask<float>, setToWithMask<float2>, setToWithMask<float3>, setToWithMask<float4>},
        {setToWithMask<double>, setToWithMask<double2>, setToWithMask<double3>, setToWithMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, mask, value, stream);

    return *this;
}
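/*
   Usage sketch (illustrative, not part of this file): setTo() above has two
   memset fast paths -- all-zero scalars, and CV_8U images where every channel
   receives the same value -- while anything else, including every masked
   fill, runs the templated kernel.

   cv::cuda::GpuMat img(480, 640, CV_8UC3);
   img.setTo(cv::Scalar::all(0));           // cudaMemset2D fast path (zeros)
   img.setTo(cv::Scalar::all(128));         // memset fast path: equal CV_8U channels
   img.setTo(cv::Scalar(255, 0, 0));        // kernel path: channels differ

   cv::cuda::GpuMat mask(img.size(), CV_8UC1, cv::Scalar::all(0));
   // ... mark the pixels to overwrite in mask ...
   img.setTo(cv::Scalar(0, 255, 0), mask);  // kernel path with per-pixel mask
*/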
/////////////////////////////////////////////////////
/// convertTo

namespace
{
    template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
    {
        enum { shift = 1 };
    };

    template <typename T, typename D>
    void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<larger_elem_type, dst_elem_type>::type scalar_type;

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
    }

    template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
    {
        S alpha;
        S beta;

        __device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
        {
            return cudev::saturate_cast<D>(alpha * src + beta);
        }
    };

    template <typename T, typename D>
    void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<larger_elem_type, dst_elem_type>::type scalar_type;

        Convertor<T, D, scalar_type> op;
        op.alpha = cv::saturate_cast<scalar_type>(alpha);
        op.beta = cv::saturate_cast<scalar_type>(beta);

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
    }

    template <typename T, typename D>
    void convertScaleHalf(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<larger_elem_type, dst_elem_type>::type scalar_type;

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_fp16_func<T, D>(), stream);
    }
}

void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);
    if (sdepth == ddepth)
    {
        if (stream)
            copyTo(_dst, stream);
        else
            copyTo(_dst);

        return;
    }

    CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );

    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
        {convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
        {convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
        {convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
        {convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
        {convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
        {convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
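/*
   Usage sketch (illustrative, not part of this file): when source and
   destination depths match, the overload above degenerates to copyTo(); the
   alpha/beta overload below always runs the scaling kernel, which is the
   usual way to normalize 8-bit data to [0, 1] floats on the device:

   cv::cuda::GpuMat d_u8(480, 640, CV_8UC1);
   cv::cuda::GpuMat d_f32;
   d_u8.convertTo(d_f32, CV_32F, 1.0 / 255.0, 0.0);  // u8 -> f32 in [0, 1]
*/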
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);

    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
        {convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
        {convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
        {convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
        {convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
        {convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
        {convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}

void cv::cuda::convertFp16(InputArray _src, OutputArray _dst, Stream& stream)
{
    GpuMat src = _src.getGpuMat();
    int ddepth = 0;

    switch (src.depth())
    {
    case CV_32F:
        ddepth = CV_16S;
        break;
    case CV_16S:
        ddepth = CV_32F;
        break;
    default:
        CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth");
        return;
    }

    int type = CV_MAKE_TYPE(CV_MAT_DEPTH(ddepth), src.channels());
    _dst.create(src.size(), type);
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        0, 0, 0,
        convertScaleHalf<float, short>, 0, convertScaleHalf<short, float>,
        0, 0,
    };

    funcs[ddepth](src.reshape(1), dst.reshape(1), stream);
}

#endif
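/*
   Usage sketch (illustrative, not part of this file): convertFp16() stores
   half-precision values in a CV_16S container (there is no dedicated fp16
   GpuMat depth here), and converting a CV_16S input goes back to CV_32F:

   cv::cuda::GpuMat d_f32(480, 640, CV_32FC1);
   cv::cuda::GpuMat d_f16, d_back;
   cv::cuda::convertFp16(d_f32, d_f16);   // CV_32F -> half bits in CV_16S
   cv::cuda::convertFp16(d_f16, d_back);  // CV_16S (half) -> CV_32F
*/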