提交 ed749c4b 编写于 作者: V Vladislav Vinogradov

added missing arithm operations to gpu module

上级 f947c2eb
......@@ -539,32 +539,41 @@ namespace cv
//////////////////////////// Per-element operations ////////////////////////////////////
//! adds one matrix to another (c = a + b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
// NOTE(review): the two add() overloads below differ only in trailing
// defaulted parameters, so a 3-argument call add(a, b, c) is ambiguous.
// Presumably the first (mask-less) overload is being removed by this
// change-set and both appear here only because the diff was flattened — confirm.
CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
//! adds scalar to a matrix (c = a + s)
//! supports CV_32FC1 and CV_32FC2 type
// NOTE(review): same overload ambiguity as add(mat, mat) above.
CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
//! subtracts one matrix from another (c = a - b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
// NOTE(review): same overload ambiguity as add() above.
CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
//! subtracts scalar from a matrix (c = a - s)
//! supports CV_32FC1 and CV_32FC2 type
CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());
//! computes element-wise product of the two arrays (c = a * b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
//! multiplies matrix to a scalar (c = a * s)
//! supports CV_32FC1 type
CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());
//! computes element-wise quotient of the two arrays (c = a / b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
//! computes element-wise quotient of matrix and scalar (c = a / s)
//! supports CV_32FC1 type
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());
//! subtracts scalar from a matrix with optional mask and output depth (c = a - s)
// NOTE(review): this masked subtract-scalar overload sits apart from the other
// subtract declarations above; consider grouping it with its siblings.
CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted product of the two arrays (c = scale * a * b)
CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! weighted multiplies matrix to a scalar (c = scale * a * s)
CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted quotient of the two arrays (c = scale * a / b)
CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted quotient of matrix and scalar (c = scale * a / s)
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted reciprocal of an array (dst = scale/src2)
CV_EXPORTS void divide(double scale, const GpuMat& src2, GpuMat& dst, int dtype = -1, Stream& stream = Stream::Null());
//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
//! dtype selects the destination depth; -1 keeps the source depth
CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst,
int dtype = -1, Stream& stream = Stream::Null());
//! adds scaled array to another one (dst = alpha*src1 + src2)
//! Thin convenience wrapper: forwards to addWeighted with beta = 1.0,
//! gamma = 0.0 and dtype = -1 (destination keeps the source depth).
static inline void scaleAdd(const GpuMat& src1, double alpha, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null())
{
addWeighted(src1, alpha, src2, 1.0, 0.0, dst, -1, stream);
}
//! computes element-wise absolute difference of two arrays (c = abs(a - b))
CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
//! computes element-wise absolute difference of array and scalar (c = abs(a - s))
// NOTE(review): identical absdiff declarations also appear further down in
// this header — presumably one copy is a leftover of the flattened diff; confirm.
CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream = Stream::Null());
//! computes exponent of each matrix element (b = e**a)
//! supports only CV_32FC1 type
......@@ -580,13 +589,6 @@ namespace cv
//! supports only CV_32FC1 type
//! computes natural logarithm of each matrix element (b = log(a))
CV_EXPORTS void log(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null());
//! computes element-wise absolute difference of two arrays (c = abs(a - b))
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
// NOTE(review): the two absdiff declarations below duplicate the pair declared
// earlier in this header — presumably these are the diff-removed copies; confirm.
CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
//! computes element-wise absolute difference of array and scalar (c = abs(a - s))
//! supports only CV_32FC1 type
CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream = Stream::Null());
//! compares elements of two arrays (c = a <cmpop> b)
//! supports CV_8UC4, CV_32FC1 types
CV_EXPORTS void compare(const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream = Stream::Null());
......@@ -615,10 +617,6 @@ namespace cv
//! computes per-element maximum of array and scalar (dst = max(src1, src2))
CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null());
//! computes the weighted sum of two arrays
// NOTE(review): duplicates the addWeighted declaration earlier in this header —
// presumably the diff-removed copy; confirm which one survives the merge.
CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst,
int dtype = -1, Stream& stream = Stream::Null());
////////////////////////////// Image processing //////////////////////////////
......
......@@ -49,32 +49,1197 @@
namespace cv { namespace gpu { namespace device
{
//////////////////////////////////////////////////////////////////////////
// add
// Element-wise addition functor: result = saturate_cast<D>(lhs + rhs).
// T is the source element type, D the (possibly wider) destination type.
template <typename T, typename D> struct Add : binary_function<T, T, D>
{
    __device__ __forceinline__ D operator ()(T lhs, T rhs) const
    {
        return saturate_cast<D>(lhs + rhs);
    }
};
// Launch-tuning specializations for the generic transform() kernel when it
// runs the Add functor on 16- and 32-bit element types.
// NOTE(review): smart_block_dim_y / smart_shift presumably select the thread
// block height and the per-thread vectorization factor — confirm against the
// transform_detail implementation before changing these values.
template <> struct TransformFunctorTraits< Add<ushort, ushort> > : DefaultTransformFunctorTraits< Add<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Add<short, short> > : DefaultTransformFunctorTraits< Add<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Add<int, int> > : DefaultTransformFunctorTraits< Add<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Add<float, float> > : DefaultTransformFunctorTraits< Add<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename T, typename D> void add_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream)
{
if (mask.data)
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, mask, Add<T, D>(), stream);
else
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, Add<T, D>(), stream);
}
// Explicit instantiations of add_gpu for every exposed (source, destination)
// type pair. The commented-out lines document the full type grid; presumably
// they cover pairs the public dispatch table never requests, and are kept to
// make enabling a pair later a one-line change — confirm against the caller.
template void add_gpu<uchar, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<uchar, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<ushort, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<ushort, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<ushort, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<short, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<short, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<short, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<int, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<int, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<int, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<float, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<float, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<double, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
// Adds a fixed scalar (stored as double) to each element:
// result = saturate_cast<D>(elem + val).
template <typename T, typename D> struct AddScalar : unary_function<T, D>
{
    AddScalar(double v) : val(v) {}

    __device__ __forceinline__ D operator ()(T elem) const
    {
        return saturate_cast<D>(elem + val);
    }

    const double val;
};
// Launch-tuning specializations for transform() running AddScalar on 16- and
// 32-bit element types; values mirror the Add specializations above.
// NOTE(review): smart_block_dim_y / smart_shift presumably select block height
// and per-thread work — confirm against transform_detail.
template <> struct TransformFunctorTraits< AddScalar<ushort, ushort> > : DefaultTransformFunctorTraits< AddScalar<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< AddScalar<short, short> > : DefaultTransformFunctorTraits< AddScalar<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< AddScalar<int, int> > : DefaultTransformFunctorTraits< AddScalar<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< AddScalar<float, float> > : DefaultTransformFunctorTraits< AddScalar<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Launches the add-scalar kernel. The scalar is first adjusted by
// cudaSetDoubleForDevice (demoted to float precision on devices without
// native double support), then baked into the functor.
template <typename T, typename D> void add_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream)
{
    cudaSafeCall( cudaSetDoubleForDevice(&val) );

    AddScalar<T, D> op(val);

    if (!mask.data)
        transform((DevMem2D_<T>)src1, (DevMem2D_<D>)dst, op, stream);
    else
        transform((DevMem2D_<T>)src1, (DevMem2D_<D>)dst, mask, op, stream);
}
// Explicit instantiations of the scalar add_gpu overload; same type grid as
// the matrix-matrix variant above. Commented-out pairs presumably are not
// reachable through the public dispatch table — confirm against the caller.
template void add_gpu<uchar, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<uchar, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, short >(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, float >(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<uchar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<schar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<ushort, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<ushort, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<ushort, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<ushort, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<short, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<short, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<short, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<short, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<int, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<int, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<int, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<int, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<float, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<float, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<float, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void add_gpu<double, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void add_gpu<double, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// subtract
// Element-wise subtraction functor: result = saturate_cast<D>(lhs - rhs).
// T is the source element type, D the (possibly wider) destination type.
template <typename T, typename D> struct Subtract : binary_function<T, T, D>
{
    __device__ __forceinline__ D operator ()(T lhs, T rhs) const
    {
        return saturate_cast<D>(lhs - rhs);
    }
};
// Launch-tuning specializations for transform() running Subtract on 16- and
// 32-bit element types; values mirror the Add specializations above.
// NOTE(review): smart_block_dim_y / smart_shift presumably select block height
// and per-thread work — confirm against transform_detail.
template <> struct TransformFunctorTraits< Subtract<ushort, ushort> > : DefaultTransformFunctorTraits< Subtract<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Subtract<short, short> > : DefaultTransformFunctorTraits< Subtract<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Subtract<int, int> > : DefaultTransformFunctorTraits< Subtract<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Subtract<float, float> > : DefaultTransformFunctorTraits< Subtract<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename T, typename D> void subtract_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream)
{
if (mask.data)
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, mask, Subtract<T, D>(), stream);
else
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, Subtract<T, D>(), stream);
}
// Explicit instantiations of subtract_gpu; same type grid as add_gpu above.
// Commented-out pairs presumably are not reachable through the public
// dispatch table — confirm against the caller.
template void subtract_gpu<uchar, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<uchar, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<ushort, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<ushort, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<ushort, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<short, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<short, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<short, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<int, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<int, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<int, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<float, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<float, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, uchar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, schar>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, short>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, int>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, float>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<double, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
// Subtracts a fixed scalar (stored as double) from each element:
// result = saturate_cast<D>(elem - val).
template <typename T, typename D> struct SubtractScalar : unary_function<T, D>
{
    SubtractScalar(double v) : val(v) {}

    __device__ __forceinline__ D operator ()(T elem) const
    {
        return saturate_cast<D>(elem - val);
    }

    const double val;
};
// Launch-tuning specializations for transform() running SubtractScalar on
// 16- and 32-bit element types; values mirror the AddScalar specializations.
// NOTE(review): smart_block_dim_y / smart_shift presumably select block height
// and per-thread work — confirm against transform_detail.
template <> struct TransformFunctorTraits< SubtractScalar<ushort, ushort> > : DefaultTransformFunctorTraits< SubtractScalar<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< SubtractScalar<short, short> > : DefaultTransformFunctorTraits< SubtractScalar<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< SubtractScalar<int, int> > : DefaultTransformFunctorTraits< SubtractScalar<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< SubtractScalar<float, float> > : DefaultTransformFunctorTraits< SubtractScalar<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename T, typename D> void subtract_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream)
{
cudaSafeCall( cudaSetDoubleForDevice(&val) );
SubtractScalar<T, D> op(val);
if (mask.data)
transform((DevMem2D_<T>)src1, (DevMem2D_<D>)dst, mask, op, stream);
else
transform((DevMem2D_<T>)src1, (DevMem2D_<D>)dst, op, stream);
}
// Explicit instantiations of the scalar-subtract kernel for every
// supported (source, destination) type pair. Commented-out pairs are
// deliberately disabled -- presumably not reachable from the host-side
// wrappers; confirm against the dispatcher before enabling.
template void subtract_gpu<uchar, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<uchar, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, short >(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, float >(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<uchar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<schar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<ushort, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<ushort, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<ushort, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<ushort, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<short, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<short, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<short, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<short, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<int, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<int, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<int, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<int, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<float, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<float, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<float, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, uchar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, schar>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, short>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, int>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//template void subtract_gpu<double, float>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template void subtract_gpu<double, double>(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// multiply
// Device functor: treats the uint as four packed 8-bit channels and
// scales each channel by b, saturating each product back to uchar.
struct multiply_8uc4_32f : binary_function<uint, float, uint>
{
    __device__ __forceinline__ uint operator ()(uint a, float b) const
    {
        uint packed = 0;

        #pragma unroll
        for (int shift = 0; shift < 32; shift += 8)
        {
            // Extract one channel, scale, saturate, and pack it back.
            packed |= uint(saturate_cast<uchar>((0xffu & (a >> shift)) * b)) << shift;
        }

        return packed;
    }
};
// Kernel launch tuning for multiply_8uc4_32f -- presumably chooses an
// 8x8 block and per-thread workload for transform(); confirm in the
// transform implementation.
template <> struct TransformFunctorTraits<multiply_8uc4_32f> : DefaultTransformFunctorTraits<multiply_8uc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
void multiply_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<uint> >(src1), src2, static_cast< DevMem2D_<uint> >(dst), multiply_8uc4_32f(), stream);
}
// Device functor: multiplies each channel of a short4 by a float,
// saturating every product back to short.
struct multiply_16sc4_32f : binary_function<short4, float, short4>
{
    __device__ __forceinline__ short4 operator ()(short4 a, float b) const
    {
        const short rx = saturate_cast<short>(a.x * b);
        const short ry = saturate_cast<short>(a.y * b);
        const short rz = saturate_cast<short>(a.z * b);
        const short rw = saturate_cast<short>(a.w * b);
        return make_short4(rx, ry, rz, rw);
    }
};
// Kernel launch tuning for multiply_16sc4_32f (same shape as the 8UC4
// variant above) -- confirm semantics in the transform implementation.
template <> struct TransformFunctorTraits<multiply_16sc4_32f> : DefaultTransformFunctorTraits<multiply_16sc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
void multiply_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<short4> >(src1), src2,
static_cast< DevMem2D_<short4> >(dst), multiply_16sc4_32f(), stream);
}
// Device functor: dst = saturate_cast<D>(scale * a * b) -- element-wise
// product with an optional global scaling factor.
template <typename T, typename D> struct Multiply : binary_function<T, T, D>
{
    Multiply(double s) : scale(s) {}

    __device__ __forceinline__ D operator ()(T lhs, T rhs) const
    {
        return saturate_cast<D>(scale * lhs * rhs);
    }

    const double scale; // global multiplier applied to every product
};
// Launch-configuration tuning for the transform() kernel when running
// Multiply on 16/32-bit element types -- mirrors the SubtractScalar
// traits above; confirm field semantics in the transform implementation.
template <> struct TransformFunctorTraits< Multiply<ushort, ushort> > : DefaultTransformFunctorTraits< Multiply<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Multiply<short, short> > : DefaultTransformFunctorTraits< Multiply<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Multiply<int, int> > : DefaultTransformFunctorTraits< Multiply<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Multiply<float, float> > : DefaultTransformFunctorTraits< Multiply<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host launcher: dst = saturate_cast<D>(scale * src1 * src2), element-wise.
template <typename T, typename D> void multiply_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream)
{
    // Demote the scale factor if the device lacks native double support.
    cudaSafeCall( cudaSetDoubleForDevice(&scale) );
    transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, Multiply<T, D>(scale), stream);
}
// Explicit instantiations of the matrix-matrix multiply kernel.
// Commented-out (source, destination) pairs are deliberately disabled --
// presumably not reachable from the host-side wrappers; confirm before enabling.
template void multiply_gpu<uchar, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<uchar, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<ushort, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<ushort, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<ushort, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<short, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<short, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<short, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<int, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<int, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<int, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<float, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<float, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<double, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
// Device functor: dst = saturate_cast<D>(scale * src * val) -- multiplies
// every element by a fixed scalar and an additional global scale factor.
template <typename T, typename D> struct MultiplyScalar : unary_function<T, D>
{
    MultiplyScalar(double v, double s) : val(v), scale(s) {}

    __device__ __forceinline__ D operator ()(T elem) const
    {
        return saturate_cast<D>(scale * elem * val);
    }

    const double val;   // scalar operand
    const double scale; // global multiplier
};
// Launch-configuration tuning for the transform() kernel when running
// MultiplyScalar on 16/32-bit element types -- same pattern as the other
// per-type traits in this file.
template <> struct TransformFunctorTraits< MultiplyScalar<ushort, ushort> > : DefaultTransformFunctorTraits< MultiplyScalar<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< MultiplyScalar<short, short> > : DefaultTransformFunctorTraits< MultiplyScalar<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< MultiplyScalar<int, int> > : DefaultTransformFunctorTraits< MultiplyScalar<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< MultiplyScalar<float, float> > : DefaultTransformFunctorTraits< MultiplyScalar<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host launcher: dst = saturate_cast<D>(scale * src1 * val), element-wise.
template <typename T, typename D> void multiply_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream)
{
    // Demote both double parameters if the device lacks native fp64.
    cudaSafeCall( cudaSetDoubleForDevice(&val) );
    cudaSafeCall( cudaSetDoubleForDevice(&scale) );
    transform((DevMem2D_<T>)src1, (DevMem2D_<D>)dst, MultiplyScalar<T, D>(val, scale), stream);
}
// Explicit instantiations of the scalar-multiply kernel. Commented-out
// pairs are deliberately disabled -- presumably not reachable from the
// host-side wrappers; confirm before enabling.
template void multiply_gpu<uchar, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<uchar, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<uchar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<schar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<ushort, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<ushort, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<ushort, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<ushort, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<short, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<short, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<short, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<short, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<int, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<int, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<int, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<int, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<float, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<float, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<float, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, int   >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void multiply_gpu<double, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void multiply_gpu<double, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// divide
// Device functor: per-channel division of a uchar4 by a float; a zero
// divisor yields an all-zero pixel instead of dividing by zero.
struct divide_8uc4_32f : binary_function<uchar4, float, uchar4>
{
    __device__ __forceinline__ uchar4 operator ()(uchar4 a, float b) const
    {
        if (b == 0)
            return make_uchar4(0, 0, 0, 0);

        return make_uchar4(saturate_cast<uchar>(a.x / b), saturate_cast<uchar>(a.y / b),
                           saturate_cast<uchar>(a.z / b), saturate_cast<uchar>(a.w / b));
    }
};
// Kernel launch tuning for divide_8uc4_32f (same shape as the multiply
// 8UC4 traits above) -- confirm semantics in the transform implementation.
template <> struct TransformFunctorTraits<divide_8uc4_32f> : DefaultTransformFunctorTraits<divide_8uc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
void divide_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<uchar4> >(src1), src2, static_cast< DevMem2D_<uchar4> >(dst), divide_8uc4_32f(), stream);
}
// Device functor: per-channel division of a short4 by a float; a zero
// divisor yields an all-zero pixel instead of dividing by zero.
//
// Bug fix: the y and w channels were saturated with saturate_cast<uchar>
// (copy-paste from the 8UC4 variant above), clamping signed 16-bit results
// into [0, 255] and discarding the sign. All four channels now saturate
// to short, matching the x/z channels and the functor's short4 result type.
struct divide_16sc4_32f : binary_function<short4, float, short4>
{
    __device__ __forceinline__ short4 operator ()(short4 a, float b) const
    {
        return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
                                    saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
                      : make_short4(0,0,0,0);
    }
};
// Kernel launch tuning for divide_16sc4_32f -- same pattern as the other
// 4-channel traits in this file.
template <> struct TransformFunctorTraits<divide_16sc4_32f> : DefaultTransformFunctorTraits<divide_16sc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
void divide_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<short4> >(src1), src2, static_cast< DevMem2D_<short4> >(dst), divide_16sc4_32f(), stream);
}
// Device functor: dst = saturate_cast<D>(scale * a / b); division by an
// exact zero yields 0 rather than a device divide-by-zero.
template <typename T, typename D> struct Divide : binary_function<T, T, D>
{
    Divide(double s) : scale(s) {}

    __device__ __forceinline__ D operator ()(T num, T den) const
    {
        if (den == 0)
            return 0;
        return saturate_cast<D>(scale * num / den);
    }

    const double scale; // global multiplier applied to each quotient
};
// Launch-configuration tuning for the transform() kernel when running
// Divide on 16/32-bit element types -- same pattern as the other
// per-type traits in this file.
template <> struct TransformFunctorTraits< Divide<ushort, ushort> > : DefaultTransformFunctorTraits< Divide<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Divide<short, short> > : DefaultTransformFunctorTraits< Divide<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Divide<int, int> > : DefaultTransformFunctorTraits< Divide<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Divide<float, float> > : DefaultTransformFunctorTraits< Divide<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host launcher: dst = saturate_cast<D>(scale * src1 / src2), element-wise,
// with zero divisors producing 0.
template <typename T, typename D> void divide_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream)
{
    // Demote the scale factor if the device lacks native double support.
    cudaSafeCall( cudaSetDoubleForDevice(&scale) );
    transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, Divide<T, D>(scale), stream);
}
// Explicit instantiations of the matrix-matrix divide kernel. Commented-out
// pairs are deliberately disabled -- presumably not reachable from the
// host-side wrappers; confirm before enabling. (The list appears to
// continue beyond this chunk.)
template void divide_gpu<uchar, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<uchar, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<ushort, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<ushort, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<ushort, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<short, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<short, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<short, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<int, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<int, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<int, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, int   >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<float, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<float, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, int >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<double, double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
// Per-element division of a matrix by a scalar:
//   dst(x,y) = saturate_cast<D>(scale * src(x,y) / val)
// All arithmetic is carried out in double precision before saturation.
template <typename T, typename D> struct DivideScalar : unary_function<T, D>
{
DivideScalar(double val_, double scale_) : val(val_), scale(scale_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(scale * a / val);
}
const double val;   // scalar divisor
const double scale; // extra multiplicative factor applied before the division
};
// Launch-configuration hints for the transform kernel when source and
// destination share a 16/32-bit type (8 threads in y, shift of 4).
template <> struct TransformFunctorTraits< DivideScalar<ushort, ushort> > : DefaultTransformFunctorTraits< DivideScalar<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< DivideScalar<short, short> > : DefaultTransformFunctorTraits< DivideScalar<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< DivideScalar<int, int> > : DefaultTransformFunctorTraits< DivideScalar<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< DivideScalar<float, float> > : DefaultTransformFunctorTraits< DivideScalar<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host entry point: dst = saturate_cast<D>(scale * src1 / val), per element.
// cudaSetDoubleForDevice converts the doubles for devices without native
// double support before they are captured by the functor.
template <typename T, typename D> void divide_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream)
{
cudaSafeCall( cudaSetDoubleForDevice(&val) );
cudaSafeCall( cudaSetDoubleForDevice(&scale) );
DivideScalar<T, D> op(val, scale);
transform((DevMem2D_<T>)src1, (DevMem2D_<D>)dst, op, stream);
}
// Explicit instantiations of the (matrix / scalar) divide_gpu kernel.
// Commented-out lines are type pairs intentionally left unsupported.
template void divide_gpu<uchar, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<uchar, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, int >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<uchar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, int >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<schar, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<ushort, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<ushort, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<ushort, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, int >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<ushort, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<short, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<short, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<short, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, int >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<short, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<int, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<int, int >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<int, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<int, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<float, int >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<float, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<float, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, uchar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, schar >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, ushort>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, short >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, int >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
//template void divide_gpu<double, float >(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template void divide_gpu<double, double>(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
// Per-element "scalar divided by matrix":
//   dst(x,y) = src(x,y) != 0 ? saturate_cast<D>(scale / src(x,y)) : 0
// Division by zero yields 0 instead of an undefined value.
template <typename T, typename D> struct Reciprocal : unary_function<T, D>
{
Reciprocal(double scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return a != 0 ? saturate_cast<D>(scale / a) : 0;
}
const double scale; // numerator (the scalar being divided by each element)
};
// Launch-configuration hints for the transform kernel for same-type
// 16/32-bit source/destination pairs.
template <> struct TransformFunctorTraits< Reciprocal<ushort, ushort> > : DefaultTransformFunctorTraits< Reciprocal<ushort, ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Reciprocal<short, short> > : DefaultTransformFunctorTraits< Reciprocal<short, short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Reciprocal<int, int> > : DefaultTransformFunctorTraits< Reciprocal<int, int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Reciprocal<float, float> > : DefaultTransformFunctorTraits< Reciprocal<float, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host entry point: dst = scalar / src2, per element (0 where src2 == 0).
template <typename T, typename D> void divide_gpu(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
cudaSafeCall( cudaSetDoubleForDevice(&scalar) );
Reciprocal<T, D> op(scalar);
transform((DevMem2D_<T>)src2, (DevMem2D_<D>)dst, op, stream);
}
// Explicit instantiations of the (scalar / matrix) divide_gpu kernel.
// Commented-out lines are type pairs intentionally left unsupported.
template void divide_gpu<uchar, uchar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<uchar, schar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<uchar, ushort>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<uchar, short >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<uchar, int >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<uchar, float >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<uchar, double>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<schar, uchar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<schar, schar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<schar, ushort>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<schar, short >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<schar, int >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<schar, float >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<schar, double>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<ushort, uchar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<ushort, schar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<ushort, ushort>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<ushort, short >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<ushort, int >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<ushort, float >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<ushort, double>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<short, uchar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<short, schar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<short, ushort>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<short, short >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<short, int >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<short, float >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<short, double>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<int, uchar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<int, schar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<int, ushort>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<int, short >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<int, int >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<int, float >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<int, double>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<float, uchar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<float, schar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<float, ushort>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<float, short >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<float, int >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<float, float >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<float, double>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<double, uchar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<double, schar >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<double, ushort>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<double, short >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<double, int >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void divide_gpu<double, float >(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void divide_gpu<double, double>(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// absdiff
// Per-element absolute difference: dst = saturate_cast<T>(|a - b|).
// The static abs() overloads pick the right intrinsic per value type;
// integral types narrower than int promote to int in (a - b), so the
// int overload covers uchar/schar/ushort/short as well.
template <typename T> struct Absdiff : binary_function<T, T, T>
{
static __device__ __forceinline__ int abs(int a)
{
return ::abs(a);
}
static __device__ __forceinline__ float abs(float a)
{
return ::fabsf(a);
}
static __device__ __forceinline__ double abs(double a)
{
return ::fabs(a);
}
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(abs(a - b));
}
};
// Launch-configuration hints for 16/32-bit element types.
template <> struct TransformFunctorTraits< Absdiff<ushort> > : DefaultTransformFunctorTraits< Absdiff<ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Absdiff<short> > : DefaultTransformFunctorTraits< Absdiff<short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Absdiff<int> > : DefaultTransformFunctorTraits< Absdiff<int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Absdiff<float> > : DefaultTransformFunctorTraits< Absdiff<float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host entry point: dst = |src1 - src2|, per element.
template <typename T> void absdiff_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<T>)dst, Absdiff<T>(), stream);
}
// Explicit instantiations.  The commented-out types (uchar/int/float) are
// presumably handled by another code path on the host side -- TODO confirm.
//template void absdiff_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void absdiff_gpu<int >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//template void absdiff_gpu<float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
// Per-element absolute difference against a scalar:
//   dst = saturate_cast<T>(|src - val|)
// The subtraction is performed in double precision (a promotes to double).
template <typename T> struct AbsdiffScalar : unary_function<T, T>
{
AbsdiffScalar(double val_) : val(val_) {}
__device__ __forceinline__ T operator ()(T a) const
{
return saturate_cast<T>(::fabs(a - val));
}
double val; // the scalar operand
};
// Launch-configuration hints for 16/32-bit element types.
template <> struct TransformFunctorTraits< AbsdiffScalar<ushort> > : DefaultTransformFunctorTraits< AbsdiffScalar<ushort> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< AbsdiffScalar<short> > : DefaultTransformFunctorTraits< AbsdiffScalar<short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< AbsdiffScalar<int> > : DefaultTransformFunctorTraits< AbsdiffScalar<int> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< AbsdiffScalar<float> > : DefaultTransformFunctorTraits< AbsdiffScalar<float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host entry point: dst = |src1 - val|, per element.
template <typename T> void absdiff_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, cudaStream_t stream)
{
cudaSafeCall( cudaSetDoubleForDevice(&val) );
AbsdiffScalar<T> op(val);
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)dst, op, stream);
}
// Explicit instantiations (float is presumably handled elsewhere -- TODO confirm).
template void absdiff_gpu<uchar >(const DevMem2D& src1, double src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<schar >(const DevMem2D& src1, double src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<ushort>(const DevMem2D& src1, double src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<short >(const DevMem2D& src1, double src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<int >(const DevMem2D& src1, double src2, const DevMem2D& dst, cudaStream_t stream);
//template void absdiff_gpu<float >(const DevMem2D& src1, double src2, const DevMem2D& dst, cudaStream_t stream);
template void absdiff_gpu<double>(const DevMem2D& src1, double src2, const DevMem2D& dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////////////////
// Compare
// Comparison functor: yields 255 where src1 == src2, else 0
// (bool promotes to int, so the product is exactly 0 or 255).
template <typename T> struct Equal : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T src1, T src2) const
{
return static_cast<uchar>((src1 == src2) * 255);
}
};
// Comparison functor: yields 255 where src1 != src2, else 0.
template <typename T> struct NotEqual : binary_function<T, T, uchar>
{
    __device__ __forceinline__ uchar operator()(T src1, T src2) const
    {
        // NOTE(review): an unreachable duplicate return statement (a merge
        // leftover) was removed; this form matches the sibling Equal /
        // Less / LessEqual functors.
        return static_cast<uchar>((src1 != src2) * 255);
    }
};
// Comparison functor: yields 255 where src1 < src2, else 0.
template <typename T> struct Less : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T src1, T src2) const
{
return static_cast<uchar>((src1 < src2) * 255);
}
};
// Comparison functor: yields 255 where src1 <= src2, else 0.
template <typename T> struct LessEqual : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T src1, T src2) const
{
return static_cast<uchar>((src1 <= src2) * 255);
}
};
// NOTE(review): a stray, body-less duplicate declaration of compare_ne()
// (a merge leftover that broke compilation) was removed here; the generic
// compare<Op, T>() defined below supersedes it.
//
// Launch-configuration hints for the 32-bit comparison functors.
template <> struct TransformFunctorTraits< Equal<int> > : DefaultTransformFunctorTraits< Equal<int> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Equal<float> > : DefaultTransformFunctorTraits< Equal<float> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< NotEqual<int> > : DefaultTransformFunctorTraits< NotEqual<int> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< NotEqual<float> > : DefaultTransformFunctorTraits< NotEqual<float> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Less<int> > : DefaultTransformFunctorTraits< Less<int> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< Less<float> > : DefaultTransformFunctorTraits< Less<float> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< LessEqual<int> > : DefaultTransformFunctorTraits< LessEqual<int> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
template <> struct TransformFunctorTraits< LessEqual<float> > : DefaultTransformFunctorTraits< LessEqual<float> >
{
    enum { smart_block_dim_y = 8 };
    enum { smart_shift = 4 };
};
// Generic element-wise comparison driver: applies Op<T> to every
// (src1, src2) pair and stores the resulting 8-bit mask (255/0) in dst.
template <template <typename> class Op, typename T> void compare(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
    // NOTE(review): removed a leftover duplicate local "NotEqual<T> op;"
    // that redefined 'op' and broke compilation; Op<T> is the intended
    // functor selected by the caller.
    Op<T> op;
    transform(static_cast< DevMem2D_<T> >(src1), static_cast< DevMem2D_<T> >(src2), dst, op, stream);
}
void compare_ne_8uc4(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
template <typename T> void compare_eq(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
compare<Equal, T>(src1, src2, dst, stream);
}
// dst = (src1 != src2) ? 255 : 0, per element.
template <typename T> void compare_ne(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
    // NOTE(review): removed a leftover call to the old compare_ne<uint>()
    // overload; the generic compare<NotEqual, T>() is the intended body.
    compare<NotEqual, T>(src1, src2, dst, stream);
}
void compare_ne_32f(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
template <typename T> void compare_lt(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
compare_ne<float>(src1, src2, dst, stream);
compare<Less, T>(src1, src2, dst, stream);
}
// dst = (src1 <= src2) ? 255 : 0, per element.
template <typename T> void compare_le(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
compare<LessEqual, T>(src1, src2, dst, stream);
}
// Explicit instantiations of the four comparison entry points for every
// supported element type.
template void compare_eq<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_eq<schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_eq<ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_eq<short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_eq<int >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_eq<float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_eq<double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_ne<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_ne<schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_ne<ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_ne<short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_ne<int >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_ne<float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_ne<double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_lt<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_lt<schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_lt<ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_lt<short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_lt<int >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_lt<float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_lt<double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_le<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_le<schar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_le<ushort>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_le<short >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_le<int >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_le<float >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void compare_le<double>(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
......@@ -508,21 +1673,6 @@ namespace cv { namespace gpu { namespace device
// Instantiation of the threshold kernel (template defined above this chunk).
template void threshold_gpu<double>(const DevMem2D& src, const DevMem2D& dst, double thresh, double maxVal, int type, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// subtract
// Launch-configuration hint for the 16-bit subtraction functor.
template <> struct TransformFunctorTraits< minus<short> > : DefaultTransformFunctorTraits< minus<short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
// Host entry point: dst = src1 - src2, per element, using the standard
// minus<T> functor; only the short specialization is instantiated here.
template <typename T> void subtractCaller(const DevMem2D src1, const DevMem2D src2, DevMem2D dst, cudaStream_t stream)
{
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<T>)dst, minus<T>(), stream);
}
template void subtractCaller<short>(const DevMem2D src1, const DevMem2D src2, DevMem2D dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
......@@ -604,151 +1754,7 @@ namespace cv { namespace gpu { namespace device
template void pow_caller<float>(const DevMem2D& src, float power, DevMem2D dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// divide
// Per-element division of a 4-channel 8-bit image by a single-channel float
// image: every channel of 'a' is divided by the same scalar 'b', with
// saturation back to uchar.
struct divide_8uc4_32f : binary_function<uchar4, float, uchar4>
{
__device__ __forceinline__ uchar4 operator ()(uchar4 a, float b) const
{
return make_uchar4(saturate_cast<uchar>(a.x / b), saturate_cast<uchar>(a.y / b),
saturate_cast<uchar>(a.z / b), saturate_cast<uchar>(a.w / b));
}
};
// Launch-configuration hints for the 4-channel divide kernel.
template <> struct TransformFunctorTraits<divide_8uc4_32f> : DefaultTransformFunctorTraits<divide_8uc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
// Host entry point for the CV_8UC4 / CV_32FC1 divide.
void divide_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<uchar4> >(src1), src2, static_cast< DevMem2D_<uchar4> >(dst), divide_8uc4_32f(), stream);
}
// Per-element division of a 4-channel 16-bit signed image by a
// single-channel float image: every channel of 'a' is divided by the
// same scalar 'b', with saturation back to short.
struct divide_16sc4_32f : binary_function<short4, float, short4>
{
    __device__ __forceinline__ short4 operator ()(short4 a, float b) const
    {
        // BUG FIX: the y and w channels were saturated with
        // saturate_cast<uchar> (clamping to [0, 255]) instead of
        // saturate_cast<short>, corrupting negative and large values
        // in those channels.
        return make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
                           saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b));
    }
};
// Launch-configuration hints for the 4-channel short divide kernel.
template <> struct TransformFunctorTraits<divide_16sc4_32f> : DefaultTransformFunctorTraits<divide_16sc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
// Host entry point for the CV_16SC4 / CV_32FC1 divide.
void divide_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<short4> >(src1), src2, static_cast< DevMem2D_<short4> >(dst), divide_16sc4_32f(), stream);
}
//////////////////////////////////////////////////////////////////////////
// multiply
// Kernel-launch tuning for the 16-bit element-wise addition functor:
// 8-row thread blocks, each thread shifted over 4 elements.
template <> struct TransformFunctorTraits< plus<short> > : DefaultTransformFunctorTraits< plus<short> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename T> void add_gpu(const DevMem2D src1, const DevMem2D src2, DevMem2D dst, cudaStream_t stream)
{
transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<T>)dst, plus<T>(), stream);
}
template void add_gpu<short>(const DevMem2D src1, const DevMem2D src2, DevMem2D dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// multiply
// Functor: multiplies each 8-bit channel of an RGBA pixel packed into a
// uint by a float factor, saturating every channel to [0, 255] before
// repacking.
struct multiply_8uc4_32f : binary_function<uint, float, uint>
{
    __device__ __forceinline__ uint operator ()(uint a, float b) const
    {
        const uint c0 = saturate_cast<uchar>((0xffu & (a      )) * b);
        const uint c1 = saturate_cast<uchar>((0xffu & (a >>  8)) * b);
        const uint c2 = saturate_cast<uchar>((0xffu & (a >> 16)) * b);
        const uint c3 = saturate_cast<uchar>((0xffu & (a >> 24)) * b);
        return c0 | (c1 << 8) | (c2 << 16) | (c3 << 24);
    }
};
// Launch tuning for the packed 8UC4 * 32F multiplication kernel:
// 8x8 thread blocks, 8-element shift per thread.
template <> struct TransformFunctorTraits<multiply_8uc4_32f> : DefaultTransformFunctorTraits<multiply_8uc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
void multiply_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<uint> >(src1), src2, static_cast< DevMem2D_<uint> >(dst), multiply_8uc4_32f(), stream);
}
// Functor: multiplies every channel of a short4 pixel by one float,
// saturating each result back to 16-bit signed range.
struct multiply_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
}
};
// Launch tuning for the 16SC4 * 32F multiplication kernel:
// 8x8 thread blocks, 8-element shift per thread.
template <> struct TransformFunctorTraits<multiply_16sc4_32f> : DefaultTransformFunctorTraits<multiply_16sc4_32f>
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
void multiply_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<short4> >(src1), src2,
static_cast< DevMem2D_<short4> >(dst), multiply_16sc4_32f(), stream);
}
//////////////////////////////////////////////////////////////////////////
// multiply (by scalar)
// Unary functor: multiplies each element by a fixed scale factor and
// saturates the product to the destination type D.  S is the arithmetic
// type the scale is stored in.
template <typename T, typename D, typename S> struct MultiplyScalar : unary_function<T, D>
{
    __host__ __device__ __forceinline__ MultiplyScalar(typename TypeTraits<S>::ParameterType s) : scale(s) {}

    __device__ __forceinline__ D operator ()(typename TypeTraits<T>::ParameterType a) const
    {
        return saturate_cast<D>(a * scale);
    }

    const S scale;
};
// Launch tuning for the 8U scalar-multiplication kernel:
// 8-row thread blocks, 8-element shift per thread.
template <> struct TransformFunctorTraits< MultiplyScalar<uchar, uchar, float> > : DefaultTransformFunctorTraits< MultiplyScalar<uchar, uchar, float> >
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 8 };
};
template <typename T, typename D>
void multiplyScalar_gpu(const DevMem2D& src, float scale, const DevMem2D& dst, cudaStream_t stream)
{
transform(static_cast< DevMem2D_<T> >(src), static_cast< DevMem2D_<D> >(dst), MultiplyScalar<T, D, float>(scale), stream);
}
template void multiplyScalar_gpu<uchar, uchar>(const DevMem2D& src, float scale, const DevMem2D& dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// addWeighted
......
......@@ -47,14 +47,15 @@ using namespace cv::gpu;
#if !defined (HAVE_CUDA)

// CPU-only build: every GPU arithmetic entry point is stubbed to throw.
// NOTE(review): this span is rendered from a diff — the first group of
// stubs carries the pre-change signatures and the second group the new
// mask/dtype/scale-aware signatures; only one set should survive in the
// merged file. Confirm against the header declarations.
void cv::gpu::add(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::add(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::subtract(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::subtract(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::multiply(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::multiply(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::divide(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::divide(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
// Post-change stubs: optional mask and dtype / scale parameters added.
void cv::gpu::add(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::add(const GpuMat&, const Scalar&, GpuMat&, const GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::subtract(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::subtract(const GpuMat&, const Scalar&, GpuMat&, const GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::multiply(const GpuMat&, const GpuMat&, GpuMat&, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::multiply(const GpuMat&, const Scalar&, GpuMat&, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::divide(const GpuMat&, const GpuMat&, GpuMat&, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::divide(const GpuMat&, const Scalar&, GpuMat&, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::divide(double, const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::absdiff(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::absdiff(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::compare(const GpuMat&, const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
......@@ -85,151 +86,305 @@ namespace
npp_arithm_8u_t npp_func_8uc1, npp_arithm_8u_t npp_func_8uc4,
npp_arithm_32s_t npp_func_32sc1, npp_arithm_32f_t npp_func_32fc1, cudaStream_t stream)
{
CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);
dst.create( src1.size(), src1.type() );
NppiSize sz;
sz.width = src1.cols;
sz.width = src1.cols * src1.channels();
sz.height = src1.rows;
NppStreamHandler h(stream);
switch (src1.type())
if (src1.depth() == CV_8U && (src1.cols * src1.channels()) % 4 == 0)
{
case CV_8UC1:
nppSafeCall( npp_func_8uc1(src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, 0) );
break;
case CV_8UC4:
sz.width /= 4;
nppSafeCall( npp_func_8uc4(src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, 0) );
break;
case CV_32SC1:
}
else if (src1.depth() == CV_8U)
{
nppSafeCall( npp_func_8uc1(src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, 0) );
}
else if (src1.depth() == CV_32S)
{
nppSafeCall( npp_func_32sc1(src1.ptr<Npp32s>(), static_cast<int>(src1.step), src2.ptr<Npp32s>(), static_cast<int>(src2.step),
dst.ptr<Npp32s>(), static_cast<int>(dst.step), sz) );
break;
case CV_32FC1:
}
else if (src1.depth() == CV_32F)
{
nppSafeCall( npp_func_32fc1(src1.ptr<Npp32f>(), static_cast<int>(src1.step), src2.ptr<Npp32f>(), static_cast<int>(src2.step),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
break;
default:
CV_Assert(!"Unsupported source type");
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
template<int SCN> struct NppArithmScalarFunc;
template<> struct NppArithmScalarFunc<1>
{
typedef NppStatus (*func_ptr)(const Npp32f *pSrc, int nSrcStep, Npp32f nValue, Npp32f *pDst, int nDstStep, NppiSize oSizeROI);
};
template<> struct NppArithmScalarFunc<2>
////////////////////////////////////////////////////////////////////////
// add
namespace cv { namespace gpu { namespace device
{
template <typename T, typename D>
void add_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template <typename T, typename D>
void add_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
}}}
void cv::gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, int dtype, Stream& s)
{
using namespace cv::gpu::device;
typedef void (*func_t)(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
static const func_t funcs[7][7] =
{
typedef NppStatus (*func_ptr)(const Npp32fc *pSrc, int nSrcStep, Npp32fc nValue, Npp32fc *pDst, int nDstStep, NppiSize oSizeROI);
{add_gpu<unsigned char, unsigned char>, 0/*add_gpu<unsigned char, signed char>*/, add_gpu<unsigned char, unsigned short>, add_gpu<unsigned char, short>, add_gpu<unsigned char, int>, add_gpu<unsigned char, float>, add_gpu<unsigned char, double>},
{0/*add_gpu<signed char, unsigned char>*/, 0/*add_gpu<signed char, signed char>*/, 0/*add_gpu<signed char, unsigned short>*/, 0/*add_gpu<signed char, short>*/, 0/*add_gpu<signed char, int>*/, 0/*add_gpu<signed char, float>*/, 0/*add_gpu<signed char, double>*/},
{0/*add_gpu<unsigned short, unsigned char>*/, 0/*add_gpu<unsigned short, signed char>*/, add_gpu<unsigned short, unsigned short>, 0/*add_gpu<unsigned short, short>*/, add_gpu<unsigned short, int>, add_gpu<unsigned short, float>, add_gpu<unsigned short, double>},
{0/*add_gpu<short, unsigned char>*/, 0/*add_gpu<short, signed char>*/, 0/*add_gpu<short, unsigned short>*/, add_gpu<short, short>, add_gpu<short, int>, add_gpu<short, float>, add_gpu<short, double>},
{0/*add_gpu<int, unsigned char>*/, 0/*add_gpu<int, signed char>*/, 0/*add_gpu<int, unsigned short>*/, 0/*add_gpu<int, short>*/, add_gpu<int, int>, add_gpu<int, float>, add_gpu<int, double>},
{0/*add_gpu<float, unsigned char>*/, 0/*add_gpu<float, signed char>*/, 0/*add_gpu<float, unsigned short>*/, 0/*add_gpu<float, short>*/, 0/*add_gpu<float, int>*/, add_gpu<float, float>, add_gpu<float, double>},
{0/*add_gpu<double, unsigned char>*/, 0/*add_gpu<double, signed char>*/, 0/*add_gpu<double, unsigned short>*/, 0/*add_gpu<double, short>*/, 0/*add_gpu<double, int>*/, 0/*add_gpu<double, float>*/, add_gpu<double, double>}
};
template<int SCN, typename NppArithmScalarFunc<SCN>::func_ptr func> struct NppArithmScalar;
CV_Assert(src1.type() == src2.type() && src1.size() == src2.size());
CV_Assert(mask.empty() || (src1.channels() == 1 && mask.size() == src1.size() && mask.type() == CV_8U));
if (dtype < 0)
dtype = src1.depth();
dst.create(src1.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src1.channels()));
cudaStream_t stream = StreamAccessor::getStream(s);
template<typename NppArithmScalarFunc<1>::func_ptr func> struct NppArithmScalar<1, func>
if (mask.empty() && dst.type() == src1.type() && (src1.depth() == CV_8U || src1.depth() == CV_32S || src1.depth() == CV_32F))
{
static void calc(const GpuMat& src, const Scalar& sc, GpuMat& dst, cudaStream_t stream)
{
dst.create(src.size(), src.type());
nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R, stream);
return;
}
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
const func_t func = funcs[src1.depth()][dst.depth()];
CV_Assert(func != 0);
func(src1.reshape(1), src2.reshape(1), dst.reshape(1), mask, stream);
}
NppStreamHandler h(stream);
void cv::gpu::add(const GpuMat& src, const Scalar& sc, GpuMat& dst, const GpuMat& mask, int dtype, Stream& s)
{
using namespace cv::gpu::device;
nppSafeCall( func(src.ptr<Npp32f>(), static_cast<int>(src.step), (Npp32f)sc[0], dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
typedef void (*func_t)(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
static const func_t funcs[7][7] =
{
{add_gpu<unsigned char, unsigned char>, 0/*add_gpu<unsigned char, signed char>*/, add_gpu<unsigned char, unsigned short>, add_gpu<unsigned char, short>, add_gpu<unsigned char, int>, add_gpu<unsigned char, float>, add_gpu<unsigned char, double>},
{0/*add_gpu<signed char, unsigned char>*/, 0/*add_gpu<signed char, signed char>*/, 0/*add_gpu<signed char, unsigned short>*/, 0/*add_gpu<signed char, short>*/, 0/*add_gpu<signed char, int>*/, 0/*add_gpu<signed char, float>*/, 0/*add_gpu<signed char, double>*/},
{0/*add_gpu<unsigned short, unsigned char>*/, 0/*add_gpu<unsigned short, signed char>*/, add_gpu<unsigned short, unsigned short>, 0/*add_gpu<unsigned short, short>*/, add_gpu<unsigned short, int>, add_gpu<unsigned short, float>, add_gpu<unsigned short, double>},
{0/*add_gpu<short, unsigned char>*/, 0/*add_gpu<short, signed char>*/, 0/*add_gpu<short, unsigned short>*/, add_gpu<short, short>, add_gpu<short, int>, add_gpu<short, float>, add_gpu<short, double>},
{0/*add_gpu<int, unsigned char>*/, 0/*add_gpu<int, signed char>*/, 0/*add_gpu<int, unsigned short>*/, 0/*add_gpu<int, short>*/, add_gpu<int, int>, add_gpu<int, float>, add_gpu<int, double>},
{0/*add_gpu<float, unsigned char>*/, 0/*add_gpu<float, signed char>*/, 0/*add_gpu<float, unsigned short>*/, 0/*add_gpu<float, short>*/, 0/*add_gpu<float, int>*/, add_gpu<float, float>, add_gpu<float, double>},
{0/*add_gpu<double, unsigned char>*/, 0/*add_gpu<double, signed char>*/, 0/*add_gpu<double, unsigned short>*/, 0/*add_gpu<double, short>*/, 0/*add_gpu<double, int>*/, 0/*add_gpu<double, float>*/, add_gpu<double, double>}
};
template<typename NppArithmScalarFunc<2>::func_ptr func> struct NppArithmScalar<2, func>
CV_Assert(src.channels() == 1 || src.type() == CV_32FC2);
CV_Assert(mask.empty() || (src.channels() == 1 && mask.size() == src.size() && mask.type() == CV_8U));
if (dtype < 0)
dtype = src.depth();
dst.create(src.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()));
cudaStream_t stream = StreamAccessor::getStream(s);
if (mask.empty() && dst.type() == src.type() && src.depth() == CV_32F)
{
static void calc(const GpuMat& src, const Scalar& sc, GpuMat& dst, cudaStream_t stream)
{
dst.create(src.size(), src.type());
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
NppStreamHandler h(stream);
if (src.type() == CV_32FC1)
{
nppSafeCall( nppiAddC_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step), static_cast<Npp32f>(sc.val[0]),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
}
else
{
Npp32fc nValue;
nValue.re = (Npp32f)sc[0];
nValue.im = (Npp32f)sc[1];
nValue.re = static_cast<Npp32f>(sc.val[0]);
nValue.im = static_cast<Npp32f>(sc.val[1]);
nppSafeCall( nppiAddC_32fc_C1R(src.ptr<Npp32fc>(), static_cast<int>(src.step), nValue,
dst.ptr<Npp32fc>(), static_cast<int>(dst.step), sz) );
}
NppStreamHandler h(stream);
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
nppSafeCall( func(src.ptr<Npp32fc>(), static_cast<int>(src.step), nValue, dst.ptr<Npp32fc>(), static_cast<int>(dst.step), sz) );
return;
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
const func_t func = funcs[src.depth()][dst.depth()];
CV_Assert(func != 0);
func(src, sc.val[0], dst, mask, stream);
}
////////////////////////////////////////////////////////////////////////
// subtract
namespace cv { namespace gpu { namespace device
{
template <typename T>
void add_gpu(const DevMem2D src1, const DevMem2D src2, DevMem2D dst, cudaStream_t stream);
template <typename T, typename D>
void subtract_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
template <typename T, typename D>
void subtract_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
}}}
void cv::gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, int dtype, Stream& s)
{
if (src1.depth() == CV_16S && src2.depth() == CV_16S)
using namespace cv::gpu::device;
typedef void (*func_t)(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
static const func_t funcs[7][7] =
{
CV_Assert(src1.size() == src2.size());
dst.create(src1.size(), src1.type());
device::add_gpu<short>(src1.reshape(1), src2.reshape(1), dst.reshape(1), StreamAccessor::getStream(stream));
{subtract_gpu<unsigned char, unsigned char>, 0/*subtract_gpu<unsigned char, signed char>*/, subtract_gpu<unsigned char, unsigned short>, subtract_gpu<unsigned char, short>, subtract_gpu<unsigned char, int>, subtract_gpu<unsigned char, float>, subtract_gpu<unsigned char, double>},
{0/*subtract_gpu<signed char, unsigned char>*/, 0/*subtract_gpu<signed char, signed char>*/, 0/*subtract_gpu<signed char, unsigned short>*/, 0/*subtract_gpu<signed char, short>*/, 0/*subtract_gpu<signed char, int>*/, 0/*subtract_gpu<signed char, float>*/, 0/*subtract_gpu<signed char, double>*/},
{0/*subtract_gpu<unsigned short, unsigned char>*/, 0/*subtract_gpu<unsigned short, signed char>*/, subtract_gpu<unsigned short, unsigned short>, 0/*subtract_gpu<unsigned short, short>*/, subtract_gpu<unsigned short, int>, subtract_gpu<unsigned short, float>, subtract_gpu<unsigned short, double>},
{0/*subtract_gpu<short, unsigned char>*/, 0/*subtract_gpu<short, signed char>*/, 0/*subtract_gpu<short, unsigned short>*/, subtract_gpu<short, short>, subtract_gpu<short, int>, subtract_gpu<short, float>, subtract_gpu<short, double>},
{0/*subtract_gpu<int, unsigned char>*/, 0/*subtract_gpu<int, signed char>*/, 0/*subtract_gpu<int, unsigned short>*/, 0/*subtract_gpu<int, short>*/, subtract_gpu<int, int>, subtract_gpu<int, float>, subtract_gpu<int, double>},
{0/*subtract_gpu<float, unsigned char>*/, 0/*subtract_gpu<float, signed char>*/, 0/*subtract_gpu<float, unsigned short>*/, 0/*subtract_gpu<float, short>*/, 0/*subtract_gpu<float, int>*/, subtract_gpu<float, float>, subtract_gpu<float, double>},
{0/*subtract_gpu<double, unsigned char>*/, 0/*subtract_gpu<double, signed char>*/, 0/*subtract_gpu<double, unsigned short>*/, 0/*subtract_gpu<double, short>*/, 0/*subtract_gpu<double, int>*/, 0/*subtract_gpu<double, float>*/, subtract_gpu<double, double>}
};
CV_Assert(src1.type() == src2.type() && src1.size() == src2.size());
CV_Assert(mask.empty() || (src1.channels() == 1 && mask.size() == src1.size() && mask.type() == CV_8U));
if (dtype < 0)
dtype = src1.depth();
dst.create(src1.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src1.channels()));
cudaStream_t stream = StreamAccessor::getStream(s);
if (mask.empty() && dst.type() == src1.type() && (src1.depth() == CV_8U || src1.depth() == CV_32S || src1.depth() == CV_32F))
{
nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, nppiSub_32s_C1R, nppiSub_32f_C1R, stream);
return;
}
else
nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R, StreamAccessor::getStream(stream));
const func_t func = funcs[src1.depth()][dst.depth()];
CV_Assert(func != 0);
func(src1.reshape(1), src2.reshape(1), dst.reshape(1), mask, stream);
}
namespace cv { namespace gpu { namespace device
void cv::gpu::subtract(const GpuMat& src, const Scalar& sc, GpuMat& dst, const GpuMat& mask, int dtype, Stream& s)
{
template <typename T>
void subtractCaller(const DevMem2D src1, const DevMem2D src2, DevMem2D dst, cudaStream_t stream);
}}}
using namespace cv::gpu::device;
void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
{
if (src1.depth() == CV_16S && src2.depth() == CV_16S)
typedef void (*func_t)(const DevMem2D& src1, double val, const DevMem2D& dst, const PtrStep& mask, cudaStream_t stream);
static const func_t funcs[7][7] =
{
CV_Assert(src1.size() == src2.size());
dst.create(src1.size(), src1.type());
device::subtractCaller<short>(src1.reshape(1), src2.reshape(1), dst.reshape(1), StreamAccessor::getStream(stream));
{subtract_gpu<unsigned char, unsigned char>, 0/*subtract_gpu<unsigned char, signed char>*/, subtract_gpu<unsigned char, unsigned short>, subtract_gpu<unsigned char, short>, subtract_gpu<unsigned char, int>, subtract_gpu<unsigned char, float>, subtract_gpu<unsigned char, double>},
{0/*subtract_gpu<signed char, unsigned char>*/, 0/*subtract_gpu<signed char, signed char>*/, 0/*subtract_gpu<signed char, unsigned short>*/, 0/*subtract_gpu<signed char, short>*/, 0/*subtract_gpu<signed char, int>*/, 0/*subtract_gpu<signed char, float>*/, 0/*subtract_gpu<signed char, double>*/},
{0/*subtract_gpu<unsigned short, unsigned char>*/, 0/*subtract_gpu<unsigned short, signed char>*/, subtract_gpu<unsigned short, unsigned short>, 0/*subtract_gpu<unsigned short, short>*/, subtract_gpu<unsigned short, int>, subtract_gpu<unsigned short, float>, subtract_gpu<unsigned short, double>},
{0/*subtract_gpu<short, unsigned char>*/, 0/*subtract_gpu<short, signed char>*/, 0/*subtract_gpu<short, unsigned short>*/, subtract_gpu<short, short>, subtract_gpu<short, int>, subtract_gpu<short, float>, subtract_gpu<short, double>},
{0/*subtract_gpu<int, unsigned char>*/, 0/*subtract_gpu<int, signed char>*/, 0/*subtract_gpu<int, unsigned short>*/, 0/*subtract_gpu<int, short>*/, subtract_gpu<int, int>, subtract_gpu<int, float>, subtract_gpu<int, double>},
{0/*subtract_gpu<float, unsigned char>*/, 0/*subtract_gpu<float, signed char>*/, 0/*subtract_gpu<float, unsigned short>*/, 0/*subtract_gpu<float, short>*/, 0/*subtract_gpu<float, int>*/, subtract_gpu<float, float>, subtract_gpu<float, double>},
{0/*subtract_gpu<double, unsigned char>*/, 0/*subtract_gpu<double, signed char>*/, 0/*subtract_gpu<double, unsigned short>*/, 0/*subtract_gpu<double, short>*/, 0/*subtract_gpu<double, int>*/, 0/*subtract_gpu<double, float>*/, subtract_gpu<double, double>}
};
CV_Assert(src.channels() == 1 || src.type() == CV_32FC2);
CV_Assert(mask.empty() || (src.channels() == 1 && mask.size() == src.size() && mask.type() == CV_8U));
if (dtype < 0)
dtype = src.depth();
dst.create(src.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()));
cudaStream_t stream = StreamAccessor::getStream(s);
if (mask.empty() && dst.type() == src.type() && src.depth() == CV_32F)
{
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
NppStreamHandler h(stream);
if (src.type() == CV_32FC1)
{
nppSafeCall( nppiSubC_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step), static_cast<Npp32f>(sc.val[0]),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
}
else
{
Npp32fc nValue;
nValue.re = static_cast<Npp32f>(sc.val[0]);
nValue.im = static_cast<Npp32f>(sc.val[1]);
nppSafeCall( nppiSubC_32fc_C1R(src.ptr<Npp32fc>(), static_cast<int>(src.step), nValue,
dst.ptr<Npp32fc>(), static_cast<int>(dst.step), sz) );
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
return;
}
else
nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, nppiSub_32s_C1R, nppiSub_32f_C1R, StreamAccessor::getStream(stream));
const func_t func = funcs[src.depth()][dst.depth()];
CV_Assert(func != 0);
func(src, sc.val[0], dst, mask, stream);
}
////////////////////////////////////////////////////////////////////////
// multiply
namespace cv { namespace gpu { namespace device
{
void multiply_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream);
void multiply_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream);
template <typename T, typename D>
void multiplyScalar_gpu(const DevMem2D& src, float scale, const DevMem2D& dst, cudaStream_t stream);
template <typename T, typename D>
void multiply_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template <typename T, typename D>
void multiply_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
}}}
void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double scale, int dtype, Stream& s)
{
using namespace cv::gpu::device;
typedef void (*func_t)(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
static const func_t funcs[7][7] =
{
{multiply_gpu<unsigned char, unsigned char>, 0/*multiply_gpu<unsigned char, signed char>*/, multiply_gpu<unsigned char, unsigned short>, multiply_gpu<unsigned char, short>, multiply_gpu<unsigned char, int>, multiply_gpu<unsigned char, float>, multiply_gpu<unsigned char, double>},
{0/*multiply_gpu<signed char, unsigned char>*/, 0/*multiply_gpu<signed char, signed char>*/, 0/*multiply_gpu<signed char, unsigned short>*/, 0/*multiply_gpu<signed char, short>*/, 0/*multiply_gpu<signed char, int>*/, 0/*multiply_gpu<signed char, float>*/, 0/*multiply_gpu<signed char, double>*/},
{0/*multiply_gpu<unsigned short, unsigned char>*/, 0/*multiply_gpu<unsigned short, signed char>*/, multiply_gpu<unsigned short, unsigned short>, 0/*multiply_gpu<unsigned short, short>*/, multiply_gpu<unsigned short, int>, multiply_gpu<unsigned short, float>, multiply_gpu<unsigned short, double>},
{0/*multiply_gpu<short, unsigned char>*/, 0/*multiply_gpu<short, signed char>*/, 0/*multiply_gpu<short, unsigned short>*/, multiply_gpu<short, short>, multiply_gpu<short, int>, multiply_gpu<short, float>, multiply_gpu<short, double>},
{0/*multiply_gpu<int, unsigned char>*/, 0/*multiply_gpu<int, signed char>*/, 0/*multiply_gpu<int, unsigned short>*/, 0/*multiply_gpu<int, short>*/, multiply_gpu<int, int>, multiply_gpu<int, float>, multiply_gpu<int, double>},
{0/*multiply_gpu<float, unsigned char>*/, 0/*multiply_gpu<float, signed char>*/, 0/*multiply_gpu<float, unsigned short>*/, 0/*multiply_gpu<float, short>*/, 0/*multiply_gpu<float, int>*/, multiply_gpu<float, float>, multiply_gpu<float, double>},
{0/*multiply_gpu<double, unsigned char>*/, 0/*multiply_gpu<double, signed char>*/, 0/*multiply_gpu<double, unsigned short>*/, 0/*multiply_gpu<double, short>*/, 0/*multiply_gpu<double, int>*/, 0/*multiply_gpu<double, float>*/, multiply_gpu<double, double>}
};
cudaStream_t stream = StreamAccessor::getStream(s);
if (src1.type() == CV_8UC4 && src2.type() == CV_32FC1)
{
CV_Assert(src1.size() == src2.size());
dst.create(src1.size(), src1.type());
device::multiply_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2),
static_cast<DevMem2D_<uchar4> >(dst), StreamAccessor::getStream(stream));
multiply_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<uchar4> >(dst), stream);
}
else if (src1.type() == CV_16SC4 && src2.type() == CV_32FC1)
{
......@@ -237,60 +392,124 @@ void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stre
dst.create(src1.size(), src1.type());
device::multiply_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2),
static_cast<DevMem2D_<short4> >(dst), StreamAccessor::getStream(stream));
multiply_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<short4> >(dst), stream);
}
else
nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, nppiMul_32s_C1R, nppiMul_32f_C1R, StreamAccessor::getStream(stream));
{
CV_Assert(src1.type() == src2.type() && src1.size() == src2.size());
if (dtype < 0)
dtype = src1.depth();
dst.create(src1.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src1.channels()));
if (scale == 1 && dst.type() == src1.type() && (src1.depth() == CV_8U || src1.depth() == CV_32S || src1.depth() == CV_32F))
{
nppArithmCaller(src2, src1, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, nppiMul_32s_C1R, nppiMul_32f_C1R, stream);
return;
}
const func_t func = funcs[src1.depth()][dst.depth()];
CV_Assert(func != 0);
func(src1.reshape(1), src2.reshape(1), dst.reshape(1), scale, stream);
}
}
void cv::gpu::multiply(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
void cv::gpu::multiply(const GpuMat& src, const Scalar& sc, GpuMat& dst, double scale, int dtype, Stream& s)
{
if (src.depth() == CV_8U)
{
dst.create(src.size(), src.type());
using namespace cv::gpu::device;
device::multiplyScalar_gpu<unsigned char, unsigned char>(src.reshape(1), (float)(sc[0]), dst, StreamAccessor::getStream(stream));
}
else
typedef void (*func_t)(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
static const func_t funcs[7][7] =
{
CV_Assert(src.type() == CV_32FC1);
{multiply_gpu<unsigned char, unsigned char>, 0/*multiply_gpu<unsigned char, signed char>*/, multiply_gpu<unsigned char, unsigned short>, multiply_gpu<unsigned char, short>, multiply_gpu<unsigned char, int>, multiply_gpu<unsigned char, float>, multiply_gpu<unsigned char, double>},
{0/*multiply_gpu<signed char, unsigned char>*/, 0/*multiply_gpu<signed char, signed char>*/, 0/*multiply_gpu<signed char, unsigned short>*/, 0/*multiply_gpu<signed char, short>*/, 0/*multiply_gpu<signed char, int>*/, 0/*multiply_gpu<signed char, float>*/, 0/*multiply_gpu<signed char, double>*/},
{0/*multiply_gpu<unsigned short, unsigned char>*/, 0/*multiply_gpu<unsigned short, signed char>*/, multiply_gpu<unsigned short, unsigned short>, 0/*multiply_gpu<unsigned short, short>*/, multiply_gpu<unsigned short, int>, multiply_gpu<unsigned short, float>, multiply_gpu<unsigned short, double>},
{0/*multiply_gpu<short, unsigned char>*/, 0/*multiply_gpu<short, signed char>*/, 0/*multiply_gpu<short, unsigned short>*/, multiply_gpu<short, short>, multiply_gpu<short, int>, multiply_gpu<short, float>, multiply_gpu<short, double>},
{0/*multiply_gpu<int, unsigned char>*/, 0/*multiply_gpu<int, signed char>*/, 0/*multiply_gpu<int, unsigned short>*/, 0/*multiply_gpu<int, short>*/, multiply_gpu<int, int>, multiply_gpu<int, float>, multiply_gpu<int, double>},
{0/*multiply_gpu<float, unsigned char>*/, 0/*multiply_gpu<float, signed char>*/, 0/*multiply_gpu<float, unsigned short>*/, 0/*multiply_gpu<float, short>*/, 0/*multiply_gpu<float, int>*/, multiply_gpu<float, float>, multiply_gpu<float, double>},
{0/*multiply_gpu<double, unsigned char>*/, 0/*multiply_gpu<double, signed char>*/, 0/*multiply_gpu<double, unsigned short>*/, 0/*multiply_gpu<double, short>*/, 0/*multiply_gpu<double, int>*/, 0/*multiply_gpu<double, float>*/, multiply_gpu<double, double>}
};
dst.create(src.size(), src.type());
CV_Assert(src.channels() == 1);
if (dtype < 0)
dtype = src.depth();
dst.create(src.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()));
cudaStream_t stream = StreamAccessor::getStream(s);
if (dst.type() == src.type() && src.type() == CV_32FC1 && scale == 1)
{
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
cudaStream_t cudaStream = StreamAccessor::getStream(stream);
NppStreamHandler h(cudaStream);
NppStreamHandler h(stream);
nppSafeCall( nppiMulC_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step), (Npp32f)sc[0], dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
nppSafeCall( nppiMulC_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step), static_cast<Npp32f>(sc.val[0]),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
if (cudaStream == 0)
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
return;
}
const func_t func = funcs[src.depth()][dst.depth()];
CV_Assert(func != 0);
func(src, sc.val[0], dst, scale, stream);
}
////////////////////////////////////////////////////////////////////////
// divide
namespace cv { namespace gpu { namespace device
{
void divide_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream);
void divide_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream);
template <typename T, typename D>
void divide_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
template <typename T, typename D>
void divide_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
template <typename T, typename D>
void divide_gpu(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
}}}
void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double scale, int dtype, Stream& s)
{
using namespace cv::gpu::device;
typedef void (*func_t)(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, double scale, cudaStream_t stream);
static const func_t funcs[7][7] =
{
{divide_gpu<unsigned char, unsigned char>, 0/*divide_gpu<unsigned char, signed char>*/, divide_gpu<unsigned char, unsigned short>, divide_gpu<unsigned char, short>, divide_gpu<unsigned char, int>, divide_gpu<unsigned char, float>, divide_gpu<unsigned char, double>},
{0/*divide_gpu<signed char, unsigned char>*/, 0/*divide_gpu<signed char, signed char>*/, 0/*divide_gpu<signed char, unsigned short>*/, 0/*divide_gpu<signed char, short>*/, 0/*divide_gpu<signed char, int>*/, 0/*divide_gpu<signed char, float>*/, 0/*divide_gpu<signed char, double>*/},
{0/*divide_gpu<unsigned short, unsigned char>*/, 0/*divide_gpu<unsigned short, signed char>*/, divide_gpu<unsigned short, unsigned short>, 0/*divide_gpu<unsigned short, short>*/, divide_gpu<unsigned short, int>, divide_gpu<unsigned short, float>, divide_gpu<unsigned short, double>},
{0/*divide_gpu<short, unsigned char>*/, 0/*divide_gpu<short, signed char>*/, 0/*divide_gpu<short, unsigned short>*/, divide_gpu<short, short>, divide_gpu<short, int>, divide_gpu<short, float>, divide_gpu<short, double>},
{0/*divide_gpu<int, unsigned char>*/, 0/*divide_gpu<int, signed char>*/, 0/*divide_gpu<int, unsigned short>*/, 0/*divide_gpu<int, short>*/, divide_gpu<int, int>, divide_gpu<int, float>, divide_gpu<int, double>},
{0/*divide_gpu<float, unsigned char>*/, 0/*divide_gpu<float, signed char>*/, 0/*divide_gpu<float, unsigned short>*/, 0/*divide_gpu<float, short>*/, 0/*divide_gpu<float, int>*/, divide_gpu<float, float>, divide_gpu<float, double>},
{0/*divide_gpu<double, unsigned char>*/, 0/*divide_gpu<double, signed char>*/, 0/*divide_gpu<double, unsigned short>*/, 0/*divide_gpu<double, short>*/, 0/*divide_gpu<double, int>*/, 0/*divide_gpu<double, float>*/, divide_gpu<double, double>}
};
cudaStream_t stream = StreamAccessor::getStream(s);
if (src1.type() == CV_8UC4 && src2.type() == CV_32FC1)
{
CV_Assert(src1.size() == src2.size());
dst.create(src1.size(), src1.type());
device::divide_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2),
static_cast<DevMem2D_<uchar4> >(dst), StreamAccessor::getStream(stream));
multiply_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<uchar4> >(dst), stream);
}
else if (src1.type() == CV_16SC4 && src2.type() == CV_32FC1)
{
......@@ -298,117 +517,236 @@ void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream
dst.create(src1.size(), src1.type());
device::divide_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2),
static_cast<DevMem2D_<short4> >(dst), StreamAccessor::getStream(stream));
multiply_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<short4> >(dst), stream);
}
else
nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R, StreamAccessor::getStream(stream));
}
{
CV_Assert(src1.type() == src2.type() && src1.size() == src2.size());
void cv::gpu::add(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
{
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst, cudaStream_t stream);
static const caller_t callers[] = {0, NppArithmScalar<1, nppiAddC_32f_C1R>::calc, NppArithmScalar<2, nppiAddC_32fc_C1R>::calc};
if (dtype < 0)
dtype = src1.depth();
dst.create(src1.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src1.channels()));
if (scale == 1 && dst.type() == src1.type() && (src1.depth() == CV_8U || src1.depth() == CV_32S || src1.depth() == CV_32F))
{
nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R, stream);
return;
}
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);
const func_t func = funcs[src1.depth()][dst.depth()];
CV_Assert(func != 0);
callers[src.channels()](src, sc, dst, StreamAccessor::getStream(stream));
func(src1.reshape(1), src2.reshape(1), dst.reshape(1), scale, stream);
}
}
void cv::gpu::subtract(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
void cv::gpu::divide(const GpuMat& src, const Scalar& sc, GpuMat& dst, double scale, int dtype, Stream& s)
{
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst, cudaStream_t stream);
static const caller_t callers[] = {0, NppArithmScalar<1, nppiSubC_32f_C1R>::calc, NppArithmScalar<2, nppiSubC_32fc_C1R>::calc};
using namespace cv::gpu::device;
typedef void (*func_t)(const DevMem2D& src1, double val, const DevMem2D& dst, double scale, cudaStream_t stream);
static const func_t funcs[7][7] =
{
{divide_gpu<unsigned char, unsigned char>, 0/*divide_gpu<unsigned char, signed char>*/, divide_gpu<unsigned char, unsigned short>, divide_gpu<unsigned char, short>, divide_gpu<unsigned char, int>, divide_gpu<unsigned char, float>, divide_gpu<unsigned char, double>},
{0/*divide_gpu<signed char, unsigned char>*/, 0/*divide_gpu<signed char, signed char>*/, 0/*divide_gpu<signed char, unsigned short>*/, 0/*divide_gpu<signed char, short>*/, 0/*divide_gpu<signed char, int>*/, 0/*divide_gpu<signed char, float>*/, 0/*divide_gpu<signed char, double>*/},
{0/*divide_gpu<unsigned short, unsigned char>*/, 0/*divide_gpu<unsigned short, signed char>*/, divide_gpu<unsigned short, unsigned short>, 0/*divide_gpu<unsigned short, short>*/, divide_gpu<unsigned short, int>, divide_gpu<unsigned short, float>, divide_gpu<unsigned short, double>},
{0/*divide_gpu<short, unsigned char>*/, 0/*divide_gpu<short, signed char>*/, 0/*divide_gpu<short, unsigned short>*/, divide_gpu<short, short>, divide_gpu<short, int>, divide_gpu<short, float>, divide_gpu<short, double>},
{0/*divide_gpu<int, unsigned char>*/, 0/*divide_gpu<int, signed char>*/, 0/*divide_gpu<int, unsigned short>*/, 0/*divide_gpu<int, short>*/, divide_gpu<int, int>, divide_gpu<int, float>, divide_gpu<int, double>},
{0/*divide_gpu<float, unsigned char>*/, 0/*divide_gpu<float, signed char>*/, 0/*divide_gpu<float, unsigned short>*/, 0/*divide_gpu<float, short>*/, 0/*divide_gpu<float, int>*/, divide_gpu<float, float>, divide_gpu<float, double>},
{0/*divide_gpu<double, unsigned char>*/, 0/*divide_gpu<double, signed char>*/, 0/*divide_gpu<double, unsigned short>*/, 0/*divide_gpu<double, short>*/, 0/*divide_gpu<double, int>*/, 0/*divide_gpu<double, float>*/, divide_gpu<double, double>}
};
CV_Assert(src.channels() == 1);
if (dtype < 0)
dtype = src.depth();
dst.create(src.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()));
cudaStream_t stream = StreamAccessor::getStream(s);
if (dst.type() == src.type() && src.type() == CV_32FC1 && scale == 1)
{
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
NppStreamHandler h(stream);
nppSafeCall( nppiDivC_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step), static_cast<Npp32f>(sc.val[0]),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
return;
}
const func_t func = funcs[src.depth()][dst.depth()];
CV_Assert(func != 0);
callers[src.channels()](src, sc, dst, StreamAccessor::getStream(stream));
func(src, sc.val[0], dst, scale, stream);
}
void cv::gpu::divide(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
void cv::gpu::divide(double scale, const GpuMat& src, GpuMat& dst, int dtype, Stream& s)
{
CV_Assert(src.type() == CV_32FC1);
using namespace cv::gpu::device;
dst.create(src.size(), src.type());
typedef void (*func_t)(double scalar, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
static const func_t funcs[7][7] =
{
{divide_gpu<unsigned char, unsigned char>, 0/*divide_gpu<unsigned char, signed char>*/, divide_gpu<unsigned char, unsigned short>, divide_gpu<unsigned char, short>, divide_gpu<unsigned char, int>, divide_gpu<unsigned char, float>, divide_gpu<unsigned char, double>},
{0/*divide_gpu<signed char, unsigned char>*/, 0/*divide_gpu<signed char, signed char>*/, 0/*divide_gpu<signed char, unsigned short>*/, 0/*divide_gpu<signed char, short>*/, 0/*divide_gpu<signed char, int>*/, 0/*divide_gpu<signed char, float>*/, 0/*divide_gpu<signed char, double>*/},
{0/*divide_gpu<unsigned short, unsigned char>*/, 0/*divide_gpu<unsigned short, signed char>*/, divide_gpu<unsigned short, unsigned short>, 0/*divide_gpu<unsigned short, short>*/, divide_gpu<unsigned short, int>, divide_gpu<unsigned short, float>, divide_gpu<unsigned short, double>},
{0/*divide_gpu<short, unsigned char>*/, 0/*divide_gpu<short, signed char>*/, 0/*divide_gpu<short, unsigned short>*/, divide_gpu<short, short>, divide_gpu<short, int>, divide_gpu<short, float>, divide_gpu<short, double>},
{0/*divide_gpu<int, unsigned char>*/, 0/*divide_gpu<int, signed char>*/, 0/*divide_gpu<int, unsigned short>*/, 0/*divide_gpu<int, short>*/, divide_gpu<int, int>, divide_gpu<int, float>, divide_gpu<int, double>},
{0/*divide_gpu<float, unsigned char>*/, 0/*divide_gpu<float, signed char>*/, 0/*divide_gpu<float, unsigned short>*/, 0/*divide_gpu<float, short>*/, 0/*divide_gpu<float, int>*/, divide_gpu<float, float>, divide_gpu<float, double>},
{0/*divide_gpu<double, unsigned char>*/, 0/*divide_gpu<double, signed char>*/, 0/*divide_gpu<double, unsigned short>*/, 0/*divide_gpu<double, short>*/, 0/*divide_gpu<double, int>*/, 0/*divide_gpu<double, float>*/, divide_gpu<double, double>}
};
cudaStream_t cudaStream = StreamAccessor::getStream(stream);
CV_Assert(src.channels() == 1);
NppStreamHandler h(cudaStream);
if (dtype < 0)
dtype = src.depth();
nppSafeCall( nppiDivC_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step), (Npp32f)sc[0], dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
dst.create(src.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()));
if (cudaStream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
cudaStream_t stream = StreamAccessor::getStream(s);
const func_t func = funcs[src.depth()][dst.depth()];
CV_Assert(func != 0);
func(scale, src, dst, stream);
}
//////////////////////////////////////////////////////////////////////////////
// Absolute difference
// absdiff
namespace cv { namespace gpu { namespace device
{
template <typename T>
void absdiff_gpu(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template <typename T>
void absdiff_gpu(const DevMem2D& src1, double val, const DevMem2D& dst, cudaStream_t stream);
}}}
void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& s)
{
CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());
using namespace cv::gpu::device;
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);
typedef void (*func_t)(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
static const func_t funcs[] =
{
0/*absdiff_gpu<unsigned char>*/, absdiff_gpu<signed char>, absdiff_gpu<unsigned short>, absdiff_gpu<short>, 0/*absdiff_gpu<int>*/, 0/*absdiff_gpu<float>*/, absdiff_gpu<double>
};
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());
dst.create( src1.size(), src1.type() );
cudaStream_t stream = StreamAccessor::getStream(s);
NppiSize sz;
sz.width = src1.cols;
sz.width = src1.cols * src1.channels();
sz.height = src1.rows;
cudaStream_t stream = StreamAccessor::getStream(s);
if (src1.depth() == CV_8U && (src1.cols * src1.channels()) % 4 == 0)
{
NppStreamHandler h(stream);
NppStreamHandler h(stream);
sz.width /= 4;
switch (src1.type())
nppSafeCall( nppiAbsDiff_8u_C4R(src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
else if (src1.depth() == CV_8U)
{
case CV_8UC1:
NppStreamHandler h(stream);
nppSafeCall( nppiAbsDiff_8u_C1R(src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz) );
break;
case CV_8UC4:
nppSafeCall( nppiAbsDiff_8u_C4R(src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz) );
break;
case CV_32SC1:
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
else if (src1.depth() == CV_32S)
{
NppStreamHandler h(stream);
nppSafeCall( nppiAbsDiff_32s_C1R(src1.ptr<Npp32s>(), static_cast<int>(src1.step), src2.ptr<Npp32s>(), static_cast<int>(src2.step),
dst.ptr<Npp32s>(), static_cast<int>(dst.step), sz) );
break;
case CV_32FC1:
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
else if (src1.depth() == CV_32F)
{
NppStreamHandler h(stream);
nppSafeCall( nppiAbsDiff_32f_C1R(src1.ptr<Npp32f>(), static_cast<int>(src1.step), src2.ptr<Npp32f>(), static_cast<int>(src2.step),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );
break;
default:
CV_Assert(!"Unsupported source type");
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
else
{
const func_t func = funcs[src1.depth()];
CV_Assert(func != 0);
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
func(src1.reshape(1), src2.reshape(1), dst.reshape(1), stream);
}
}
void cv::gpu::absdiff(const GpuMat& src1, const Scalar& src2, GpuMat& dst, Stream& s)
{
CV_Assert(src1.type() == CV_32FC1);
using namespace cv::gpu::device;
dst.create( src1.size(), src1.type() );
typedef void (*func_t)(const DevMem2D& src1, double val, const DevMem2D& dst, cudaStream_t stream);
NppiSize sz;
sz.width = src1.cols;
sz.height = src1.rows;
static const func_t funcs[] =
{
absdiff_gpu<unsigned char>, absdiff_gpu<signed char>, absdiff_gpu<unsigned short>, absdiff_gpu<short>,absdiff_gpu<int>, 0/*absdiff_gpu<float>*/, absdiff_gpu<double>
};
CV_Assert(src1.channels() == 1);
dst.create(src1.size(), src1.type());
cudaStream_t stream = StreamAccessor::getStream(s);
NppStreamHandler h(stream);
if (src1.type() == CV_32FC1)
{
NppiSize sz;
sz.width = src1.cols;
sz.height = src1.rows;
cudaStream_t stream = StreamAccessor::getStream(s);
nppSafeCall( nppiAbsDiffC_32f_C1R(src1.ptr<Npp32f>(), static_cast<int>(src1.step), dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz, (Npp32f)src2[0]) );
NppStreamHandler h(stream);
nppSafeCall( nppiAbsDiffC_32f_C1R(src1.ptr<Npp32f>(), static_cast<int>(src1.step),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz, static_cast<Npp32f>(src2.val[0])) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
return;
}
const func_t func = funcs[src1.depth()];
CV_Assert(func != 0);
func(src1, src2.val[0], dst, stream);
}
......@@ -417,62 +755,74 @@ void cv::gpu::absdiff(const GpuMat& src1, const Scalar& src2, GpuMat& dst, Strea
namespace cv { namespace gpu { namespace device
{
void compare_ne_8uc4(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
void compare_ne_32f(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template <typename T> void compare_eq(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template <typename T> void compare_ne(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template <typename T> void compare_lt(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template <typename T> void compare_le(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
}}}
void cv::gpu::compare(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int cmpop, Stream& s)
void cv::gpu::compare(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int cmpop, Stream& stream)
{
CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());
CV_Assert(src1.type() == CV_8UC4 || src1.type() == CV_32FC1);
dst.create( src1.size(), CV_8UC1 );
static const NppCmpOp nppCmpOp[] = { NPP_CMP_EQ, NPP_CMP_GREATER, NPP_CMP_GREATER_EQ, NPP_CMP_LESS, NPP_CMP_LESS_EQ };
NppiSize sz;
sz.width = src1.cols;
sz.height = src1.rows;
using namespace cv::gpu::device;
cudaStream_t stream = StreamAccessor::getStream(s);
typedef void (*func_t)(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
if (src1.type() == CV_8UC4)
static const func_t funcs[7][4] =
{
if (cmpop != CMP_NE)
{
NppStreamHandler h(stream);
{compare_eq<unsigned char>, compare_ne<unsigned char>, compare_lt<unsigned char>, compare_le<unsigned char>},
{compare_eq<signed char>, compare_ne<signed char>, compare_lt<signed char>, compare_le<signed char>},
{compare_eq<unsigned short>, compare_ne<unsigned short>, compare_lt<unsigned short>, compare_le<unsigned short>},
{compare_eq<short>, compare_ne<short>, compare_lt<short>, compare_le<short>},
{compare_eq<int>, compare_ne<int>, compare_lt<int>, compare_le<int>},
{compare_eq<float>, compare_ne<float>, compare_lt<float>, compare_le<float>},
{compare_eq<double>, compare_ne<double>, compare_lt<double>, compare_le<double>}
};
nppSafeCall( nppiCompare_8u_C4R(src1.ptr<Npp8u>(), static_cast<int>(src1.step),
src2.ptr<Npp8u>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, nppCmpOp[cmpop]) );
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
else
{
device::compare_ne_8uc4(src1, src2, dst, stream);
}
}
else
int code;
const GpuMat* psrc1;
const GpuMat* psrc2;
switch (cmpop)
{
if (cmpop != CMP_NE)
{
NppStreamHandler h(stream);
case CMP_EQ:
code = 0;
psrc1 = &src1;
psrc2 = &src2;
break;
case CMP_GE:
code = 3;
psrc1 = &src2;
psrc2 = &src1;
break;
case CMP_GT:
code = 2;
psrc1 = &src2;
psrc2 = &src1;
break;
case CMP_LE:
code = 3;
psrc1 = &src1;
psrc2 = &src2;
break;
case CMP_LT:
code = 2;
psrc1 = &src1;
psrc2 = &src2;
break;
case CMP_NE:
code = 1;
psrc1 = &src1;
psrc2 = &src2;
break;
default:
CV_Error(CV_StsBadFlag, "Incorrect compare operation");
};
nppSafeCall( nppiCompare_32f_C1R(src1.ptr<Npp32f>(), static_cast<int>(src1.step),
src2.ptr<Npp32f>(), static_cast<int>(src2.step),
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, nppCmpOp[cmpop]) );
dst.create(src1.size(), CV_MAKE_TYPE(CV_8U, src1.channels()));
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
else
{
device::compare_ne_32f(src1, src2, dst, stream);
}
}
funcs[src1.depth()][code](psrc1->reshape(1), psrc2->reshape(1), dst.reshape(1), StreamAccessor::getStream(stream));
}
......
......@@ -639,17 +639,17 @@ void cv::gpu::morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& ke
case CV_MOP_GRADIENT:
erode(src, buf2, kernel, buf1, anchor, iterations, stream);
dilate(src, dst, kernel, buf1, anchor, iterations, stream);
subtract(dst, buf2, dst, stream);
subtract(dst, buf2, dst, GpuMat(), -1, stream);
break;
case CV_MOP_TOPHAT:
erode(src, dst, kernel, buf1, anchor, iterations, stream);
dilate(dst, buf2, kernel, buf1, anchor, iterations, stream);
subtract(src, buf2, dst, stream);
subtract(src, buf2, dst, GpuMat(), -1, stream);
break;
case CV_MOP_BLACKHAT:
dilate(src, dst, kernel, buf1, anchor, iterations, stream);
erode(dst, buf2, kernel, buf1, anchor, iterations, stream);
subtract(buf2, src, dst, stream);
subtract(buf2, src, dst, GpuMat(), -1, stream);
break;
default:
CV_Error(CV_StsBadArg, "unknown morphological operation");
......
......@@ -96,7 +96,7 @@ TEST_P(AddArray, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, AddArray, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1)));
testing::Values(CV_8UC1, CV_8UC4, CV_16UC1, CV_32SC1, CV_32FC1)));
struct AddScalar : ArithmTest {};
......@@ -130,7 +130,7 @@ TEST_P(AddScalar, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, AddScalar, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_32FC1, CV_32FC2)));
testing::Values(CV_8UC1, CV_16UC1, CV_32SC1, CV_32FC1, CV_32FC2)));
////////////////////////////////////////////////////////////////////////////////
// subtract
......@@ -161,7 +161,7 @@ TEST_P(SubtractArray, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, SubtractArray, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1)));
testing::Values(CV_8UC1, CV_8UC4, CV_16UC1, CV_32SC1, CV_32FC1)));
struct SubtractScalar : ArithmTest {};
......@@ -195,7 +195,7 @@ TEST_P(SubtractScalar, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, SubtractScalar, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_32FC1, CV_32FC2)));
testing::Values(CV_8UC1, CV_16UC1, CV_32SC1, CV_32FC1, CV_32FC2)));
////////////////////////////////////////////////////////////////////////////////
// multiply
......@@ -226,7 +226,7 @@ TEST_P(MultiplyArray, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, MultiplyArray, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1)));
testing::Values(CV_8UC1, CV_8UC4, CV_16UC1, CV_32SC1, CV_32FC1)));
struct MultiplyScalar : ArithmTest {};
......@@ -260,7 +260,7 @@ TEST_P(MultiplyScalar, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, MultiplyScalar, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_32FC1)));
testing::Values(CV_8UC1, CV_16UC1, CV_32SC1, CV_32FC1)));
////////////////////////////////////////////////////////////////////////////////
// divide
......@@ -291,7 +291,7 @@ TEST_P(DivideArray, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, DivideArray, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1)));
testing::Values(CV_8UC1, CV_8UC4, CV_16UC1, CV_32SC1, CV_32FC1)));
struct DivideScalar : ArithmTest {};
......@@ -325,7 +325,7 @@ TEST_P(DivideScalar, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, DivideScalar, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_32FC1)));
testing::Values(CV_8UC1, CV_16UC1, CV_32SC1, CV_32FC1)));
////////////////////////////////////////////////////////////////////////////////
// transpose
......@@ -387,7 +387,7 @@ TEST_P(AbsdiffArray, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, AbsdiffArray, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1)));
testing::Values(CV_8UC1, CV_8UC4, CV_16UC1, CV_32SC1, CV_32FC1)));
struct AbsdiffScalar : ArithmTest {};
......@@ -421,7 +421,7 @@ TEST_P(AbsdiffScalar, Accuracy)
INSTANTIATE_TEST_CASE_P(Arithm, AbsdiffScalar, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_32FC1)));
testing::Values(CV_8UC1, CV_16UC1, CV_32SC1, CV_32FC1)));
////////////////////////////////////////////////////////////////////////////////
// compare
......@@ -813,7 +813,7 @@ TEST_P(Pow, Accuracy)
/*std::cout << mat << std::endl << std::endl;
std::cout << dst << std::endl << std::endl;
std::cout << dst_gold << std::endl;*/
EXPECT_MAT_NEAR(dst_gold, dst, 1);
EXPECT_MAT_NEAR(dst_gold, dst, 2);
}
INSTANTIATE_TEST_CASE_P(Arithm, Pow, testing::Combine(
......
......@@ -169,8 +169,8 @@ TEST_P(Resize, Accuracy)
gpuRes2.download(dst2);
);
EXPECT_MAT_SIMILAR(dst_gold1, dst1, 0.2);
EXPECT_MAT_SIMILAR(dst_gold2, dst2, 0.2);
EXPECT_MAT_SIMILAR(dst_gold1, dst1, 0.21);
EXPECT_MAT_SIMILAR(dst_gold2, dst2, 0.21);
}
INSTANTIATE_TEST_CASE_P(ImgProc, Resize, testing::Combine(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册