/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/* ////////////////////////////////////////////////////////////////////
//
//  Arithmetic and logical operations: +, -, *, /, &, |, ^, ~, abs ...
//
// */
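
/*
   Usage sketch (illustrative only, not part of this file's logic): the per-type
   kernels defined below are normally reached through the public cv:: API, e.g.

       cv::Mat a(4, 4, CV_8UC1, cv::Scalar(200));
       cv::Mat b(4, 4, CV_8UC1, cv::Scalar(100));
       cv::Mat band, smaller;
       cv::bitwise_and(a, b, band);   // bit-wise AND, processed as raw bytes
       cv::min(a, b, smaller);        // element-wise minimum

   Those entry points pick a kernel from tables such as getMinTab()/getMaxTab()
   and drive it through binary_op(); the add/subtract/absdiff family is wired up
   the same way further down in this file.
*/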

#include "precomp.hpp"
#include "opencl_kernels.hpp"

namespace cv
{

struct NOP {};

#if CV_SSE2

#define FUNCTOR_TEMPLATE(name)          \
    template<typename T> struct name {}

FUNCTOR_TEMPLATE(VLoadStore128);
FUNCTOR_TEMPLATE(VLoadStore64);
FUNCTOR_TEMPLATE(VLoadStore128Aligned);

#endif
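
// vBinOp, vBinOp32 and vBinOp64 below implement the generic element-wise binary
// loop shared by the per-type kernels: when USE_SSE2 is set, the bulk of each row
// is processed through the VLoadStore*/vector functors; an unrolled scalar loop
// then handles groups of four, and a final loop covers the remaining tail.
// Op is the scalar functor, VOp/Op32/Op64 its vector counterpart (NOP when SIMD
// support is compiled out).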

template<typename T, class Op, class VOp>
void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, Size sz)
{
#if CV_SSE2
    VOp vop;
#endif
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
                        src2 += step2/sizeof(src2[0]),
                        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

#if CV_SSE2
        if( USE_SSE2 )
        {
            for( ; x <= sz.width - 32/(int)sizeof(T); x += 32/sizeof(T) )
            {
                typename VLoadStore128<T>::reg_type r0 = VLoadStore128<T>::load(src1 + x               );
                typename VLoadStore128<T>::reg_type r1 = VLoadStore128<T>::load(src1 + x + 16/sizeof(T));
                r0 = vop(r0, VLoadStore128<T>::load(src2 + x               ));
                r1 = vop(r1, VLoadStore128<T>::load(src2 + x + 16/sizeof(T)));
                VLoadStore128<T>::store(dst + x               , r0);
                VLoadStore128<T>::store(dst + x + 16/sizeof(T), r1);
            }
        }
#endif
#if CV_SSE2
        if( USE_SSE2 )
        {
            for( ; x <= sz.width - 8/(int)sizeof(T); x += 8/sizeof(T) )
            {
                typename VLoadStore64<T>::reg_type r = VLoadStore64<T>::load(src1 + x);
                r = vop(r, VLoadStore64<T>::load(src2 + x));
                VLoadStore64<T>::store(dst + x, r);
            }
        }
#endif
#if CV_ENABLE_UNROLLED
        for( ; x <= sz.width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }
#endif

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}

template<typename T, class Op, class Op32>
void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2,
              T* dst, size_t step, Size sz)
{
#if CV_SSE2
    Op32 op32;
#endif
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
        src2 += step2/sizeof(src2[0]),
        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

#if CV_SSE2
        if( USE_SSE2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
            {
                for( ; x <= sz.width - 8; x += 8 )
                {
                    typename VLoadStore128Aligned<T>::reg_type r0 = VLoadStore128Aligned<T>::load(src1 + x    );
                    typename VLoadStore128Aligned<T>::reg_type r1 = VLoadStore128Aligned<T>::load(src1 + x + 4);
                    r0 = op32(r0, VLoadStore128Aligned<T>::load(src2 + x    ));
                    r1 = op32(r1, VLoadStore128Aligned<T>::load(src2 + x + 4));
                    VLoadStore128Aligned<T>::store(dst + x    , r0);
                    VLoadStore128Aligned<T>::store(dst + x + 4, r1);
                }
            }
        }
#endif
#if CV_SSE2
        if( USE_SSE2 )
        {
            for( ; x <= sz.width - 8; x += 8 )
            {
                typename VLoadStore128<T>::reg_type r0 = VLoadStore128<T>::load(src1 + x    );
                typename VLoadStore128<T>::reg_type r1 = VLoadStore128<T>::load(src1 + x + 4);
                r0 = op32(r0, VLoadStore128<T>::load(src2 + x    ));
                r1 = op32(r1, VLoadStore128<T>::load(src2 + x + 4));
                VLoadStore128<T>::store(dst + x    , r0);
                VLoadStore128<T>::store(dst + x + 4, r1);
            }
        }
#endif
#if CV_ENABLE_UNROLLED
        for( ; x <= sz.width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }
#endif

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}

template<typename T, class Op, class Op64>
void vBinOp64(const T* src1, size_t step1, const T* src2, size_t step2,
               T* dst, size_t step, Size sz)
{
#if CV_SSE2
    Op64 op64;
#endif
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
        src2 += step2/sizeof(src2[0]),
        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

#if CV_SSE2
        if( USE_SSE2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
            {
                for( ; x <= sz.width - 4; x += 4 )
                {
                    typename VLoadStore128Aligned<T>::reg_type r0 = VLoadStore128Aligned<T>::load(src1 + x    );
                    typename VLoadStore128Aligned<T>::reg_type r1 = VLoadStore128Aligned<T>::load(src1 + x + 2);
                    r0 = op64(r0, VLoadStore128Aligned<T>::load(src2 + x    ));
                    r1 = op64(r1, VLoadStore128Aligned<T>::load(src2 + x + 2));
                    VLoadStore128Aligned<T>::store(dst + x    , r0);
                    VLoadStore128Aligned<T>::store(dst + x + 2, r1);
                }
            }
        }
#endif

        for( ; x <= sz.width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}

#if CV_SSE2

#define FUNCTOR_LOADSTORE_CAST(name, template_arg, register_type, load_body, store_body)\
    template <>                                                                                  \
    struct name<template_arg>{                                                                   \
        typedef register_type reg_type;                                                          \
        static reg_type load(const template_arg * p) { return load_body ((const reg_type *)p); } \
        static void store(template_arg * p, reg_type v) { store_body ((reg_type *)p, v); }       \
    }

#define FUNCTOR_LOADSTORE(name, template_arg, register_type, load_body, store_body)\
    template <>                                                                \
    struct name<template_arg>{                                                 \
        typedef register_type reg_type;                                        \
        static reg_type load(const template_arg * p) { return load_body (p); } \
        static void store(template_arg * p, reg_type v) { store_body (p, v); } \
    }

#define FUNCTOR_CLOSURE_2arg(name, template_arg, body)\
    template<>                                                                 \
    struct name<template_arg>                                                  \
    {                                                                          \
        VLoadStore128<template_arg>::reg_type operator()(                      \
                        const VLoadStore128<template_arg>::reg_type & a,       \
                        const VLoadStore128<template_arg>::reg_type & b) const \
        {                                                                      \
            body;                                                              \
        }                                                                      \
    }

#define FUNCTOR_CLOSURE_1arg(name, template_arg, body)\
    template<>                                                                 \
    struct name<template_arg>                                                  \
    {                                                                          \
        VLoadStore128<template_arg>::reg_type operator()(                      \
                        const VLoadStore128<template_arg>::reg_type & a,       \
                        const VLoadStore128<template_arg>::reg_type &  ) const \
        {                                                                      \
            body;                                                              \
        }                                                                      \
    }

FUNCTOR_LOADSTORE_CAST(VLoadStore128,  uchar, __m128i, _mm_loadu_si128, _mm_storeu_si128);
FUNCTOR_LOADSTORE_CAST(VLoadStore128,  schar, __m128i, _mm_loadu_si128, _mm_storeu_si128);
FUNCTOR_LOADSTORE_CAST(VLoadStore128, ushort, __m128i, _mm_loadu_si128, _mm_storeu_si128);
FUNCTOR_LOADSTORE_CAST(VLoadStore128,  short, __m128i, _mm_loadu_si128, _mm_storeu_si128);
FUNCTOR_LOADSTORE_CAST(VLoadStore128,    int, __m128i, _mm_loadu_si128, _mm_storeu_si128);
FUNCTOR_LOADSTORE(     VLoadStore128,  float, __m128 , _mm_loadu_ps   , _mm_storeu_ps   );
FUNCTOR_LOADSTORE(     VLoadStore128, double, __m128d, _mm_loadu_pd   , _mm_storeu_pd   );

FUNCTOR_LOADSTORE_CAST(VLoadStore64,  uchar, __m128i, _mm_loadl_epi64, _mm_storel_epi64);
FUNCTOR_LOADSTORE_CAST(VLoadStore64,  schar, __m128i, _mm_loadl_epi64, _mm_storel_epi64);
FUNCTOR_LOADSTORE_CAST(VLoadStore64, ushort, __m128i, _mm_loadl_epi64, _mm_storel_epi64);
FUNCTOR_LOADSTORE_CAST(VLoadStore64,  short, __m128i, _mm_loadl_epi64, _mm_storel_epi64);

FUNCTOR_LOADSTORE_CAST(VLoadStore128Aligned,    int, __m128i, _mm_load_si128, _mm_store_si128);
FUNCTOR_LOADSTORE(     VLoadStore128Aligned,  float, __m128 , _mm_load_ps   , _mm_store_ps   );
FUNCTOR_LOADSTORE(     VLoadStore128Aligned, double, __m128d, _mm_load_pd   , _mm_store_pd   );

FUNCTOR_TEMPLATE(VAdd);
FUNCTOR_CLOSURE_2arg(VAdd,  uchar, return _mm_adds_epu8 (a, b));
FUNCTOR_CLOSURE_2arg(VAdd,  schar, return _mm_adds_epi8 (a, b));
FUNCTOR_CLOSURE_2arg(VAdd, ushort, return _mm_adds_epu16(a, b));
FUNCTOR_CLOSURE_2arg(VAdd,  short, return _mm_adds_epi16(a, b));
FUNCTOR_CLOSURE_2arg(VAdd,    int, return _mm_add_epi32 (a, b));
FUNCTOR_CLOSURE_2arg(VAdd,  float, return _mm_add_ps    (a, b));
FUNCTOR_CLOSURE_2arg(VAdd, double, return _mm_add_pd    (a, b));

FUNCTOR_TEMPLATE(VSub);
FUNCTOR_CLOSURE_2arg(VSub,  uchar, return _mm_subs_epu8 (a, b));
FUNCTOR_CLOSURE_2arg(VSub,  schar, return _mm_subs_epi8 (a, b));
FUNCTOR_CLOSURE_2arg(VSub, ushort, return _mm_subs_epu16(a, b));
FUNCTOR_CLOSURE_2arg(VSub,  short, return _mm_subs_epi16(a, b));
FUNCTOR_CLOSURE_2arg(VSub,    int, return _mm_sub_epi32 (a, b));
FUNCTOR_CLOSURE_2arg(VSub,  float, return _mm_sub_ps    (a, b));
FUNCTOR_CLOSURE_2arg(VSub, double, return _mm_sub_pd    (a, b));

FUNCTOR_TEMPLATE(VMin);
FUNCTOR_CLOSURE_2arg(VMin, uchar, return _mm_min_epu8(a, b));
FUNCTOR_CLOSURE_2arg(VMin, schar,
        __m128i m = _mm_cmpgt_epi8(a, b);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    );
FUNCTOR_CLOSURE_2arg(VMin, ushort, return _mm_subs_epu16(a, _mm_subs_epu16(a, b)));
FUNCTOR_CLOSURE_2arg(VMin,  short, return _mm_min_epi16(a, b));
FUNCTOR_CLOSURE_2arg(VMin,    int,
        __m128i m = _mm_cmpgt_epi32(a, b);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    );
FUNCTOR_CLOSURE_2arg(VMin,  float, return _mm_min_ps(a, b));
FUNCTOR_CLOSURE_2arg(VMin, double, return _mm_min_pd(a, b));

FUNCTOR_TEMPLATE(VMax);
FUNCTOR_CLOSURE_2arg(VMax, uchar, return _mm_max_epu8(a, b));
FUNCTOR_CLOSURE_2arg(VMax, schar,
        __m128i m = _mm_cmpgt_epi8(b, a);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    );
FUNCTOR_CLOSURE_2arg(VMax, ushort, return _mm_adds_epu16(_mm_subs_epu16(a, b), b));
FUNCTOR_CLOSURE_2arg(VMax,  short, return _mm_max_epi16(a, b));
FUNCTOR_CLOSURE_2arg(VMax,    int,
        __m128i m = _mm_cmpgt_epi32(b, a);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    );
FUNCTOR_CLOSURE_2arg(VMax,  float, return _mm_max_ps(a, b));
FUNCTOR_CLOSURE_2arg(VMax, double, return _mm_max_pd(a, b));


static int CV_DECL_ALIGNED(16) v32f_absmask[] = { 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff };
static int CV_DECL_ALIGNED(16) v64f_absmask[] = { 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff };

FUNCTOR_TEMPLATE(VAbsDiff);
FUNCTOR_CLOSURE_2arg(VAbsDiff,  uchar,
        return _mm_add_epi8(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
    );
FUNCTOR_CLOSURE_2arg(VAbsDiff,  schar,
        __m128i d = _mm_subs_epi8(a, b);
        __m128i m = _mm_cmpgt_epi8(b, a);
        return _mm_subs_epi8(_mm_xor_si128(d, m), m);
    );
FUNCTOR_CLOSURE_2arg(VAbsDiff, ushort,
        return _mm_add_epi16(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a));
    );
FUNCTOR_CLOSURE_2arg(VAbsDiff,  short,
        __m128i M = _mm_max_epi16(a, b);
        __m128i m = _mm_min_epi16(a, b);
        return _mm_subs_epi16(M, m);
    );
FUNCTOR_CLOSURE_2arg(VAbsDiff,    int,
        __m128i d = _mm_sub_epi32(a, b);
        __m128i m = _mm_cmpgt_epi32(b, a);
        return _mm_sub_epi32(_mm_xor_si128(d, m), m);
    );
FUNCTOR_CLOSURE_2arg(VAbsDiff,  float,
        return _mm_and_ps(_mm_sub_ps(a,b), *(const __m128*)v32f_absmask);
    );
FUNCTOR_CLOSURE_2arg(VAbsDiff, double,
        return _mm_and_pd(_mm_sub_pd(a,b), *(const __m128d*)v64f_absmask);
    );

FUNCTOR_TEMPLATE(VAnd);
FUNCTOR_CLOSURE_2arg(VAnd, uchar, return _mm_and_si128(a, b));
FUNCTOR_TEMPLATE(VOr);
FUNCTOR_CLOSURE_2arg(VOr , uchar, return _mm_or_si128 (a, b));
FUNCTOR_TEMPLATE(VXor);
FUNCTOR_CLOSURE_2arg(VXor, uchar, return _mm_xor_si128(a, b));
FUNCTOR_TEMPLATE(VNot);
FUNCTOR_CLOSURE_1arg(VNot, uchar, return _mm_xor_si128(_mm_set1_epi32(-1), a));
#endif
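
// IF_SIMD(op) resolves to the vector functor when SSE2 support is compiled in and
// to the empty NOP placeholder otherwise, so the vBinOp* templates fall back to the
// plain scalar loop on non-SSE2 builds.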
#if CV_SSE2
#define IF_SIMD(op) op
#else
#define IF_SIMD(op) NOP
#endif

template<> inline uchar OpAdd<uchar>::operator ()(uchar a, uchar b) const
{ return CV_FAST_CAST_8U(a + b); }
template<> inline uchar OpSub<uchar>::operator ()(uchar a, uchar b) const
{ return CV_FAST_CAST_8U(a - b); }

template<typename T> struct OpAbsDiff
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()(T a, T b) const { return (T)std::abs(a - b); }
};

template<> inline short OpAbsDiff<short>::operator ()(short a, short b) const
{ return saturate_cast<short>(std::abs(a - b)); }

template<> inline schar OpAbsDiff<schar>::operator ()(schar a, schar b) const
{ return saturate_cast<schar>(std::abs(a - b)); }

template<typename T, typename WT=T> struct OpAbsDiffS
{
    typedef T type1;
    typedef WT type2;
    typedef T rtype;
    T operator()(T a, WT b) const { return saturate_cast<T>(std::abs(a - b)); }
};

template<typename T> struct OpAnd
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a & b; }
};

template<typename T> struct OpOr
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a | b; }
};

template<typename T> struct OpXor
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a ^ b; }
};

template<typename T> struct OpNot
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T ) const { return ~a; }
};

#if (ARITHM_USE_IPP == 1)
static inline void fixSteps(Size sz, size_t elemSize, size_t& step1, size_t& step2, size_t& step)
{
    if( sz.height == 1 )
        step1 = step2 = step = sz.width*elemSize;
}
#endif
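
// Most of the add*/sub*/max*/min*/absdiff* and logical kernels below first try the
// matching IPP primitive when ARITHM_USE_IPP is enabled; on failure they record it
// via setIppErrorStatus() and fall back to the templated vBinOp*/vBinOp32/vBinOp64 loop.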

static void add8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAdd_8u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<uchar, OpAdd<uchar>, IF_SIMD(VAdd<uchar>)>(src1, step1, src2, step2, dst, step, sz));
}

static void add8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp<schar, OpAdd<schar>, IF_SIMD(VAdd<schar>)>(src1, step1, src2, step2, dst, step, sz);
}

static void add16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAdd_16u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<ushort, OpAdd<ushort>, IF_SIMD(VAdd<ushort>)>(src1, step1, src2, step2, dst, step, sz));
}

static void add16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAdd_16s_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<short, OpAdd<short>, IF_SIMD(VAdd<short>)>(src1, step1, src2, step2, dst, step, sz));
}

static void add32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32<int, OpAdd<int>, IF_SIMD(VAdd<int>)>(src1, step1, src2, step2, dst, step, sz);
}

static void add32f( const float* src1, size_t step1,
                    const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAdd_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp32<float, OpAdd<float>, IF_SIMD(VAdd<float>)>(src1, step1, src2, step2, dst, step, sz));
}

static void add64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
    vBinOp64<double, OpAdd<double>, IF_SIMD(VAdd<double>)>(src1, step1, src2, step2, dst, step, sz);
}

static void sub8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiSub_8u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz), 0))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<uchar, OpSub<uchar>, IF_SIMD(VSub<uchar>)>(src1, step1, src2, step2, dst, step, sz));
}

static void sub8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp<schar, OpSub<schar>, IF_SIMD(VSub<schar>)>(src1, step1, src2, step2, dst, step, sz);
}

static void sub16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiSub_16u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz), 0))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<ushort, OpSub<ushort>, IF_SIMD(VSub<ushort>)>(src1, step1, src2, step2, dst, step, sz));
}

static void sub16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiSub_16s_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz), 0))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<short, OpSub<short>, IF_SIMD(VSub<short>)>(src1, step1, src2, step2, dst, step, sz));
}

static void sub32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32<int, OpSub<int>, IF_SIMD(VSub<int>)>(src1, step1, src2, step2, dst, step, sz);
}

static void sub32f( const float* src1, size_t step1,
                   const float* src2, size_t step2,
                   float* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiSub_32f_C1R(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp32<float, OpSub<float>, IF_SIMD(VSub<float>)>(src1, step1, src2, step2, dst, step, sz));
}

static void sub64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
    vBinOp64<double, OpSub<double>, IF_SIMD(VSub<double>)>(src1, step1, src2, step2, dst, step, sz);
}

template<> inline uchar OpMin<uchar>::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar OpMax<uchar>::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); }

static void max8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    uchar* s1 = (uchar*)src1;
    uchar* s2 = (uchar*)src2;
    uchar* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMaxEvery_8u(s1, s2, d, sz.width))
            break;
        s1 += step1;
        s2 += step2;
        d  += step;
    }
    if (i == sz.height)
        return;
    setIppErrorStatus();
#endif
    vBinOp<uchar, OpMax<uchar>, IF_SIMD(VMax<uchar>)>(src1, step1, src2, step2, dst, step, sz);
}

static void max8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp<schar, OpMax<schar>, IF_SIMD(VMax<schar>)>(src1, step1, src2, step2, dst, step, sz);
}

static void max16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    ushort* s1 = (ushort*)src1;
    ushort* s2 = (ushort*)src2;
    ushort* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMaxEvery_16u(s1, s2, d, sz.width))
            break;
        s1 = (ushort*)((uchar*)s1 + step1);
        s2 = (ushort*)((uchar*)s2 + step2);
        d  = (ushort*)((uchar*)d + step);
    }
    if (i == sz.height)
        return;
    setIppErrorStatus();
#endif
    vBinOp<ushort, OpMax<ushort>, IF_SIMD(VMax<ushort>)>(src1, step1, src2, step2, dst, step, sz);
}

static void max16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
    vBinOp<short, OpMax<short>, IF_SIMD(VMax<short>)>(src1, step1, src2, step2, dst, step, sz);
}

static void max32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32<int, OpMax<int>, IF_SIMD(VMax<int>)>(src1, step1, src2, step2, dst, step, sz);
}

static void max32f( const float* src1, size_t step1,
                    const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    float* s1 = (float*)src1;
    float* s2 = (float*)src2;
    float* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMaxEvery_32f(s1, s2, d, sz.width))
            break;
        s1 = (float*)((uchar*)s1 + step1);
        s2 = (float*)((uchar*)s2 + step2);
        d  = (float*)((uchar*)d + step);
    }
    if (i == sz.height)
        return;
    setIppErrorStatus();
#endif
    vBinOp32<float, OpMax<float>, IF_SIMD(VMax<float>)>(src1, step1, src2, step2, dst, step, sz);
}

static void max64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
#if ARITHM_USE_IPP == 1 && !defined HAVE_IPP_ICV_ONLY
    double* s1 = (double*)src1;
    double* s2 = (double*)src2;
    double* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMaxEvery_64f(s1, s2, d, sz.width))
            break;
        s1 = (double*)((uchar*)s1 + step1);
        s2 = (double*)((uchar*)s2 + step2);
        d  = (double*)((uchar*)d + step);
    }
    if (i == sz.height)
        return;
#endif
    vBinOp64<double, OpMax<double>, IF_SIMD(VMax<double>)>(src1, step1, src2, step2, dst, step, sz);
}

static void min8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    uchar* s1 = (uchar*)src1;
    uchar* s2 = (uchar*)src2;
    uchar* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMinEvery_8u(s1, s2, d, sz.width))
            break;
        s1 += step1;
        s2 += step2;
        d  += step;
    }
    if (i == sz.height)
        return;
    setIppErrorStatus();
#endif
    vBinOp<uchar, OpMin<uchar>, IF_SIMD(VMin<uchar>)>(src1, step1, src2, step2, dst, step, sz);
}

static void min8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp<schar, OpMin<schar>, IF_SIMD(VMin<schar>)>(src1, step1, src2, step2, dst, step, sz);
}

static void min16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    ushort* s1 = (ushort*)src1;
    ushort* s2 = (ushort*)src2;
    ushort* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMinEvery_16u(s1, s2, d, sz.width))
            break;
        s1 = (ushort*)((uchar*)s1 + step1);
        s2 = (ushort*)((uchar*)s2 + step2);
        d  = (ushort*)((uchar*)d + step);
    }
    if (i == sz.height)
        return;
    setIppErrorStatus();
#endif
    vBinOp<ushort, OpMin<ushort>, IF_SIMD(VMin<ushort>)>(src1, step1, src2, step2, dst, step, sz);
}

static void min16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
    vBinOp<short, OpMin<short>, IF_SIMD(VMin<short>)>(src1, step1, src2, step2, dst, step, sz);
}

static void min32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32<int, OpMin<int>, IF_SIMD(VMin<int>)>(src1, step1, src2, step2, dst, step, sz);
}

static void min32f( const float* src1, size_t step1,
                    const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    float* s1 = (float*)src1;
    float* s2 = (float*)src2;
    float* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMinEvery_32f(s1, s2, d, sz.width))
            break;
        s1 = (float*)((uchar*)s1 + step1);
        s2 = (float*)((uchar*)s2 + step2);
        d  = (float*)((uchar*)d + step);
    }
    if (i == sz.height)
        return;
    setIppErrorStatus();
#endif
    vBinOp32<float, OpMin<float>, IF_SIMD(VMin<float>)>(src1, step1, src2, step2, dst, step, sz);
}

static void min64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
#if ARITHM_USE_IPP == 1 && !defined HAVE_IPP_ICV_ONLY
    double* s1 = (double*)src1;
    double* s2 = (double*)src2;
    double* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    int i = 0;
    for(; i < sz.height; i++)
    {
        if (0 > ippsMinEvery_64f(s1, s2, d, sz.width))
            break;
        s1 = (double*)((uchar*)s1 + step1);
        s2 = (double*)((uchar*)s2 + step2);
        d  = (double*)((uchar*)d + step);
    }
    if (i == sz.height)
        return;
#endif
    vBinOp64<double, OpMin<double>, IF_SIMD(VMin<double>)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff8u( const uchar* src1, size_t step1,
                       const uchar* src2, size_t step2,
                       uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAbsDiff_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<uchar, OpAbsDiff<uchar>, IF_SIMD(VAbsDiff<uchar>)>(src1, step1, src2, step2, dst, step, sz));
}

static void absdiff8s( const schar* src1, size_t step1,
                       const schar* src2, size_t step2,
                       schar* dst, size_t step, Size sz, void* )
{
    vBinOp<schar, OpAbsDiff<schar>, IF_SIMD(VAbsDiff<schar>)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff16u( const ushort* src1, size_t step1,
                        const ushort* src2, size_t step2,
                        ushort* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAbsDiff_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<ushort, OpAbsDiff<ushort>, IF_SIMD(VAbsDiff<ushort>)>(src1, step1, src2, step2, dst, step, sz));
}

static void absdiff16s( const short* src1, size_t step1,
                        const short* src2, size_t step2,
                        short* dst, size_t step, Size sz, void* )
{
    vBinOp<short, OpAbsDiff<short>, IF_SIMD(VAbsDiff<short>)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff32s( const int* src1, size_t step1,
                        const int* src2, size_t step2,
                        int* dst, size_t step, Size sz, void* )
{
    vBinOp32<int, OpAbsDiff<int>, IF_SIMD(VAbsDiff<int>)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff32f( const float* src1, size_t step1,
                        const float* src2, size_t step2,
                        float* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAbsDiff_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp32<float, OpAbsDiff<float>, IF_SIMD(VAbsDiff<float>)>(src1, step1, src2, step2, dst, step, sz));
}

static void absdiff64f( const double* src1, size_t step1,
                        const double* src2, size_t step2,
                        double* dst, size_t step, Size sz, void* )
{
    vBinOp64<double, OpAbsDiff<double>, IF_SIMD(VAbsDiff<double>)>(src1, step1, src2, step2, dst, step, sz);
}

static void and8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiAnd_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<uchar, OpAnd<uchar>, IF_SIMD(VAnd<uchar>)>(src1, step1, src2, step2, dst, step, sz));
}

static void or8u( const uchar* src1, size_t step1,
                  const uchar* src2, size_t step2,
                  uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiOr_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<uchar, OpOr<uchar>, IF_SIMD(VOr<uchar>)>(src1, step1, src2, step2, dst, step, sz));
}

static void xor8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    if (0 <= ippiXor_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<uchar, OpXor<uchar>, IF_SIMD(VXor<uchar>)>(src1, step1, src2, step2, dst, step, sz));
}

static void not8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
    fixSteps(sz, sizeof(dst[0]), step1, step2, step); (void)src2;
    if (0 <= ippiNot_8u_C1R(src1, (int)step1, dst, (int)step, ippiSize(sz)))
        return;
    setIppErrorStatus();
#endif
    (vBinOp<uchar, OpNot<uchar>, IF_SIMD(VNot<uchar>)>(src1, step1, src2, step2, dst, step, sz));
}

/****************************************************************************************\
*                                   logical operations                                   *
\****************************************************************************************/

void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t blocksize )
{
    int scn = (int)sc.total(), cn = CV_MAT_CN(buftype);
    size_t esz = CV_ELEM_SIZE(buftype);
    getConvertFunc(sc.depth(), buftype)(sc.data, 1, 0, 1, scbuf, 1, Size(std::min(cn, scn), 1), 0);
    // unroll the scalar
    if( scn < cn )
    {
        CV_Assert( scn == 1 );
        size_t esz1 = CV_ELEM_SIZE1(buftype);
        for( size_t i = esz1; i < esz; i++ )
            scbuf[i] = scbuf[i - esz1];
    }
    for( size_t i = esz; i < blocksize*esz; i++ )
        scbuf[i] = scbuf[i - esz];
}


enum { OCL_OP_ADD=0, OCL_OP_SUB=1, OCL_OP_RSUB=2, OCL_OP_ABSDIFF=3, OCL_OP_MUL=4,
       OCL_OP_MUL_SCALE=5, OCL_OP_DIV_SCALE=6, OCL_OP_RECIP_SCALE=7, OCL_OP_ADDW=8,
       OCL_OP_AND=9, OCL_OP_OR=10, OCL_OP_XOR=11, OCL_OP_NOT=12, OCL_OP_MIN=13, OCL_OP_MAX=14,
       OCL_OP_RDIV_SCALE=15 };

#ifdef HAVE_OPENCL

static const char* oclop2str[] = { "OP_ADD", "OP_SUB", "OP_RSUB", "OP_ABSDIFF",
    "OP_MUL", "OP_MUL_SCALE", "OP_DIV_SCALE", "OP_RECIP_SCALE",
    "OP_ADDW", "OP_AND", "OP_OR", "OP_XOR", "OP_NOT", "OP_MIN", "OP_MAX", "OP_RDIV_SCALE", 0 };

static bool ocl_binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
                          InputArray _mask, bool bitwise, int oclop, bool haveScalar )
{
    bool haveMask = !_mask.empty();
    int srctype = _src1.type();
    int srcdepth = CV_MAT_DEPTH(srctype);
    int cn = CV_MAT_CN(srctype);

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if( oclop < 0 || ((haveMask || haveScalar) && cn > 4) ||
            (!doubleSupport && srcdepth == CV_64F && !bitwise))
        return false;

    char opts[1024];
    int kercn = haveMask || haveScalar ? cn : ocl::predictOptimalVectorWidth(_src1, _src2, _dst);
    int scalarcn = kercn == 3 ? 4 : kercn;

    sprintf(opts, "-D %s%s -D %s -D dstT=%s%s -D dstT_C1=%s -D workST=%s -D cn=%d",
            haveMask ? "MASK_" : "", haveScalar ? "UNARY_OP" : "BINARY_OP", oclop2str[oclop],
            bitwise ? ocl::memopTypeToStr(CV_MAKETYPE(srcdepth, kercn)) :
                ocl::typeToStr(CV_MAKETYPE(srcdepth, kercn)), doubleSupport ? " -D DOUBLE_SUPPORT" : "",
            bitwise ? ocl::memopTypeToStr(CV_MAKETYPE(srcdepth, 1)) :
                ocl::typeToStr(CV_MAKETYPE(srcdepth, 1)),
            bitwise ? ocl::memopTypeToStr(CV_MAKETYPE(srcdepth, scalarcn)) :
                ocl::typeToStr(CV_MAKETYPE(srcdepth, scalarcn)),
            kercn);

    ocl::Kernel k("KF", ocl::core::arithm_oclsrc, opts);
    if (k.empty())
        return false;

    UMat src1 = _src1.getUMat(), src2;
    UMat dst = _dst.getUMat(), mask = _mask.getUMat();

    ocl::KernelArg src1arg = ocl::KernelArg::ReadOnlyNoSize(src1, cn, kercn);
    ocl::KernelArg dstarg = haveMask ? ocl::KernelArg::ReadWrite(dst, cn, kercn) :
                                       ocl::KernelArg::WriteOnly(dst, cn, kercn);
    ocl::KernelArg maskarg = ocl::KernelArg::ReadOnlyNoSize(mask, 1);

    if( haveScalar )
    {
        size_t esz = CV_ELEM_SIZE1(srctype)*scalarcn;
        double buf[4] = {0,0,0,0};

        if( oclop != OCL_OP_NOT )
        {
            Mat src2sc = _src2.getMat();
            convertAndUnrollScalar(src2sc, srctype, (uchar*)buf, 1);
        }

        ocl::KernelArg scalararg = ocl::KernelArg(0, 0, 0, 0, buf, esz);

        if( !haveMask )
            k.args(src1arg, dstarg, scalararg);
        else
            k.args(src1arg, maskarg, dstarg, scalararg);
    }
    else
    {
        src2 = _src2.getUMat();
        ocl::KernelArg src2arg = ocl::KernelArg::ReadOnlyNoSize(src2, cn, kercn);

        if( !haveMask )
            k.args(src1arg, src2arg, dstarg);
        else
            k.args(src1arg, src2arg, maskarg, dstarg);
    }

    size_t globalsize[] = { src1.cols * cn / kercn, src1.rows };
    return k.run(2, globalsize, 0, false);
}

#endif
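
// binary_op() is the shared driver for the bitwise and min/max operations: it takes
// the "array op array" fast path when sizes and types match, recognises the
// "array op scalar" case (swapping operands if the scalar comes first), optionally
// dispatches to the OpenCL kernel via CV_OCL_RUN, and otherwise walks the inputs
// with NAryMatIterator in blocks bounded by BLOCK_SIZE, applying the selected
// per-depth kernel and, when a mask is given, copying the result through it.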
static void binary_op( InputArray _src1, InputArray _src2, OutputArray _dst,
                       InputArray _mask, const BinaryFunc* tab,
                       bool bitwise, int oclop )
{
    const _InputArray *psrc1 = &_src1, *psrc2 = &_src2;
    int kind1 = psrc1->kind(), kind2 = psrc2->kind();
    int type1 = psrc1->type(), depth1 = CV_MAT_DEPTH(type1), cn = CV_MAT_CN(type1);
    int type2 = psrc2->type(), depth2 = CV_MAT_DEPTH(type2), cn2 = CV_MAT_CN(type2);
    int dims1 = psrc1->dims(), dims2 = psrc2->dims();
    Size sz1 = dims1 <= 2 ? psrc1->size() : Size();
    Size sz2 = dims2 <= 2 ? psrc2->size() : Size();
#ifdef HAVE_OPENCL
    bool use_opencl = (kind1 == _InputArray::UMAT || kind2 == _InputArray::UMAT) &&
            dims1 <= 2 && dims2 <= 2;
#endif
    bool haveMask = !_mask.empty(), haveScalar = false;
    BinaryFunc func;

    if( dims1 <= 2 && dims2 <= 2 && kind1 == kind2 && sz1 == sz2 && type1 == type2 && !haveMask )
    {
        _dst.create(sz1, type1);
        CV_OCL_RUN(use_opencl,
                   ocl_binary_op(*psrc1, *psrc2, _dst, _mask, bitwise, oclop, false))

        if( bitwise )
        {
            func = *tab;
            cn = (int)CV_ELEM_SIZE(type1);
        }
        else
            func = tab[depth1];

        Mat src1 = psrc1->getMat(), src2 = psrc2->getMat(), dst = _dst.getMat();
        Size sz = getContinuousSize(src1, src2, dst);
        size_t len = sz.width*(size_t)cn;
        if( len == (size_t)(int)len )
        {
            sz.width = (int)len;
            func(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, 0);
            return;
        }
    }

    if( oclop == OCL_OP_NOT )
        haveScalar = true;
    else if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 ||
        !psrc1->sameSize(*psrc2) || type1 != type2 )
    {
        if( checkScalar(*psrc1, type2, kind1, kind2) )
        {
            // src1 is a scalar; swap it with src2
            swap(psrc1, psrc2);
            swap(type1, type2);
            swap(depth1, depth2);
            swap(cn, cn2);
            swap(sz1, sz2);
        }
        else if( !checkScalar(*psrc2, type1, kind2, kind1) )
            CV_Error( CV_StsUnmatchedSizes,
                      "The operation is neither 'array op array' (where arrays have the same size and type), "
                      "nor 'array op scalar', nor 'scalar op array'" );
        haveScalar = true;
    }
    else
    {
        CV_Assert( psrc1->sameSize(*psrc2) && type1 == type2 );
    }

    size_t esz = CV_ELEM_SIZE(type1);
    size_t blocksize0 = (BLOCK_SIZE + esz-1)/esz;
    BinaryFunc copymask = 0;
    bool reallocate = false;

    if( haveMask )
    {
        int mtype = _mask.type();
        CV_Assert( (mtype == CV_8U || mtype == CV_8S) && _mask.sameSize(*psrc1));
        copymask = getCopyMaskFunc(esz);
        reallocate = !_dst.sameSize(*psrc1) || _dst.type() != type1;
    }

    AutoBuffer<uchar> _buf;
    uchar *scbuf = 0, *maskbuf = 0;

    _dst.createSameSize(*psrc1, type1);
    // if this is mask operation and dst has been reallocated,
    // we have to clear the destination
    if( haveMask && reallocate )
        _dst.setTo(0.);

    CV_OCL_RUN(use_opencl,
               ocl_binary_op(*psrc1, *psrc2, _dst, _mask, bitwise, oclop, haveScalar))


    Mat src1 = psrc1->getMat(), src2 = psrc2->getMat();
    Mat dst = _dst.getMat(), mask = _mask.getMat();

    if( bitwise )
    {
        func = *tab;
        cn = (int)esz;
    }
    else
        func = tab[depth1];

    if( !haveScalar )
    {
        const Mat* arrays[] = { &src1, &src2, &dst, &mask, 0 };
        uchar* ptrs[4];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = total;

        if( blocksize*cn > INT_MAX )
            blocksize = INT_MAX/cn;

        if( haveMask )
        {
            blocksize = std::min(blocksize, blocksize0);
            _buf.allocate(blocksize*esz);
            maskbuf = _buf;
        }

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)MIN(total - j, blocksize);

                func( ptrs[0], 0, ptrs[1], 0, haveMask ? maskbuf : ptrs[2], 0, Size(bsz*cn, 1), 0 );
                if( haveMask )
                {
                    copymask( maskbuf, 0, ptrs[3], 0, ptrs[2], 0, Size(bsz, 1), &esz );
                    ptrs[3] += bsz;
                }

                bsz *= (int)esz;
                ptrs[0] += bsz; ptrs[1] += bsz; ptrs[2] += bsz;
            }
        }
    }
    else
    {
        const Mat* arrays[] = { &src1, &dst, &mask, 0 };
        uchar* ptrs[3];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = std::min(total, blocksize0);

        _buf.allocate(blocksize*(haveMask ? 2 : 1)*esz + 32);
        scbuf = _buf;
        maskbuf = alignPtr(scbuf + blocksize*esz, 16);

        convertAndUnrollScalar( src2, src1.type(), scbuf, blocksize);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)MIN(total - j, blocksize);

                func( ptrs[0], 0, scbuf, 0, haveMask ? maskbuf : ptrs[1], 0, Size(bsz*cn, 1), 0 );
                if( haveMask )
                {
                    copymask( maskbuf, 0, ptrs[2], 0, ptrs[1], 0, Size(bsz, 1), &esz );
                    ptrs[2] += bsz;
                }

                bsz *= (int)esz;
                ptrs[0] += bsz; ptrs[1] += bsz;
            }
        }
    }
}

static BinaryFunc* getMaxTab()
{
    static BinaryFunc maxTab[] =
    {
        (BinaryFunc)GET_OPTIMIZED(max8u), (BinaryFunc)GET_OPTIMIZED(max8s),
        (BinaryFunc)GET_OPTIMIZED(max16u), (BinaryFunc)GET_OPTIMIZED(max16s),
        (BinaryFunc)GET_OPTIMIZED(max32s),
        (BinaryFunc)GET_OPTIMIZED(max32f), (BinaryFunc)max64f,
        0
    };

    return maxTab;
}

static BinaryFunc* getMinTab()
{
    static BinaryFunc minTab[] =
    {
        (BinaryFunc)GET_OPTIMIZED(min8u), (BinaryFunc)GET_OPTIMIZED(min8s),
        (BinaryFunc)GET_OPTIMIZED(min16u), (BinaryFunc)GET_OPTIMIZED(min16s),
        (BinaryFunc)GET_OPTIMIZED(min32s),
        (BinaryFunc)GET_OPTIMIZED(min32f), (BinaryFunc)min64f,
        0
    };

    return minTab;
}

}

void cv::bitwise_and(InputArray a, InputArray b, OutputArray c, InputArray mask)
{
    BinaryFunc f = (BinaryFunc)GET_OPTIMIZED(and8u);
    binary_op(a, b, c, mask, &f, true, OCL_OP_AND);
}

void cv::bitwise_or(InputArray a, InputArray b, OutputArray c, InputArray mask)
{
    BinaryFunc f = (BinaryFunc)GET_OPTIMIZED(or8u);
    binary_op(a, b, c, mask, &f, true, OCL_OP_OR);
}

void cv::bitwise_xor(InputArray a, InputArray b, OutputArray c, InputArray mask)
{
    BinaryFunc f = (BinaryFunc)GET_OPTIMIZED(xor8u);
    binary_op(a, b, c, mask, &f, true, OCL_OP_XOR);
}

void cv::bitwise_not(InputArray a, OutputArray c, InputArray mask)
{
    BinaryFunc f = (BinaryFunc)GET_OPTIMIZED(not8u);
    binary_op(a, a, c, mask, &f, true, OCL_OP_NOT);
}

void cv::max( InputArray src1, InputArray src2, OutputArray dst )
{
    binary_op(src1, src2, dst, noArray(), getMaxTab(), false, OCL_OP_MAX );
}

void cv::min( InputArray src1, InputArray src2, OutputArray dst )
{
    binary_op(src1, src2, dst, noArray(), getMinTab(), false, OCL_OP_MIN );
}

void cv::max(const Mat& src1, const Mat& src2, Mat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, noArray(), getMaxTab(), false, OCL_OP_MAX );
}

void cv::min(const Mat& src1, const Mat& src2, Mat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, noArray(), getMinTab(), false, OCL_OP_MIN );
}

void cv::max(const UMat& src1, const UMat& src2, UMat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, noArray(), getMaxTab(), false, OCL_OP_MAX );
}

void cv::min(const UMat& src1, const UMat& src2, UMat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, noArray(), getMinTab(), false, OCL_OP_MIN );
}


1339 1340 1341
/****************************************************************************************\
*                                      add/subtract                                      *
\****************************************************************************************/
1342

1343 1344
namespace cv
{
1345

1346 1347
static int actualScalarDepth(const double* data, int len)
{
    int i = 0, minval = INT_MAX, maxval = INT_MIN;
    for(; i < len; ++i)
    {
        int ival = cvRound(data[i]);
        if( ival != data[i] )
            break;
        minval = MIN(minval, ival);
        maxval = MAX(maxval, ival);
    }
    return i < len ? CV_64F :
        minval >= 0 && maxval <= (int)UCHAR_MAX ? CV_8U :
        minval >= (int)SCHAR_MIN && maxval <= (int)SCHAR_MAX ? CV_8S :
        minval >= 0 && maxval <= (int)USHRT_MAX ? CV_16U :
        minval >= (int)SHRT_MIN && maxval <= (int)SHRT_MAX ? CV_16S :
        CV_32S;
}

#ifdef HAVE_OPENCL

static bool ocl_arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
                          InputArray _mask, int wtype,
                          void* usrdata, int oclop,
                          bool haveScalar )
{
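    // OpenCL path for the element-wise binary operations: it builds the generic "KF"
    // kernel from the arithm kernel source with per-call type/operation defines, binds
    // the sources, destination, optional mask/scalar and extra scale arguments, and runs
    // it; returning false makes the caller fall back to the CPU implementation.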
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    int type1 = _src1.type(), depth1 = CV_MAT_DEPTH(type1), cn = CV_MAT_CN(type1);
    bool haveMask = !_mask.empty();

    if ( (haveMask || haveScalar) && cn > 4 )
        return false;

    int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype), wdepth = std::max(CV_32S, CV_MAT_DEPTH(wtype));
    if (!doubleSupport)
        wdepth = std::min(wdepth, CV_32F);

    wtype = CV_MAKETYPE(wdepth, cn);
    int type2 = haveScalar ? wtype : _src2.type(), depth2 = CV_MAT_DEPTH(type2);
    if (!doubleSupport && (depth2 == CV_64F || depth1 == CV_64F))
        return false;

    int kercn = haveMask || haveScalar ? cn : ocl::predictOptimalVectorWidth(_src1, _src2, _dst);
    int scalarcn = kercn == 3 ? 4 : kercn;

    char cvtstr[4][32], opts[1024];
    sprintf(opts, "-D %s%s -D %s -D srcT1=%s -D srcT1_C1=%s -D srcT2=%s -D srcT2_C1=%s "
            "-D dstT=%s -D dstT_C1=%s -D workT=%s -D workST=%s -D scaleT=%s -D wdepth=%d -D convertToWT1=%s "
            "-D convertToWT2=%s -D convertToDT=%s%s -D cn=%d",
            (haveMask ? "MASK_" : ""), (haveScalar ? "UNARY_OP" : "BINARY_OP"),
            oclop2str[oclop], ocl::typeToStr(CV_MAKETYPE(depth1, kercn)),
            ocl::typeToStr(depth1), ocl::typeToStr(CV_MAKETYPE(depth2, kercn)),
            ocl::typeToStr(depth2), ocl::typeToStr(CV_MAKETYPE(ddepth, kercn)),
            ocl::typeToStr(ddepth), ocl::typeToStr(CV_MAKETYPE(wdepth, kercn)),
            ocl::typeToStr(CV_MAKETYPE(wdepth, scalarcn)),
            ocl::typeToStr(wdepth), wdepth,
            ocl::convertTypeStr(depth1, wdepth, kercn, cvtstr[0]),
            ocl::convertTypeStr(depth2, wdepth, kercn, cvtstr[1]),
            ocl::convertTypeStr(wdepth, ddepth, kercn, cvtstr[2]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "", kercn);

    size_t usrdata_esz = CV_ELEM_SIZE(wdepth);
    const uchar* usrdata_p = (const uchar*)usrdata;
    const double* usrdata_d = (const double*)usrdata;
    float usrdata_f[3];
    int i, n = oclop == OCL_OP_MUL_SCALE || oclop == OCL_OP_DIV_SCALE ||
        oclop == OCL_OP_RDIV_SCALE || oclop == OCL_OP_RECIP_SCALE ? 1 : oclop == OCL_OP_ADDW ? 3 : 0;
    if( n > 0 && wdepth == CV_32F )
    {
        for( i = 0; i < n; i++ )
            usrdata_f[i] = (float)usrdata_d[i];
        usrdata_p = (const uchar*)usrdata_f;
    }

    ocl::Kernel k("KF", ocl::core::arithm_oclsrc, opts);
    if (k.empty())
        return false;

    UMat src1 = _src1.getUMat(), src2;
    UMat dst = _dst.getUMat(), mask = _mask.getUMat();

    ocl::KernelArg src1arg = ocl::KernelArg::ReadOnlyNoSize(src1, cn, kercn);
    ocl::KernelArg dstarg = haveMask ? ocl::KernelArg::ReadWrite(dst, cn, kercn) :
                                       ocl::KernelArg::WriteOnly(dst, cn, kercn);
    ocl::KernelArg maskarg = ocl::KernelArg::ReadOnlyNoSize(mask, 1);

    if( haveScalar )
    {
        size_t esz = CV_ELEM_SIZE1(wtype)*scalarcn;
        double buf[4]={0,0,0,0};
        Mat src2sc = _src2.getMat();

        if( !src2sc.empty() )
            convertAndUnrollScalar(src2sc, wtype, (uchar*)buf, 1);
        ocl::KernelArg scalararg = ocl::KernelArg(0, 0, 0, 0, buf, esz);

        if( !haveMask )
        {
            if(n == 0)
                k.args(src1arg, dstarg, scalararg);
            else if(n == 1)
                k.args(src1arg, dstarg, scalararg,
                       ocl::KernelArg(0, 0, 0, 0, usrdata_p, usrdata_esz));
            else
                CV_Error(Error::StsNotImplemented, "unsupported number of extra parameters");
        }
        else
            k.args(src1arg, maskarg, dstarg, scalararg);
    }
    else
    {
        src2 = _src2.getUMat();
        ocl::KernelArg src2arg = ocl::KernelArg::ReadOnlyNoSize(src2, cn, kercn);

        if( !haveMask )
        {
            if (n == 0)
                k.args(src1arg, src2arg, dstarg);
            else if (n == 1)
                k.args(src1arg, src2arg, dstarg,
                       ocl::KernelArg(0, 0, 0, 0, usrdata_p, usrdata_esz));
            else if (n == 3)
                k.args(src1arg, src2arg, dstarg,
                       ocl::KernelArg(0, 0, 0, 0, usrdata_p, usrdata_esz),
                       ocl::KernelArg(0, 0, 0, 0, usrdata_p + usrdata_esz, usrdata_esz),
                       ocl::KernelArg(0, 0, 0, 0, usrdata_p + usrdata_esz*2, usrdata_esz));
            else
                CV_Error(Error::StsNotImplemented, "unsupported number of extra parameters");
        }
        else
            k.args(src1arg, src2arg, maskarg, dstarg);
    }

    size_t globalsize[] = { src1.cols * cn / kercn, src1.rows };
    return k.run(2, globalsize, NULL, false);
}

#endif

static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
                      InputArray _mask, int dtype, BinaryFunc* tab, bool muldiv=false,
                      void* usrdata=0, int oclop=-1 )
{
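    // Generic dispatcher for the element-wise binary operations: it first tries the fast
    // path (same types and sizes, no mask, no type conversion), otherwise it normalizes a
    // possible scalar operand, chooses destination and working types, optionally runs the
    // OpenCL kernel, and finally processes the data block by block through the depth-specific
    // function from 'tab', converting to/from the working type and applying the mask if given.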
    const _InputArray *psrc1 = &_src1, *psrc2 = &_src2;
    int kind1 = psrc1->kind(), kind2 = psrc2->kind();
    bool haveMask = !_mask.empty();
    bool reallocate = false;
    int type1 = psrc1->type(), depth1 = CV_MAT_DEPTH(type1), cn = CV_MAT_CN(type1);
    int type2 = psrc2->type(), depth2 = CV_MAT_DEPTH(type2), cn2 = CV_MAT_CN(type2);
    int wtype, dims1 = psrc1->dims(), dims2 = psrc2->dims();
    Size sz1 = dims1 <= 2 ? psrc1->size() : Size();
    Size sz2 = dims2 <= 2 ? psrc2->size() : Size();
#ifdef HAVE_OPENCL
    bool use_opencl = _dst.isUMat() && dims1 <= 2 && dims2 <= 2;
#endif
    bool src1Scalar = checkScalar(*psrc1, type2, kind1, kind2);
    bool src2Scalar = checkScalar(*psrc2, type1, kind2, kind1);

    if( (kind1 == kind2 || cn == 1) && sz1 == sz2 && dims1 <= 2 && dims2 <= 2 && type1 == type2 &&
        !haveMask && ((!_dst.fixedType() && (dtype < 0 || CV_MAT_DEPTH(dtype) == depth1)) ||
                       (_dst.fixedType() && _dst.type() == type1)) &&
        ((src1Scalar && src2Scalar) || (!src1Scalar && !src2Scalar)) )
    {
        _dst.createSameSize(*psrc1, type1);
        CV_OCL_RUN(use_opencl,
            ocl_arithm_op(*psrc1, *psrc2, _dst, _mask,
                          (!usrdata ? type1 : std::max(depth1, CV_32F)),
                          usrdata, oclop, false))

        Mat src1 = psrc1->getMat(), src2 = psrc2->getMat(), dst = _dst.getMat();
        Size sz = getContinuousSize(src1, src2, dst, src1.channels());
        tab[depth1](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, usrdata);
        return;
    }

    bool haveScalar = false, swapped12 = false;

    if( dims1 != dims2 || sz1 != sz2 || cn != cn2 ||
        (kind1 == _InputArray::MATX && (sz1 == Size(1,4) || sz1 == Size(1,1))) ||
        (kind2 == _InputArray::MATX && (sz2 == Size(1,4) || sz2 == Size(1,1))) )
    {
        if( checkScalar(*psrc1, type2, kind1, kind2) )
        {
            // src1 is a scalar; swap it with src2
            swap(psrc1, psrc2);
            swap(sz1, sz2);
            swap(type1, type2);
            swap(depth1, depth2);
            swap(cn, cn2);
            swap(dims1, dims2);
            swapped12 = true;
            if( oclop == OCL_OP_SUB )
                oclop = OCL_OP_RSUB;
            if ( oclop == OCL_OP_DIV_SCALE )
                oclop = OCL_OP_RDIV_SCALE;
        }
        else if( !checkScalar(*psrc2, type1, kind2, kind1) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The operation is neither 'array op array' "
                     "(where arrays have the same size and the same number of channels), "
                     "nor 'array op scalar', nor 'scalar op array'" );
        haveScalar = true;
        CV_Assert(type2 == CV_64F && (sz2.height == 1 || sz2.height == 4));

        if (!muldiv)
        {
            Mat sc = psrc2->getMat();
            depth2 = actualScalarDepth(sc.ptr<double>(), cn);
            if( depth2 == CV_64F && (depth1 < CV_32S || depth1 == CV_32F) )
                depth2 = CV_32F;
        }
        else
            depth2 = CV_64F;
    }

    if( dtype < 0 )
    {
        if( _dst.fixedType() )
            dtype = _dst.type();
        else
        {
            if( !haveScalar && type1 != type2 )
                CV_Error(CV_StsBadArg,
                     "When the input arrays in add/subtract/multiply/divide functions have different types, "
                     "the output array type must be explicitly specified");
            dtype = type1;
        }
    }
    dtype = CV_MAT_DEPTH(dtype);

    if( depth1 == depth2 && dtype == depth1 )
        wtype = dtype;
    else if( !muldiv )
    {
        wtype = depth1 <= CV_8S && depth2 <= CV_8S ? CV_16S :
                depth1 <= CV_32S && depth2 <= CV_32S ? CV_32S : std::max(depth1, depth2);
        wtype = std::max(wtype, dtype);

        // when the result of addition should be converted to an integer type,
        // and just one of the input arrays is floating-point, it makes sense to convert that input to integer type before the operation,
        // instead of converting the other input to floating-point and then converting the operation result back to integers.
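        // e.g. a CV_8U and a CV_32F input written to a CV_16S destination use CV_32S
        // intermediates here instead of float.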
        if( dtype < CV_32F && (depth1 < CV_32F || depth2 < CV_32F) )
            wtype = CV_32S;
    }
    else
    {
        wtype = std::max(depth1, std::max(depth2, CV_32F));
        wtype = std::max(wtype, dtype);
    }

    dtype = CV_MAKETYPE(dtype, cn);
    wtype = CV_MAKETYPE(wtype, cn);

    if( haveMask )
    {
        int mtype = _mask.type();
        CV_Assert( (mtype == CV_8UC1 || mtype == CV_8SC1) && _mask.sameSize(*psrc1) );
        reallocate = !_dst.sameSize(*psrc1) || _dst.type() != dtype;
    }

    _dst.createSameSize(*psrc1, dtype);
    if( reallocate )
        _dst.setTo(0.);

    CV_OCL_RUN(use_opencl,
               ocl_arithm_op(*psrc1, *psrc2, _dst, _mask, wtype,
               usrdata, oclop, haveScalar))

    BinaryFunc cvtsrc1 = type1 == wtype ? 0 : getConvertFunc(type1, wtype);
    BinaryFunc cvtsrc2 = type2 == type1 ? cvtsrc1 : type2 == wtype ? 0 : getConvertFunc(type2, wtype);
    BinaryFunc cvtdst = dtype == wtype ? 0 : getConvertFunc(wtype, dtype);

    size_t esz1 = CV_ELEM_SIZE(type1), esz2 = CV_ELEM_SIZE(type2);
    size_t dsz = CV_ELEM_SIZE(dtype), wsz = CV_ELEM_SIZE(wtype);
    size_t blocksize0 = (size_t)(BLOCK_SIZE + wsz-1)/wsz;
    BinaryFunc copymask = getCopyMaskFunc(dsz);
    Mat src1 = psrc1->getMat(), src2 = psrc2->getMat(), dst = _dst.getMat(), mask = _mask.getMat();

    AutoBuffer<uchar> _buf;
    uchar *buf, *maskbuf = 0, *buf1 = 0, *buf2 = 0, *wbuf = 0;
    size_t bufesz = (cvtsrc1 ? wsz : 0) +
                    (cvtsrc2 || haveScalar ? wsz : 0) +
                    (cvtdst ? wsz : 0) +
                    (haveMask ? dsz : 0);
    BinaryFunc func = tab[CV_MAT_DEPTH(wtype)];

    if( !haveScalar )
    {
        const Mat* arrays[] = { &src1, &src2, &dst, &mask, 0 };
        uchar* ptrs[4];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = total;

        if( haveMask || cvtsrc1 || cvtsrc2 || cvtdst )
            blocksize = std::min(blocksize, blocksize0);

        _buf.allocate(bufesz*blocksize + 64);
        buf = _buf;
        if( cvtsrc1 )
            buf1 = buf, buf = alignPtr(buf + blocksize*wsz, 16);
        if( cvtsrc2 )
            buf2 = buf, buf = alignPtr(buf + blocksize*wsz, 16);
        wbuf = maskbuf = buf;
        if( cvtdst )
            buf = alignPtr(buf + blocksize*wsz, 16);
        if( haveMask )
            maskbuf = buf;

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)MIN(total - j, blocksize);
                Size bszn(bsz*cn, 1);
                const uchar *sptr1 = ptrs[0], *sptr2 = ptrs[1];
                uchar* dptr = ptrs[2];
                if( cvtsrc1 )
                {
                    cvtsrc1( sptr1, 1, 0, 1, buf1, 1, bszn, 0 );
                    sptr1 = buf1;
                }
                if( ptrs[0] == ptrs[1] )
                    sptr2 = sptr1;
                else if( cvtsrc2 )
                {
                    cvtsrc2( sptr2, 1, 0, 1, buf2, 1, bszn, 0 );
                    sptr2 = buf2;
                }

                if( !haveMask && !cvtdst )
                    func( sptr1, 1, sptr2, 1, dptr, 1, bszn, usrdata );
                else
                {
                    func( sptr1, 1, sptr2, 1, wbuf, 0, bszn, usrdata );
                    if( !haveMask )
                        cvtdst( wbuf, 1, 0, 1, dptr, 1, bszn, 0 );
                    else if( !cvtdst )
                    {
                        copymask( wbuf, 1, ptrs[3], 1, dptr, 1, Size(bsz, 1), &dsz );
                        ptrs[3] += bsz;
                    }
                    else
                    {
                        cvtdst( wbuf, 1, 0, 1, maskbuf, 1, bszn, 0 );
                        copymask( maskbuf, 1, ptrs[3], 1, dptr, 1, Size(bsz, 1), &dsz );
                        ptrs[3] += bsz;
                    }
                }
                ptrs[0] += bsz*esz1; ptrs[1] += bsz*esz2; ptrs[2] += bsz*dsz;
            }
        }
    }
    else
    {
        const Mat* arrays[] = { &src1, &dst, &mask, 0 };
        uchar* ptrs[3];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = std::min(total, blocksize0);

        _buf.allocate(bufesz*blocksize + 64);
        buf = _buf;
        if( cvtsrc1 )
            buf1 = buf, buf = alignPtr(buf + blocksize*wsz, 16);
        buf2 = buf; buf = alignPtr(buf + blocksize*wsz, 16);
        wbuf = maskbuf = buf;
        if( cvtdst )
            buf = alignPtr(buf + blocksize*wsz, 16);
        if( haveMask )
            maskbuf = buf;

        convertAndUnrollScalar( src2, wtype, buf2, blocksize);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)MIN(total - j, blocksize);
                Size bszn(bsz*cn, 1);
                const uchar *sptr1 = ptrs[0];
                const uchar* sptr2 = buf2;
                uchar* dptr = ptrs[1];

                if( cvtsrc1 )
                {
                    cvtsrc1( sptr1, 1, 0, 1, buf1, 1, bszn, 0 );
                    sptr1 = buf1;
                }

                if( swapped12 )
                    std::swap(sptr1, sptr2);

                if( !haveMask && !cvtdst )
                    func( sptr1, 1, sptr2, 1, dptr, 1, bszn, usrdata );
                else
                {
                    func( sptr1, 1, sptr2, 1, wbuf, 1, bszn, usrdata );
                    if( !haveMask )
                        cvtdst( wbuf, 1, 0, 1, dptr, 1, bszn, 0 );
                    else if( !cvtdst )
                    {
                        copymask( wbuf, 1, ptrs[2], 1, dptr, 1, Size(bsz, 1), &dsz );
                        ptrs[2] += bsz;
                    }
                    else
                    {
                        cvtdst( wbuf, 1, 0, 1, maskbuf, 1, bszn, 0 );
                        copymask( maskbuf, 1, ptrs[2], 1, dptr, 1, Size(bsz, 1), &dsz );
                        ptrs[2] += bsz;
                    }
                }
                ptrs[0] += bsz*esz1; ptrs[1] += bsz*dsz;
            }
        }
    }
}

static BinaryFunc* getAddTab()
{
    static BinaryFunc addTab[] =
    {
        (BinaryFunc)GET_OPTIMIZED(add8u), (BinaryFunc)GET_OPTIMIZED(add8s),
        (BinaryFunc)GET_OPTIMIZED(add16u), (BinaryFunc)GET_OPTIMIZED(add16s),
        (BinaryFunc)GET_OPTIMIZED(add32s),
        (BinaryFunc)GET_OPTIMIZED(add32f), (BinaryFunc)add64f,
        0
    };

    return addTab;
}

static BinaryFunc* getSubTab()
{
    static BinaryFunc subTab[] =
    {
        (BinaryFunc)GET_OPTIMIZED(sub8u), (BinaryFunc)GET_OPTIMIZED(sub8s),
        (BinaryFunc)GET_OPTIMIZED(sub16u), (BinaryFunc)GET_OPTIMIZED(sub16s),
        (BinaryFunc)GET_OPTIMIZED(sub32s),
        (BinaryFunc)GET_OPTIMIZED(sub32f), (BinaryFunc)sub64f,
        0
    };

    return subTab;
}

static BinaryFunc* getAbsDiffTab()
{
    static BinaryFunc absDiffTab[] =
    {
        (BinaryFunc)GET_OPTIMIZED(absdiff8u), (BinaryFunc)GET_OPTIMIZED(absdiff8s),
        (BinaryFunc)GET_OPTIMIZED(absdiff16u), (BinaryFunc)GET_OPTIMIZED(absdiff16s),
        (BinaryFunc)GET_OPTIMIZED(absdiff32s),
        (BinaryFunc)GET_OPTIMIZED(absdiff32f), (BinaryFunc)absdiff64f,
        0
    };

    return absDiffTab;
}

}

void cv::add( InputArray src1, InputArray src2, OutputArray dst,
          InputArray mask, int dtype )
{
    arithm_op(src1, src2, dst, mask, dtype, getAddTab(), false, 0, OCL_OP_ADD );
}

void cv::subtract( InputArray src1, InputArray src2, OutputArray dst,
               InputArray mask, int dtype )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
    if (mask.empty() && src1.depth() == CV_8U && src2.depth() == CV_8U)
    {
        if (dtype == -1 && dst.fixedType())
            dtype = dst.depth();

        if (!dst.fixedType() || dtype == dst.depth())
        {
            if (dtype == CV_16S)
            {
                Mat _dst = dst.getMat();
                if(tegra::subtract_8u8u16s(src1.getMat(), src2.getMat(), _dst))
                    return;
            }
            else if (dtype == CV_32F)
            {
                Mat _dst = dst.getMat();
                if(tegra::subtract_8u8u32f(src1.getMat(), src2.getMat(), _dst))
                    return;
            }
            else if (dtype == CV_8S)
            {
                Mat _dst = dst.getMat();
                if(tegra::subtract_8u8u8s(src1.getMat(), src2.getMat(), _dst))
                    return;
            }
        }
    }
#endif
    arithm_op(src1, src2, dst, mask, dtype, getSubTab(), false, 0, OCL_OP_SUB );
}

void cv::absdiff( InputArray src1, InputArray src2, OutputArray dst )
{
    arithm_op(src1, src2, dst, noArray(), -1, getAbsDiffTab(), false, 0, OCL_OP_ABSDIFF);
}

/****************************************************************************************\
*                                    multiply/divide                                     *
\****************************************************************************************/

namespace cv
{

template<typename T, typename WT> static void
mul_( const T* src1, size_t step1, const T* src2, size_t step2,
      T* dst, size_t step, Size size, WT scale )
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    if( scale == (WT)1. )
    {
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int i=0;
            #if CV_ENABLE_UNROLLED
            for(; i <= size.width - 4; i += 4 )
            {
                T t0;
                T t1;
                t0 = saturate_cast<T>(src1[i  ] * src2[i  ]);
                t1 = saturate_cast<T>(src1[i+1] * src2[i+1]);
                dst[i  ] = t0;
                dst[i+1] = t1;

                t0 = saturate_cast<T>(src1[i+2] * src2[i+2]);
                t1 = saturate_cast<T>(src1[i+3] * src2[i+3]);
                dst[i+2] = t0;
                dst[i+3] = t1;
            }
            #endif
            for( ; i < size.width; i++ )
                dst[i] = saturate_cast<T>(src1[i] * src2[i]);
        }
    }
    else
    {
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int i = 0;
            #if CV_ENABLE_UNROLLED
            for(; i <= size.width - 4; i += 4 )
            {
                T t0 = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
                T t1 = saturate_cast<T>(scale*(WT)src1[i+1]*src2[i+1]);
                dst[i] = t0; dst[i+1] = t1;

                t0 = saturate_cast<T>(scale*(WT)src1[i+2]*src2[i+2]);
                t1 = saturate_cast<T>(scale*(WT)src1[i+3]*src2[i+3]);
                dst[i+2] = t0; dst[i+3] = t1;
            }
            #endif
            for( ; i < size.width; i++ )
                dst[i] = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
        }
    }
}

template<typename T> static void
div_( const T* src1, size_t step1, const T* src2, size_t step2,
      T* dst, size_t step, Size size, double scale )
{
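    // The unrolled loop below replaces four divisions with one: it computes
    // d = scale / (src2[i]*src2[i+1]*src2[i+2]*src2[i+3]) and recovers each quotient
    // as a product, falling back to per-element division (and 0 for zero divisors)
    // when any of the four denominators is zero.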
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    for( ; size.height--; src1 += step1, src2 += step2, dst += step )
    {
        int i = 0;
        #if CV_ENABLE_UNROLLED
        for( ; i <= size.width - 4; i += 4 )
        {
            if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 )
            {
                double a = (double)src2[i] * src2[i+1];
                double b = (double)src2[i+2] * src2[i+3];
                double d = scale/(a * b);
                b *= d;
                a *= d;

                T z0 = saturate_cast<T>(src2[i+1] * ((double)src1[i] * b));
                T z1 = saturate_cast<T>(src2[i] * ((double)src1[i+1] * b));
                T z2 = saturate_cast<T>(src2[i+3] * ((double)src1[i+2] * a));
                T z3 = saturate_cast<T>(src2[i+2] * ((double)src1[i+3] * a));

                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
            else
            {
                T z0 = src2[i] != 0 ? saturate_cast<T>(src1[i]*scale/src2[i]) : 0;
                T z1 = src2[i+1] != 0 ? saturate_cast<T>(src1[i+1]*scale/src2[i+1]) : 0;
                T z2 = src2[i+2] != 0 ? saturate_cast<T>(src1[i+2]*scale/src2[i+2]) : 0;
                T z3 = src2[i+3] != 0 ? saturate_cast<T>(src1[i+3]*scale/src2[i+3]) : 0;
                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
        }
        #endif
        for( ; i < size.width; i++ )
            dst[i] = src2[i] != 0 ? saturate_cast<T>(src1[i]*scale/src2[i]) : 0;
    }
}

template<typename T> static void
recip_( const T*, size_t, const T* src2, size_t step2,
        T* dst, size_t step, Size size, double scale )
{
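    // Same grouped-reciprocal trick as in div_, but with src1 unused: dst = scale / src2,
    // and elements with a zero divisor are set to 0.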
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    for( ; size.height--; src2 += step2, dst += step )
    {
        int i = 0;
        #if CV_ENABLE_UNROLLED
        for( ; i <= size.width - 4; i += 4 )
        {
            if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 )
            {
                double a = (double)src2[i] * src2[i+1];
                double b = (double)src2[i+2] * src2[i+3];
                double d = scale/(a * b);
                b *= d;
                a *= d;

                T z0 = saturate_cast<T>(src2[i+1] * b);
                T z1 = saturate_cast<T>(src2[i] * b);
                T z2 = saturate_cast<T>(src2[i+3] * a);
                T z3 = saturate_cast<T>(src2[i+2] * a);

                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
            else
            {
                T z0 = src2[i] != 0 ? saturate_cast<T>(scale/src2[i]) : 0;
                T z1 = src2[i+1] != 0 ? saturate_cast<T>(scale/src2[i+1]) : 0;
                T z2 = src2[i+2] != 0 ? saturate_cast<T>(scale/src2[i+2]) : 0;
                T z3 = src2[i+3] != 0 ? saturate_cast<T>(scale/src2[i+3]) : 0;

                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
        }
        #endif
        for( ; i < size.width; i++ )
            dst[i] = src2[i] != 0 ? saturate_cast<T>(scale/src2[i]) : 0;
    }
}


static void mul8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* scale)
{
    float fscale = (float)*(const double*)scale;
#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY
    if (std::fabs(fscale - 1) <= FLT_EPSILON &&
            ippiMul_8u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0) >= 0)
        return;
#endif
    mul_(src1, step1, src2, step2, dst, step, sz, fscale);
}

static void mul8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale);
}

static void mul16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* scale)
{
    float fscale = (float)*(const double*)scale;
#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY
    if (std::fabs(fscale - 1) <= FLT_EPSILON &&
            ippiMul_16u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0) >= 0)
        return;
#endif
    mul_(src1, step1, src2, step2, dst, step, sz, fscale);
}

static void mul16s( const short* src1, size_t step1, const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* scale)
{
    float fscale = (float)*(const double*)scale;
#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY
    if (std::fabs(fscale - 1) <= FLT_EPSILON &&
            ippiMul_16s_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0) >= 0)
        return;
#endif
    mul_(src1, step1, src2, step2, dst, step, sz, fscale);
}

static void mul32s( const int* src1, size_t step1, const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}
static void mul32f( const float* src1, size_t step1, const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* scale)
{
    float fscale = (float)*(const double*)scale;
#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY
    if (std::fabs(fscale - 1) <= FLT_EPSILON &&
            ippiMul_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)) >= 0)
        return;
#endif
    mul_(src1, step1, src2, step2, dst, step, sz, fscale);
}

static void mul64f( const double* src1, size_t step1, const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}
static void div8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* scale)
{
    if( src1 )
        div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
    else
        recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                  schar* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div16s( const short* src1, size_t step1, const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div32s( const int* src1, size_t step1, const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div32f( const float* src1, size_t step1, const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div64f( const double* src1, size_t step1, const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                  uchar* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                  schar* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                   ushort* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip16s( const short* src1, size_t step1, const short* src2, size_t step2,
                   short* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}
static void recip32s( const int* src1, size_t step1, const int* src2, size_t step2,
                   int* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip32f( const float* src1, size_t step1, const float* src2, size_t step2,
                   float* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip64f( const double* src1, size_t step1, const double* src2, size_t step2,
                   double* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}


static BinaryFunc* getMulTab()
{
    static BinaryFunc mulTab[] =
    {
        (BinaryFunc)mul8u, (BinaryFunc)mul8s, (BinaryFunc)mul16u,
        (BinaryFunc)mul16s, (BinaryFunc)mul32s, (BinaryFunc)mul32f,
        (BinaryFunc)mul64f, 0
    };

    return mulTab;
}

static BinaryFunc* getDivTab()
{
    static BinaryFunc divTab[] =
    {
        (BinaryFunc)div8u, (BinaryFunc)div8s, (BinaryFunc)div16u,
        (BinaryFunc)div16s, (BinaryFunc)div32s, (BinaryFunc)div32f,
        (BinaryFunc)div64f, 0
    };

    return divTab;
}

static BinaryFunc* getRecipTab()
{
    static BinaryFunc recipTab[] =
    {
        (BinaryFunc)recip8u, (BinaryFunc)recip8s, (BinaryFunc)recip16u,
        (BinaryFunc)recip16s, (BinaryFunc)recip32s, (BinaryFunc)recip32f,
        (BinaryFunc)recip64f, 0
    };

    return recipTab;
}

}

void cv::multiply(InputArray src1, InputArray src2,
                  OutputArray dst, double scale, int dtype)
{
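    // dst = saturate(scale*src1*src2); the plain OCL_OP_MUL kernel is used only when
    // scale is exactly 1, otherwise the scaled variant is selected.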
    arithm_op(src1, src2, dst, noArray(), dtype, getMulTab(),
              true, &scale, std::abs(scale - 1.0) < DBL_EPSILON ? OCL_OP_MUL : OCL_OP_MUL_SCALE);
}

void cv::divide(InputArray src1, InputArray src2,
                OutputArray dst, double scale, int dtype)
{
    arithm_op(src1, src2, dst, noArray(), dtype, getDivTab(), true, &scale, OCL_OP_DIV_SCALE);
}

void cv::divide(double scale, InputArray src2,
                OutputArray dst, int dtype)
{
    arithm_op(src2, src2, dst, noArray(), dtype, getRecipTab(), true, &scale, OCL_OP_RECIP_SCALE);
}

/****************************************************************************************\
*                                      addWeighted                                       *
\****************************************************************************************/

namespace cv
{

template<typename T, typename WT> static void
addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2,
              T* dst, size_t step, Size size, void* _scalars )
{
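    // dst = saturate(src1*alpha + src2*beta + gamma), accumulated in the wider type WT
    // chosen by the per-depth wrappers below (float for 8/16-bit inputs, double otherwise).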
    const double* scalars = (const double*)_scalars;
    WT alpha = (WT)scalars[0], beta = (WT)scalars[1], gamma = (WT)scalars[2];
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    for( ; size.height--; src1 += step1, src2 += step2, dst += step )
    {
        int x = 0;
        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            T t0 = saturate_cast<T>(src1[x]*alpha + src2[x]*beta + gamma);
            T t1 = saturate_cast<T>(src1[x+1]*alpha + src2[x+1]*beta + gamma);
            dst[x] = t0; dst[x+1] = t1;

            t0 = saturate_cast<T>(src1[x+2]*alpha + src2[x+2]*beta + gamma);
            t1 = saturate_cast<T>(src1[x+3]*alpha + src2[x+3]*beta + gamma);
            dst[x+2] = t0; dst[x+3] = t1;
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<T>(src1[x]*alpha + src2[x]*beta + gamma);
    }
}


static void
addWeighted8u( const uchar* src1, size_t step1,
               const uchar* src2, size_t step2,
               uchar* dst, size_t step, Size size,
               void* _scalars )
{
    const double* scalars = (const double*)_scalars;
    float alpha = (float)scalars[0], beta = (float)scalars[1], gamma = (float)scalars[2];

    for( ; size.height--; src1 += step1, src2 += step2, dst += step )
    {
        int x = 0;

#if CV_SSE2
        if( USE_SSE2 )
        {
            __m128 a4 = _mm_set1_ps(alpha), b4 = _mm_set1_ps(beta), g4 = _mm_set1_ps(gamma);
            __m128i z = _mm_setzero_si128();

            for( ; x <= size.width - 8; x += 8 )
            {
                __m128i u = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(src1 + x)), z);
                __m128i v = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(src2 + x)), z);

                __m128 u0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(u, z));
                __m128 u1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(u, z));
                __m128 v0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v, z));
                __m128 v1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v, z));

                u0 = _mm_add_ps(_mm_mul_ps(u0, a4), _mm_mul_ps(v0, b4));
                u1 = _mm_add_ps(_mm_mul_ps(u1, a4), _mm_mul_ps(v1, b4));
                u0 = _mm_add_ps(u0, g4); u1 = _mm_add_ps(u1, g4);

                u = _mm_packs_epi32(_mm_cvtps_epi32(u0), _mm_cvtps_epi32(u1));
                u = _mm_packus_epi16(u, u);

                _mm_storel_epi64((__m128i*)(dst + x), u);
            }
        }
#endif
        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            float t0, t1;
            t0 = CV_8TO32F(src1[x])*alpha + CV_8TO32F(src2[x])*beta + gamma;
            t1 = CV_8TO32F(src1[x+1])*alpha + CV_8TO32F(src2[x+1])*beta + gamma;

            dst[x] = saturate_cast<uchar>(t0);
            dst[x+1] = saturate_cast<uchar>(t1);

            t0 = CV_8TO32F(src1[x+2])*alpha + CV_8TO32F(src2[x+2])*beta + gamma;
            t1 = CV_8TO32F(src1[x+3])*alpha + CV_8TO32F(src2[x+3])*beta + gamma;

            dst[x+2] = saturate_cast<uchar>(t0);
            dst[x+3] = saturate_cast<uchar>(t1);
        }
        #endif

        for( ; x < size.width; x++ )
        {
            float t0 = CV_8TO32F(src1[x])*alpha + CV_8TO32F(src2[x])*beta + gamma;
            dst[x] = saturate_cast<uchar>(t0);
        }
    }
}

static void addWeighted8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                           schar* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<schar, float>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                            ushort* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<ushort, float>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted16s( const short* src1, size_t step1, const short* src2, size_t step2,
                            short* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<short, float>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted32s( const int* src1, size_t step1, const int* src2, size_t step2,
                            int* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<int, double>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted32f( const float* src1, size_t step1, const float* src2, size_t step2,
                            float* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<float, double>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted64f( const double* src1, size_t step1, const double* src2, size_t step2,
                            double* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<double, double>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static BinaryFunc* getAddWeightedTab()
{
    static BinaryFunc addWeightedTab[] =
    {
        (BinaryFunc)GET_OPTIMIZED(addWeighted8u), (BinaryFunc)GET_OPTIMIZED(addWeighted8s), (BinaryFunc)GET_OPTIMIZED(addWeighted16u),
        (BinaryFunc)GET_OPTIMIZED(addWeighted16s), (BinaryFunc)GET_OPTIMIZED(addWeighted32s), (BinaryFunc)addWeighted32f,
        (BinaryFunc)addWeighted64f, 0
    };

    return addWeightedTab;
}

}

void cv::addWeighted( InputArray src1, double alpha, InputArray src2,
                      double beta, double gamma, OutputArray dst, int dtype )
{
    double scalars[] = {alpha, beta, gamma};
    arithm_op(src1, src2, dst, noArray(), dtype, getAddWeightedTab(), true, scalars, OCL_OP_ADDW);
}


/****************************************************************************************\
*                                          compare                                       *
\****************************************************************************************/

namespace cv
{

template<typename T> static void
cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
     uchar* dst, size_t step, Size size, int code)
{
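    // Only CMP_LE/CMP_GT and CMP_EQ/CMP_NE are evaluated directly; CMP_GE/CMP_LT are
    // reduced to them by swapping the operands. The expression -(a OP b) ^ m yields
    // 0x00/0xFF masks, with m selecting whether the predicate or its negation is stored.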
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    if( code == CMP_GE || code == CMP_LT )
    {
        std::swap(src1, src2);
        std::swap(step1, step2);
        code = code == CMP_GE ? CMP_LE : CMP_GT;
    }

    if( code == CMP_GT || code == CMP_LE )
    {
        int m = code == CMP_GT ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = 0;
            #if CV_ENABLE_UNROLLED
            for( ; x <= size.width - 4; x += 4 )
            {
                int t0, t1;
                t0 = -(src1[x] > src2[x]) ^ m;
                t1 = -(src1[x+1] > src2[x+1]) ^ m;
                dst[x] = (uchar)t0; dst[x+1] = (uchar)t1;
                t0 = -(src1[x+2] > src2[x+2]) ^ m;
                t1 = -(src1[x+3] > src2[x+3]) ^ m;
                dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1;
            }
            #endif
            for( ; x < size.width; x++ )
                dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m);
        }
    }
    else if( code == CMP_EQ || code == CMP_NE )
    {
        int m = code == CMP_EQ ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = 0;
            #if CV_ENABLE_UNROLLED
            for( ; x <= size.width - 4; x += 4 )
            {
                int t0, t1;
                t0 = -(src1[x] == src2[x]) ^ m;
                t1 = -(src1[x+1] == src2[x+1]) ^ m;
                dst[x] = (uchar)t0; dst[x+1] = (uchar)t1;
                t0 = -(src1[x+2] == src2[x+2]) ^ m;
                t1 = -(src1[x+3] == src2[x+3]) ^ m;
                dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1;
            }
            #endif
            for( ; x < size.width; x++ )
                dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m);
        }
    }
}

#if ARITHM_USE_IPP
inline static IppCmpOp convert_cmp(int _cmpop)
{
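    // Map OpenCV comparison codes to their IPP counterparts; CMP_NE has no direct IPP
    // equivalent, so it maps to -1 and the callers skip the IPP branch for it.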
    return _cmpop == CMP_EQ ? ippCmpEq :
        _cmpop == CMP_GT ? ippCmpGreater :
        _cmpop == CMP_GE ? ippCmpGreaterEq :
        _cmpop == CMP_LT ? ippCmpLess :
        _cmpop == CMP_LE ? ippCmpLessEq :
        (IppCmpOp)-1;
}
#endif

static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
#if ARITHM_USE_IPP
    IppCmpOp op = convert_cmp(*(int *)_cmpop);
    if( op  >= 0 )
    {
        fixSteps(size, sizeof(dst[0]), step1, step2, step);
        if (0 <= ippiCompare_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(size), op))
            return;
        setIppErrorStatus();
    }
#endif
  //vz optimized  cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
    int code = *(int*)_cmpop;
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    if( code == CMP_GE || code == CMP_LT )
    {
        std::swap(src1, src2);
        std::swap(step1, step2);
        code = code == CMP_GE ? CMP_LE : CMP_GT;
    }

    if( code == CMP_GT || code == CMP_LE )
    {
        int m = code == CMP_GT ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x =0;
            #if CV_SSE2
            if( USE_SSE2 ){
                __m128i m128 = code == CMP_GT ? _mm_setzero_si128() : _mm_set1_epi8 (-1);
                __m128i c128 = _mm_set1_epi8 (-128);
                for( ; x <= size.width - 16; x += 16 )
                {
                    __m128i r00 = _mm_loadu_si128((const __m128i*)(src1 + x));
                    __m128i r10 = _mm_loadu_si128((const __m128i*)(src2 + x));
                    // no simd for 8u comparison, that's why we need the trick
                    r00 = _mm_sub_epi8(r00,c128);
                    r10 = _mm_sub_epi8(r10,c128);

                    r00 =_mm_xor_si128(_mm_cmpgt_epi8(r00, r10), m128);
                    _mm_storeu_si128((__m128i*)(dst + x),r00);

                }
            }
           #endif

            for( ; x < size.width; x++ ){
                dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m);
            }
        }
    }
    else if( code == CMP_EQ || code == CMP_NE )
    {
        int m = code == CMP_EQ ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = 0;
            #if CV_SSE2
            if( USE_SSE2 ){
                __m128i m128 =  code == CMP_EQ ? _mm_setzero_si128() : _mm_set1_epi8 (-1);
                for( ; x <= size.width - 16; x += 16 )
                {
                    __m128i r00 = _mm_loadu_si128((const __m128i*)(src1 + x));
                    __m128i r10 = _mm_loadu_si128((const __m128i*)(src2 + x));
                    r00 = _mm_xor_si128 ( _mm_cmpeq_epi8 (r00, r10), m128);
                    _mm_storeu_si128((__m128i*)(dst + x), r00);
                }
            }
           #endif
           for( ; x < size.width; x++ )
                dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m);
        }
    }
}

static void cmp8s(const schar* src1, size_t step1, const schar* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
#if ARITHM_USE_IPP
    IppCmpOp op = convert_cmp(*(int *)_cmpop);
    if( op  >= 0 )
    {
        fixSteps(size, sizeof(dst[0]), step1, step2, step);
        if (0 <= ippiCompare_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(size), op))
            return;
        setIppErrorStatus();
    }
#endif
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp16s(const short* src1, size_t step1, const short* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
#if ARITHM_USE_IPP
    IppCmpOp op = convert_cmp(*(int *)_cmpop);
    if( op  > 0 )
    {
        fixSteps(size, sizeof(dst[0]), step1, step2, step);
        if (0 <= ippiCompare_16s_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(size), op))
            return;
        setIppErrorStatus();
    }
#endif
   //vz optimized cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);

    int code = *(int*)_cmpop;
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    if( code == CMP_GE || code == CMP_LT )
    {
        std::swap(src1, src2);
        std::swap(step1, step2);
        code = code == CMP_GE ? CMP_LE : CMP_GT;
    }

    if( code == CMP_GT || code == CMP_LE )
    {
        int m = code == CMP_GT ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x =0;
            #if CV_SSE2
            if( USE_SSE2){//
                __m128i m128 =  code == CMP_GT ? _mm_setzero_si128() : _mm_set1_epi16 (-1);
                for( ; x <= size.width - 16; x += 16 )
                {
                    __m128i r00 = _mm_loadu_si128((const __m128i*)(src1 + x));
                    __m128i r10 = _mm_loadu_si128((const __m128i*)(src2 + x));
                    r00 = _mm_xor_si128 ( _mm_cmpgt_epi16 (r00, r10), m128);
                    __m128i r01 = _mm_loadu_si128((const __m128i*)(src1 + x + 8));
                    __m128i r11 = _mm_loadu_si128((const __m128i*)(src2 + x + 8));
                    r01 = _mm_xor_si128 ( _mm_cmpgt_epi16 (r01, r11), m128);
                    r11 = _mm_packs_epi16(r00, r01);
                    _mm_storeu_si128((__m128i*)(dst + x), r11);
                }
                if( x <= size.width-8)
                {
                    __m128i r00 = _mm_loadu_si128((const __m128i*)(src1 + x));
                    __m128i r10 = _mm_loadu_si128((const __m128i*)(src2 + x));
                    r00 = _mm_xor_si128 ( _mm_cmpgt_epi16 (r00, r10), m128);
                    r10 = _mm_packs_epi16(r00, r00);
                    _mm_storel_epi64((__m128i*)(dst + x), r10);

                    x += 8;
                }
            }
           #endif

            for( ; x < size.width; x++ ){
                 dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m);
            }
        }
    }
    else if( code == CMP_EQ || code == CMP_NE )
    {
        int m = code == CMP_EQ ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = 0;
            #if CV_SSE2
            if( USE_SSE2 ){
                __m128i m128 =  code == CMP_EQ ? _mm_setzero_si128() : _mm_set1_epi16 (-1);
                for( ; x <= size.width - 16; x += 16 )
                {
                    __m128i r00 = _mm_loadu_si128((const __m128i*)(src1 + x));
                    __m128i r10 = _mm_loadu_si128((const __m128i*)(src2 + x));
                    r00 = _mm_xor_si128 ( _mm_cmpeq_epi16 (r00, r10), m128);
                    __m128i r01 = _mm_loadu_si128((const __m128i*)(src1 + x + 8));
                    __m128i r11 = _mm_loadu_si128((const __m128i*)(src2 + x + 8));
                    r01 = _mm_xor_si128 ( _mm_cmpeq_epi16 (r01, r11), m128);
                    r11 = _mm_packs_epi16(r00, r01);
                    _mm_storeu_si128((__m128i*)(dst + x), r11);
                }
                if( x <= size.width - 8)
                {
                    __m128i r00 = _mm_loadu_si128((const __m128i*)(src1 + x));
                    __m128i r10 = _mm_loadu_si128((const __m128i*)(src2 + x));
                    r00 = _mm_xor_si128 ( _mm_cmpeq_epi16 (r00, r10), m128);
                    r10 = _mm_packs_epi16(r00, r00);
                    _mm_storel_epi64((__m128i*)(dst + x), r10);

                    x += 8;
                }
            }
           #endif
           for( ; x < size.width; x++ )
                dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m);
        }
    }
}

static void cmp32s(const int* src1, size_t step1, const int* src2, size_t step2,
                   uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp32f(const float* src1, size_t step1, const float* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
#if ARITHM_USE_IPP
    IppCmpOp op = convert_cmp(*(int *)_cmpop);
    if( op  >= 0 )
    {
        fixSteps(size, sizeof(dst[0]), step1, step2, step);
        if (0 <= ippiCompare_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(size), op))
            return;
        setIppErrorStatus();
    }
#endif
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp64f(const double* src1, size_t step1, const double* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

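// Per-depth dispatch table for the element-wise comparison kernels,
// indexed by CV_MAT_DEPTH (CV_8U ... CV_64F).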
static BinaryFunc getCmpFunc(int depth)
{
    static BinaryFunc cmpTab[] =
    {
        (BinaryFunc)GET_OPTIMIZED(cmp8u), (BinaryFunc)GET_OPTIMIZED(cmp8s),
        (BinaryFunc)GET_OPTIMIZED(cmp16u), (BinaryFunc)GET_OPTIMIZED(cmp16s),
        (BinaryFunc)GET_OPTIMIZED(cmp32s),
        (BinaryFunc)GET_OPTIMIZED(cmp32f), (BinaryFunc)cmp64f,
        0
    };

    return cmpTab[depth];
}

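// Smallest and largest values representable in each depth; compare() and
// inRange() use them to detect scalars that fall outside the source type's range.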
static double getMinVal(int depth)
{
    static const double tab[] = {0, -128, 0, -32768, INT_MIN, -FLT_MAX, -DBL_MAX, 0};
    return tab[depth];
}

static double getMaxVal(int depth)
{
    static const double tab[] = {255, 127, 65535, 32767, INT_MAX, FLT_MAX, DBL_MAX, 0};
    return tab[depth];
}

#ifdef HAVE_OPENCL

static bool ocl_compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op, bool haveScalar)
{
    const ocl::Device& dev = ocl::Device::getDefault();
    bool doubleSupport = dev.doubleFPConfig() > 0;
    int type1 = _src1.type(), depth1 = CV_MAT_DEPTH(type1), cn = CV_MAT_CN(type1),
            type2 = _src2.type(), depth2 = CV_MAT_DEPTH(type2);

    if (!doubleSupport && depth1 == CV_64F)
        return false;

    if (!haveScalar && (!_src1.sameSize(_src2) || type1 != type2))
        return false;

    int kercn = haveScalar ? cn : ocl::predictOptimalVectorWidth(_src1, _src2, _dst);
    // Workaround for bug with "?:" operator in AMD OpenCL compiler
    if (depth1 >= CV_16U)
        kercn = 1;

    int scalarcn = kercn == 3 ? 4 : kercn;
    const char * const operationMap[] = { "==", ">", ">=", "<", "<=", "!=" };
    char cvt[40];

    String opts = format("-D %s -D srcT1=%s -D dstT=%s -D workT=srcT1 -D cn=%d"
                         " -D convertToDT=%s -D OP_CMP -D CMP_OPERATOR=%s -D srcT1_C1=%s"
                         " -D srcT2_C1=%s -D dstT_C1=%s -D workST=%s%s",
                         haveScalar ? "UNARY_OP" : "BINARY_OP",
                         ocl::typeToStr(CV_MAKE_TYPE(depth1, kercn)),
                         ocl::typeToStr(CV_8UC(kercn)), kercn,
                         ocl::convertTypeStr(depth1, CV_8U, kercn, cvt),
                         operationMap[op], ocl::typeToStr(depth1),
                         ocl::typeToStr(depth1), ocl::typeToStr(CV_8U),
                         ocl::typeToStr(CV_MAKE_TYPE(depth1, scalarcn)),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : "");

    ocl::Kernel k("KF", ocl::core::arithm_oclsrc, opts);
    if (k.empty())
        return false;

    UMat src1 = _src1.getUMat();
    Size size = src1.size();
    _dst.create(size, CV_8UC(cn));
    UMat dst = _dst.getUMat();

    if (haveScalar)
    {
        size_t esz = CV_ELEM_SIZE1(type1) * scalarcn;
        double buf[4] = { 0, 0, 0, 0 };
        Mat src2 = _src2.getMat();

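        // Floating-point scalars are unrolled as-is. Integer sources read the
        // scalar as a double first: a value outside the range of depth1 makes the
        // comparison result constant (handled via setTo below), and a fractional
        // value is rounded toward the side that keeps the integer comparison
        // equivalent.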
        if( depth1 > CV_32S )
            convertAndUnrollScalar( src2, depth1, (uchar *)buf, kercn );
        else
        {
            double fval = 0;
            getConvertFunc(depth2, CV_64F)(src2.data, 1, 0, 1, (uchar *)&fval, 1, Size(1, 1), 0);
            if( fval < getMinVal(depth1) )
                return dst.setTo(Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0)), true;

            if( fval > getMaxVal(depth1) )
                return dst.setTo(Scalar::all(op == CMP_LT || op == CMP_LE || op == CMP_NE ? 255 : 0)), true;

            int ival = cvRound(fval);
            if( fval != ival )
            {
                if( op == CMP_LT || op == CMP_GE )
                    ival = cvCeil(fval);
                else if( op == CMP_LE || op == CMP_GT )
                    ival = cvFloor(fval);
                else
                    return dst.setTo(Scalar::all(op == CMP_NE ? 255 : 0)), true;
            }
            convertAndUnrollScalar(Mat(1, 1, CV_32S, &ival), depth1, (uchar *)buf, kercn);
        }

        ocl::KernelArg scalararg = ocl::KernelArg(0, 0, 0, 0, buf, esz);

        k.args(ocl::KernelArg::ReadOnlyNoSize(src1, cn, kercn),
               ocl::KernelArg::WriteOnly(dst, cn, kercn), scalararg);
    }
    else
    {
        UMat src2 = _src2.getUMat();

        k.args(ocl::KernelArg::ReadOnlyNoSize(src1),
               ocl::KernelArg::ReadOnlyNoSize(src2),
               ocl::KernelArg::WriteOnly(dst, cn, kercn));
    }

    size_t globalsize[2] = { dst.cols * cn / kercn, dst.rows };
    return k.run(2, globalsize, NULL, false);
}

#endif

}

void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op)
{
    CV_Assert( op == CMP_LT || op == CMP_LE || op == CMP_EQ ||
               op == CMP_NE || op == CMP_GE || op == CMP_GT );

    bool haveScalar = false;

    if ((_src1.isMatx() + _src2.isMatx()) == 1
            || !_src1.sameSize(_src2)
            || _src1.type() != _src2.type())
    {
        if (checkScalar(_src1, _src2.type(), _src1.kind(), _src2.kind()))
        {
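            // Swapping the operands requires mirroring the inequality
            // (e.g. src1 < src2 becomes src2 > src1).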
            op = op == CMP_LT ? CMP_GT : op == CMP_LE ? CMP_GE :
                op == CMP_GE ? CMP_LE : op == CMP_GT ? CMP_LT : op;
            // src1 is a scalar; swap it with src2
            compare(_src2, _src1, _dst, op);
            return;
        }
        else if( !checkScalar(_src2, _src1.type(), _src2.kind(), _src1.kind()) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The operation is neither 'array op array' (where arrays have the same size and the same type), "
                     "nor 'array op scalar', nor 'scalar op array'" );
        haveScalar = true;
    }

    CV_OCL_RUN(_src1.dims() <= 2 && _src2.dims() <= 2 && _dst.isUMat(),
               ocl_compare(_src1, _src2, _dst, op, haveScalar))

    int kind1 = _src1.kind(), kind2 = _src2.kind();
    Mat src1 = _src1.getMat(), src2 = _src2.getMat();

    if( kind1 == kind2 && src1.dims <= 2 && src2.dims <= 2 && src1.size() == src2.size() && src1.type() == src2.type() )
    {
        int cn = src1.channels();
        _dst.create(src1.size(), CV_8UC(cn));
        Mat dst = _dst.getMat();
        Size sz = getContinuousSize(src1, src2, dst, src1.channels());
        getCmpFunc(src1.depth())(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op);
        return;
    }

    int cn = src1.channels(), depth1 = src1.depth(), depth2 = src2.depth();

    _dst.create(src1.dims, src1.size, CV_8UC(cn));
    src1 = src1.reshape(1); src2 = src2.reshape(1);
    Mat dst = _dst.getMat().reshape(1);

    size_t esz = src1.elemSize();
    size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz;
    BinaryFunc func = getCmpFunc(depth1);

    if( !haveScalar )
    {
        const Mat* arrays[] = { &src1, &src2, &dst, 0 };
        uchar* ptrs[3];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size;

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func( ptrs[0], 0, ptrs[1], 0, ptrs[2], 0, Size((int)total, 1), &op );
    }
    else
    {
        const Mat* arrays[] = { &src1, &dst, 0 };
        uchar* ptrs[2];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = std::min(total, blocksize0);

        AutoBuffer<uchar> _buf(blocksize*esz);
        uchar *buf = _buf;

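        // Same scalar handling as in the OpenCL path: an out-of-range scalar yields
        // a constant mask, and a fractional scalar is rounded so that the integer
        // comparison remains equivalent.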
        if( depth1 > CV_32S )
            convertAndUnrollScalar( src2, depth1, buf, blocksize );
        else
        {
            double fval=0;
            getConvertFunc(depth2, CV_64F)(src2.data, 1, 0, 1, (uchar*)&fval, 1, Size(1,1), 0);
            if( fval < getMinVal(depth1) )
            {
                dst = Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0);
                return;
            }

            if( fval > getMaxVal(depth1) )
            {
                dst = Scalar::all(op == CMP_LT || op == CMP_LE || op == CMP_NE ? 255 : 0);
                return;
            }

            int ival = cvRound(fval);
            if( fval != ival )
            {
                if( op == CMP_LT || op == CMP_GE )
                    ival = cvCeil(fval);
                else if( op == CMP_LE || op == CMP_GT )
                    ival = cvFloor(fval);
                else
                {
                    dst = Scalar::all(op == CMP_NE ? 255 : 0);
                    return;
                }
            }
            convertAndUnrollScalar(Mat(1, 1, CV_32S, &ival), depth1, buf, blocksize);
        }

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)MIN(total - j, blocksize);
                func( ptrs[0], 0, buf, 0, ptrs[1], 0, Size(bsz, 1), &op);
                ptrs[0] += bsz*esz;
                ptrs[1] += bsz;
            }
        }
    }
}

/****************************************************************************************\
*                                        inRange                                         *
\****************************************************************************************/

namespace cv
{

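// Generic inRange kernel: dst[x] is set to 255 where src2[x] <= src1[x] <= src3[x]
// (src2/src3 hold the lower/upper bounds), and to 0 otherwise.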
template<typename T> static void
inRange_(const T* src1, size_t step1, const T* src2, size_t step2,
         const T* src3, size_t step3, uchar* dst, size_t step,
         Size size)
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step3 /= sizeof(src3[0]);

    for( ; size.height--; src1 += step1, src2 += step2, src3 += step3, dst += step )
    {
        int x = 0;
        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            int t0, t1;
            t0 = src2[x] <= src1[x] && src1[x] <= src3[x];
            t1 = src2[x+1] <= src1[x+1] && src1[x+1] <= src3[x+1];
            dst[x] = (uchar)-t0; dst[x+1] = (uchar)-t1;
            t0 = src2[x+2] <= src1[x+2] && src1[x+2] <= src3[x+2];
            t1 = src2[x+3] <= src1[x+3] && src1[x+3] <= src3[x+3];
            dst[x+2] = (uchar)-t0; dst[x+3] = (uchar)-t1;
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = (uchar)-(src2[x] <= src1[x] && src1[x] <= src3[x]);
    }
}


static void inRange8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                      const uchar* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange8s(const schar* src1, size_t step1, const schar* src2, size_t step2,
                      const schar* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                       const ushort* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange16s(const short* src1, size_t step1, const short* src2, size_t step2,
                       const short* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange32s(const int* src1, size_t step1, const int* src2, size_t step2,
                       const int* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange32f(const float* src1, size_t step1, const float* src2, size_t step2,
                       const float* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange64f(const double* src1, size_t step1, const double* src2, size_t step2,
                       const double* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

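// Collapses a cn-channel mask into a single-channel one by AND-ing the
// per-channel bytes, processing up to four channels per pass.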
static void inRangeReduce(const uchar* src, uchar* dst, size_t len, int cn)
{
    int k = cn % 4 ? cn % 4 : 4;
    size_t i, j;
    if( k == 1 )
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j];
    else if( k == 2 )
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j] & src[j+1];
    else if( k == 3 )
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j] & src[j+1] & src[j+2];
    else
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j] & src[j+1] & src[j+2] & src[j+3];

    for( ; k < cn; k += 4 )
    {
        for( i = 0, j = k; i < len; i++, j += cn )
            dst[i] &= src[j] & src[j+1] & src[j+2] & src[j+3];
    }
}

typedef void (*InRangeFunc)( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                             const uchar* src3, size_t step3, uchar* dst, size_t step, Size sz );

static InRangeFunc getInRangeFunc(int depth)
{
    static InRangeFunc inRangeTab[] =
    {
        (InRangeFunc)GET_OPTIMIZED(inRange8u), (InRangeFunc)GET_OPTIMIZED(inRange8s), (InRangeFunc)GET_OPTIMIZED(inRange16u),
        (InRangeFunc)GET_OPTIMIZED(inRange16s), (InRangeFunc)GET_OPTIMIZED(inRange32s), (InRangeFunc)GET_OPTIMIZED(inRange32f),
        (InRangeFunc)inRange64f, 0
    };

    return inRangeTab[depth];
}

#ifdef HAVE_OPENCL

static bool ocl_inRange( InputArray _src, InputArray _lowerb,
                         InputArray _upperb, OutputArray _dst )
{
    int skind = _src.kind(), lkind = _lowerb.kind(), ukind = _upperb.kind();
    Size ssize = _src.size(), lsize = _lowerb.size(), usize = _upperb.size();
    int stype = _src.type(), ltype = _lowerb.type(), utype = _upperb.type();
    int sdepth = CV_MAT_DEPTH(stype), ldepth = CV_MAT_DEPTH(ltype), udepth = CV_MAT_DEPTH(utype);
    int cn = CV_MAT_CN(stype);
    bool lbScalar = false, ubScalar = false;

    if( (lkind == _InputArray::MATX && skind != _InputArray::MATX) ||
        ssize != lsize || stype != ltype )
    {
        if( !checkScalar(_lowerb, stype, lkind, skind) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The lower boundary is neither an array of the same size and same type as src, nor a scalar");
        lbScalar = true;
    }

    if( (ukind == _InputArray::MATX && skind != _InputArray::MATX) ||
        ssize != usize || stype != utype )
    {
        if( !checkScalar(_upperb, stype, ukind, skind) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The upper boundary is neither an array of the same size and same type as src, nor a scalar");
        ubScalar = true;
    }

    if (lbScalar != ubScalar)
        return false;

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0,
            haveScalar = lbScalar && ubScalar;

    if ( (!doubleSupport && sdepth == CV_64F) ||
         (!haveScalar && (sdepth != ldepth || sdepth != udepth)) )
        return false;

    ocl::Kernel ker("inrange", ocl::core::inrange_oclsrc,
                    format("%s-D cn=%d -D T=%s%s", haveScalar ? "-D HAVE_SCALAR " : "",
                           cn, ocl::typeToStr(sdepth), doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
    if (ker.empty())
        return false;

    _dst.create(ssize, CV_8UC1);
    UMat src = _src.getUMat(), dst = _dst.getUMat(), lscalaru, uscalaru;
    Mat lscalar, uscalar;

    if (lbScalar && ubScalar)
    {
        lscalar = _lowerb.getMat();
        uscalar = _upperb.getMat();

        size_t esz = src.elemSize();
        size_t blocksize = 36;

        AutoBuffer<uchar> _buf(blocksize*(((int)lbScalar + (int)ubScalar)*esz + cn) + 2*cn*sizeof(int) + 128);
        uchar *buf = alignPtr(_buf + blocksize*cn, 16);

        if( ldepth != sdepth && sdepth < CV_32S )
        {
            int* ilbuf = (int*)alignPtr(buf + blocksize*esz, 16);
            int* iubuf = ilbuf + cn;

            BinaryFunc sccvtfunc = getConvertFunc(ldepth, CV_32S);
            sccvtfunc(lscalar.data, 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
            sccvtfunc(uscalar.data, 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
            int minval = cvRound(getMinVal(sdepth)), maxval = cvRound(getMaxVal(sdepth));

            for( int k = 0; k < cn; k++ )
            {
                if( ilbuf[k] > iubuf[k] || ilbuf[k] > maxval || iubuf[k] < minval )
                    ilbuf[k] = minval+1, iubuf[k] = minval;
            }
            lscalar = Mat(cn, 1, CV_32S, ilbuf);
            uscalar = Mat(cn, 1, CV_32S, iubuf);
        }

        lscalar.convertTo(lscalar, stype);
        uscalar.convertTo(uscalar, stype);
    }
    else
    {
        lscalaru = _lowerb.getUMat();
        uscalaru = _upperb.getUMat();
    }

    ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
            dstarg = ocl::KernelArg::WriteOnly(dst);

    if (haveScalar)
    {
        lscalar.copyTo(lscalaru);
        uscalar.copyTo(uscalaru);

        ker.args(srcarg, dstarg, ocl::KernelArg::PtrReadOnly(lscalaru),
               ocl::KernelArg::PtrReadOnly(uscalaru));
    }
    else
        ker.args(srcarg, dstarg, ocl::KernelArg::ReadOnlyNoSize(lscalaru),
               ocl::KernelArg::ReadOnlyNoSize(uscalaru));

    size_t globalsize[2] = { ssize.width, ssize.height };
    return ker.run(2, globalsize, NULL, false);
}

#endif

}

void cv::inRange(InputArray _src, InputArray _lowerb,
                 InputArray _upperb, OutputArray _dst)
{
    CV_OCL_RUN(_src.dims() <= 2 && _lowerb.dims() <= 2 &&
               _upperb.dims() <= 2 && _dst.isUMat(),
               ocl_inRange(_src, _lowerb, _upperb, _dst))

    int skind = _src.kind(), lkind = _lowerb.kind(), ukind = _upperb.kind();
    Mat src = _src.getMat(), lb = _lowerb.getMat(), ub = _upperb.getMat();

    bool lbScalar = false, ubScalar = false;

    if( (lkind == _InputArray::MATX && skind != _InputArray::MATX) ||
        src.size != lb.size || src.type() != lb.type() )
    {
        if( !checkScalar(lb, src.type(), lkind, skind) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The lower boundary is neither an array of the same size and same type as src, nor a scalar");
        lbScalar = true;
    }

    if( (ukind == _InputArray::MATX && skind != _InputArray::MATX) ||
        src.size != ub.size || src.type() != ub.type() )
    {
        if( !checkScalar(ub, src.type(), ukind, skind) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The upper boundary is neither an array of the same size and same type as src, nor a scalar");
        ubScalar = true;
    }

    CV_Assert(lbScalar == ubScalar);

    int cn = src.channels(), depth = src.depth();

    size_t esz = src.elemSize();
    size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz;

    _dst.create(src.dims, src.size, CV_8UC1);
    Mat dst = _dst.getMat();
    InRangeFunc func = getInRangeFunc(depth);

    const Mat* arrays_sc[] = { &src, &dst, 0 };
    const Mat* arrays_nosc[] = { &src, &dst, &lb, &ub, 0 };
    uchar* ptrs[4];

    NAryMatIterator it(lbScalar && ubScalar ? arrays_sc : arrays_nosc, ptrs);
    size_t total = it.size, blocksize = std::min(total, blocksize0);

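    // One scratch allocation holds the intermediate per-channel mask (mbuf, used
    // when cn > 1), optionally the unrolled lower/upper bound blocks, and two small
    // int arrays for scalar conversion, each part aligned to 16 bytes.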
    AutoBuffer<uchar> _buf(blocksize*(((int)lbScalar + (int)ubScalar)*esz + cn) + 2*cn*sizeof(int) + 128);
    uchar *buf = _buf, *mbuf = buf, *lbuf = 0, *ubuf = 0;
    buf = alignPtr(buf + blocksize*cn, 16);

    if( lbScalar && ubScalar )
    {
        lbuf = buf;
        ubuf = buf = alignPtr(buf + blocksize*esz, 16);

        CV_Assert( lb.type() == ub.type() );
        int scdepth = lb.depth();

        if( scdepth != depth && depth < CV_32S )
        {
            int* ilbuf = (int*)alignPtr(buf + blocksize*esz, 16);
            int* iubuf = ilbuf + cn;

            BinaryFunc sccvtfunc = getConvertFunc(scdepth, CV_32S);
            sccvtfunc(lb.data, 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
            sccvtfunc(ub.data, 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
            int minval = cvRound(getMinVal(depth)), maxval = cvRound(getMaxVal(depth));

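            // An inverted or completely out-of-range bound pair is replaced by an
            // empty interval (lower > upper), so that channel's mask becomes all zeros.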
            for( int k = 0; k < cn; k++ )
            {
                if( ilbuf[k] > iubuf[k] || ilbuf[k] > maxval || iubuf[k] < minval )
                    ilbuf[k] = minval+1, iubuf[k] = minval;
            }
            lb = Mat(cn, 1, CV_32S, ilbuf);
            ub = Mat(cn, 1, CV_32S, iubuf);
        }

        convertAndUnrollScalar( lb, src.type(), lbuf, blocksize );
        convertAndUnrollScalar( ub, src.type(), ubuf, blocksize );
    }

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( size_t j = 0; j < total; j += blocksize )
        {
            int bsz = (int)MIN(total - j, blocksize);
            size_t delta = bsz*esz;
            uchar *lptr = lbuf, *uptr = ubuf;
            if( !lbScalar )
            {
                lptr = ptrs[2];
                ptrs[2] += delta;
            }
            if( !ubScalar )
            {
                int idx = !lbScalar ? 3 : 2;
                uptr = ptrs[idx];
                ptrs[idx] += delta;
            }
            func( ptrs[0], 0, lptr, 0, uptr, 0, cn == 1 ? ptrs[1] : mbuf, 0, Size(bsz*cn, 1));
            if( cn > 1 )
                inRangeReduce(mbuf, ptrs[1], bsz, cn);
            ptrs[0] += delta;
            ptrs[1] += bsz;
        }
    }
}

/****************************************************************************************\
*                                Earlier API: cvAdd etc.                                 *
\****************************************************************************************/

CV_IMPL void
cvNot( const CvArr* srcarr, CvArr* dstarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    cv::bitwise_not( src, dst );
}


CV_IMPL void
cvAnd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_and( src1, src2, dst, mask );
}


CV_IMPL void
cvOr( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_or( src1, src2, dst, mask );
}


CV_IMPL void
cvXor( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_xor( src1, src2, dst, mask );
}


CV_IMPL void
cvAndS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_and( src, (const cv::Scalar&)s, dst, mask );
}


CV_IMPL void
cvOrS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_or( src, (const cv::Scalar&)s, dst, mask );
}


CV_IMPL void
cvXorS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_xor( src, (const cv::Scalar&)s, dst, mask );
}


CV_IMPL void cvAdd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::add( src1, src2, dst, mask, dst.type() );
}


CV_IMPL void cvSub( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::subtract( src1, src2, dst, mask, dst.type() );
}


CV_IMPL void cvAddS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::add( src1, (const cv::Scalar&)value, dst, mask, dst.type() );
}


CV_IMPL void cvSubRS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::subtract( (const cv::Scalar&)value, src1, dst, mask, dst.type() );
}


CV_IMPL void cvMul( const CvArr* srcarr1, const CvArr* srcarr2,
                    CvArr* dstarr, double scale )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    cv::multiply( src1, src2, dst, scale, dst.type() );
}


CV_IMPL void cvDiv( const CvArr* srcarr1, const CvArr* srcarr2,
                    CvArr* dstarr, double scale )
{
    cv::Mat src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src2.size == dst.size && src2.channels() == dst.channels() );

    if( srcarr1 )
        cv::divide( cv::cvarrToMat(srcarr1), src2, dst, scale, dst.type() );
    else
        cv::divide( scale, src2, dst, dst.type() );
}


CV_IMPL void
cvAddWeighted( const CvArr* srcarr1, double alpha,
               const CvArr* srcarr2, double beta,
               double gamma, CvArr* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    cv::addWeighted( src1, alpha, src2, beta, gamma, dst, dst.type() );
}


CV_IMPL  void
cvAbsDiff( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::absdiff( src1, cv::cvarrToMat(srcarr2), dst );
}


CV_IMPL void
cvAbsDiffS( const CvArr* srcarr1, CvArr* dstarr, CvScalar scalar )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::absdiff( src1, (const cv::Scalar&)scalar, dst );
}


CV_IMPL void
cvInRange( const void* srcarr1, const void* srcarr2,
           const void* srcarr3, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::inRange( src1, cv::cvarrToMat(srcarr2), cv::cvarrToMat(srcarr3), dst );
}


CV_IMPL void
cvInRangeS( const void* srcarr1, CvScalar lowerb, CvScalar upperb, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::inRange( src1, (const cv::Scalar&)lowerb, (const cv::Scalar&)upperb, dst );
}


CV_IMPL void
cvCmp( const void* srcarr1, const void* srcarr2, void* dstarr, int cmp_op )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::compare( src1, cv::cvarrToMat(srcarr2), dst, cmp_op );
}


CV_IMPL void
cvCmpS( const void* srcarr1, double value, void* dstarr, int cmp_op )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::compare( src1, value, dst, cmp_op );
}


CV_IMPL void
cvMin( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::min( src1, cv::cvarrToMat(srcarr2), dst );
}


CV_IMPL void
cvMax( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::max( src1, cv::cvarrToMat(srcarr2), dst );
}


CV_IMPL void
cvMinS( const void* srcarr1, double value, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::min( src1, value, dst );
}


CV_IMPL void
cvMaxS( const void* srcarr1, double value, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::max( src1, value, dst );
}

/* End of file. */