/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/* ////////////////////////////////////////////////////////////////////
//
//  Arithmetic and logical operations: +, -, *, /, &, |, ^, ~, abs ...
//
// */

#include "precomp.hpp"

namespace cv
{
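
// With a statically linked IPP, ippStaticInit() must be called once so the
// library dispatches to the code path matching the host CPU; the dummy global
// object below triggers that during static initialization.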

#if ARITHM_USE_IPP
struct IPPArithmInitializer
{
    IPPArithmInitializer(void)
    {
        ippStaticInit();
    }
};

IPPArithmInitializer ippArithmInitializer;
#endif

struct NOP {};
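
// NOP is the placeholder substituted for the SIMD functor (via the IF_SIMD
// macro below) when SSE2 support is compiled out. Each vBinOp* kernel pairs
// a scalar functor Op with a vector functor: wide SSE2 loads/stores on the
// fast path, then an unrolled scalar loop and a plain tail loop for the
// remaining columns of each row.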

template<typename T, class Op, class Op8>
void vBinOp8(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, Size sz)
{
    Op8 op8;
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
                        src2 += step2/sizeof(src2[0]),
                        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

    #if CV_SSE2
        if( USE_SSE2 )
        {
            for( ; x <= sz.width - 32; x += 32 )
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x));
                __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 16));
                r0 = op8(r0,_mm_loadu_si128((const __m128i*)(src2 + x)));
                r1 = op8(r1,_mm_loadu_si128((const __m128i*)(src2 + x + 16)));
                _mm_storeu_si128((__m128i*)(dst + x), r0);
                _mm_storeu_si128((__m128i*)(dst + x + 16), r1);
            }
            for( ; x <= sz.width - 8; x += 8 )
            {
                __m128i r0 = _mm_loadl_epi64((const __m128i*)(src1 + x));
                r0 = op8(r0,_mm_loadl_epi64((const __m128i*)(src2 + x)));
                _mm_storel_epi64((__m128i*)(dst + x), r0);
            }
        }
    #endif

        for( ; x <= sz.width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}

template<typename T, class Op, class Op16>
void vBinOp16(const T* src1, size_t step1, const T* src2, size_t step2,
              T* dst, size_t step, Size sz)
{
    Op16 op16;
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
        src2 += step2/sizeof(src2[0]),
        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

    #if CV_SSE2
        if( USE_SSE2 )
        {
            for( ; x <= sz.width - 16; x += 16 )
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x));
                __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 8));
                r0 = op16(r0,_mm_loadu_si128((const __m128i*)(src2 + x)));
                r1 = op16(r1,_mm_loadu_si128((const __m128i*)(src2 + x + 8)));
                _mm_storeu_si128((__m128i*)(dst + x), r0);
                _mm_storeu_si128((__m128i*)(dst + x + 8), r1);
            }
            for( ; x <= sz.width - 4; x += 4 )
            {
                __m128i r0 = _mm_loadl_epi64((const __m128i*)(src1 + x));
                r0 = op16(r0,_mm_loadl_epi64((const __m128i*)(src2 + x)));
                _mm_storel_epi64((__m128i*)(dst + x), r0);
            }
        }
        else
    #endif

        for( ; x <= sz.width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}


template<class Op, class Op32>
void vBinOp32s(const int* src1, size_t step1, const int* src2, size_t step2,
               int* dst, size_t step, Size sz)
{
    Op32 op32;
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
        src2 += step2/sizeof(src2[0]),
        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

#if CV_SSE2
        if( USE_SSE2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
                for( ; x <= sz.width - 8; x += 8 )
                {
                    __m128i r0 = _mm_load_si128((const __m128i*)(src1 + x));
                    __m128i r1 = _mm_load_si128((const __m128i*)(src1 + x + 4));
                    r0 = op32(r0,_mm_load_si128((const __m128i*)(src2 + x)));
                    r1 = op32(r1,_mm_load_si128((const __m128i*)(src2 + x + 4)));
                    _mm_store_si128((__m128i*)(dst + x), r0);
                    _mm_store_si128((__m128i*)(dst + x + 4), r1);
                }
            else
                for( ; x <= sz.width - 8; x += 8 )
                {
                    __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x));
                    __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 4));
                    r0 = op32(r0,_mm_loadu_si128((const __m128i*)(src2 + x)));
                    r1 = op32(r1,_mm_loadu_si128((const __m128i*)(src2 + x + 4)));
                    _mm_storeu_si128((__m128i*)(dst + x), r0);
                    _mm_storeu_si128((__m128i*)(dst + x + 4), r1);
                }
        }
#endif

        for( ; x <= sz.width - 4; x += 4 )
        {
            int v0 = op(src1[x], src2[x]);
            int v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}


template<class Op, class Op32>
void vBinOp32f(const float* src1, size_t step1, const float* src2, size_t step2,
               float* dst, size_t step, Size sz)
{
    Op32 op32;
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
        src2 += step2/sizeof(src2[0]),
        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

    #if CV_SSE2
        if( USE_SSE2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
                for( ; x <= sz.width - 8; x += 8 )
                {
                    __m128 r0 = _mm_load_ps(src1 + x);
                    __m128 r1 = _mm_load_ps(src1 + x + 4);
                    r0 = op32(r0,_mm_load_ps(src2 + x));
                    r1 = op32(r1,_mm_load_ps(src2 + x + 4));
                    _mm_store_ps(dst + x, r0);
                    _mm_store_ps(dst + x + 4, r1);
                }
            else
                for( ; x <= sz.width - 8; x += 8 )
                {
                    __m128 r0 = _mm_loadu_ps(src1 + x);
                    __m128 r1 = _mm_loadu_ps(src1 + x + 4);
                    r0 = op32(r0,_mm_loadu_ps(src2 + x));
                    r1 = op32(r1,_mm_loadu_ps(src2 + x + 4));
                    _mm_storeu_ps(dst + x, r0);
                    _mm_storeu_ps(dst + x + 4, r1);
                }
        }
    #endif
        for( ; x <= sz.width - 4; x += 4 )
        {
            float v0 = op(src1[x], src2[x]);
            float v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}

template<class Op, class Op64>
void vBinOp64f(const double* src1, size_t step1, const double* src2, size_t step2,
               double* dst, size_t step, Size sz)
{
    Op64 op64;
    Op op;

    for( ; sz.height--; src1 += step1/sizeof(src1[0]),
        src2 += step2/sizeof(src2[0]),
        dst += step/sizeof(dst[0]) )
    {
        int x = 0;

    #if CV_SSE2
        if( USE_SSE2 && (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
            for( ; x <= sz.width - 4; x += 4 )
            {
                __m128d r0 = _mm_load_pd(src1 + x);
                __m128d r1 = _mm_load_pd(src1 + x + 2);
                r0 = op64(r0,_mm_load_pd(src2 + x));
                r1 = op64(r1,_mm_load_pd(src2 + x + 2));
                _mm_store_pd(dst + x, r0);
                _mm_store_pd(dst + x + 2, r1);
            }
        else
    #endif
        for( ; x <= sz.width - 4; x += 4 )
        {
            double v0 = op(src1[x], src2[x]);
            double v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }

        for( ; x < sz.width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}

#if CV_SSE2

struct _VAdd8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epu8(a,b); }};
struct _VSub8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epu8(a,b); }};
struct _VMin8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); }};
struct _VMax8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); }};
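// Branch-free unsigned |a - b|: at least one of the two saturating
// subtractions is zero, so their sum equals the absolute difference.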
struct _VAbsDiff8u
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_add_epi8(_mm_subs_epu8(a,b),_mm_subs_epu8(b,a)); }
};

struct _VAdd8s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epi8(a,b); }};
struct _VSub8s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epi8(a,b); }};
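// SSE2 has no 8-bit signed min/max; blend with a compare mask instead:
// a ^ ((a ^ b) & m) selects b in the lanes where m is all-ones, a elsewhere.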
struct _VMin8s
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    {
        __m128i m = _mm_cmpgt_epi8(a, b);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    }
};
struct _VMax8s
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    {
        __m128i m = _mm_cmpgt_epi8(b, a);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    }
};
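// With m = (b > a) ? ~0 : 0, (d ^ m) - m negates d exactly in the lanes
// where the difference is negative, giving |a - b| with saturation.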
struct _VAbsDiff8s
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    {
        __m128i d = _mm_subs_epi8(a, b);
        __m128i m = _mm_cmpgt_epi8(b, a);
        return _mm_subs_epi8(_mm_xor_si128(d, m), m);
    }
};

struct _VAdd16u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epu16(a,b); }};
struct _VSub16u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epu16(a,b); }};
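// _mm_min_epu16/_mm_max_epu16 appear only in SSE4.1, so emulate them with
// saturating 16-bit arithmetic: min(a,b) = a - max(a-b,0) and
// max(a,b) = max(a-b,0) + b.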
struct _VMin16u
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); }
};
struct _VMax16u
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_adds_epu16(_mm_subs_epu16(a,b),b); }
};
struct _VAbsDiff16u
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_add_epi16(_mm_subs_epu16(a,b),_mm_subs_epu16(b,a)); }
};

struct _VAdd16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epi16(a,b); }};
struct _VSub16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epi16(a,b); }};
struct _VMin16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epi16(a,b); }};
struct _VMax16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epi16(a,b); }};
struct _VAbsDiff16s
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    {
        __m128i M = _mm_max_epi16(a,b), m = _mm_min_epi16(a,b);
        return _mm_subs_epi16(M, m);
    }
};

struct _VAdd32s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_add_epi32(a,b); }};
struct _VSub32s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_sub_epi32(a,b); }};
struct _VMin32s
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    {
        __m128i m = _mm_cmpgt_epi32(a, b);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    }
};
struct _VMax32s
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    {
        __m128i m = _mm_cmpgt_epi32(b, a);
        return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m));
    }
};
struct _VAbsDiff32s
{
    __m128i operator()(const __m128i& a, const __m128i& b) const
    {
        __m128i d = _mm_sub_epi32(a, b);
        __m128i m = _mm_cmpgt_epi32(b, a);
        return _mm_sub_epi32(_mm_xor_si128(d, m), m);
    }
};

struct _VAdd32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_add_ps(a,b); }};
struct _VSub32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_sub_ps(a,b); }};
struct _VMin32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_min_ps(a,b); }};
struct _VMax32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_max_ps(a,b); }};
static int CV_DECL_ALIGNED(16) v32f_absmask[] = { 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff };
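// ANDing (a - b) with 0x7fffffff clears the IEEE-754 sign bit, i.e. computes
// fabsf(a - b) in a single instruction.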
struct _VAbsDiff32f
{
    __m128 operator()(const __m128& a, const __m128& b) const
    {
        return _mm_and_ps(_mm_sub_ps(a,b), *(const __m128*)v32f_absmask);
    }
};

struct _VAdd64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_add_pd(a,b); }};
struct _VSub64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_sub_pd(a,b); }};
struct _VMin64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_min_pd(a,b); }};
struct _VMax64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_max_pd(a,b); }};

static int CV_DECL_ALIGNED(16) v64f_absmask[] = { 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff };
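// The little-endian int pair { 0xffffffff, 0x7fffffff } forms the 64-bit
// pattern 0x7fffffffffffffff: the per-lane sign-bit mask for doubles.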
struct _VAbsDiff64f
{
    __m128d operator()(const __m128d& a, const __m128d& b) const
    {
        return _mm_and_pd(_mm_sub_pd(a,b), *(const __m128d*)v64f_absmask);
    }
};

struct _VAnd8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_and_si128(a,b); }};
struct _VOr8u  { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_or_si128(a,b); }};
struct _VXor8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_xor_si128(a,b); }};
struct _VNot8u { __m128i operator()(const __m128i& a, const __m128i&) const { return _mm_andnot_si128(a, _mm_set1_epi32(-1)); }}; // andnot(a, ~0) == ~a

#endif

#if CV_SSE2
#define IF_SIMD(op) op
#else
#define IF_SIMD(op) NOP
#endif

template<> inline uchar OpAdd<uchar>::operator ()(uchar a, uchar b) const
{ return CV_FAST_CAST_8U(a + b); }
template<> inline uchar OpSub<uchar>::operator ()(uchar a, uchar b) const
{ return CV_FAST_CAST_8U(a - b); }

template<typename T> struct OpAbsDiff
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()(T a, T b) const { return (T)std::abs(a - b); }
};

template<> inline short OpAbsDiff<short>::operator ()(short a, short b) const
{ return saturate_cast<short>(std::abs(a - b)); }

template<> inline schar OpAbsDiff<schar>::operator ()(schar a, schar b) const
{ return saturate_cast<schar>(std::abs(a - b)); }

template<typename T, typename WT=T> struct OpAbsDiffS
{
    typedef T type1;
    typedef WT type2;
    typedef T rtype;
    T operator()(T a, WT b) const { return saturate_cast<T>(std::abs(a - b)); }
};

template<typename T> struct OpAnd
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a & b; }
};

template<typename T> struct OpOr
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a | b; }
};

template<typename T> struct OpXor
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a ^ b; }
};

template<typename T> struct OpNot
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T ) const { return ~a; }
};
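
// When a continuous matrix has been collapsed into a single row
// (sz.height == 1), the original strides no longer describe the data;
// substitute the row's byte length so the IPP calls see consistent steps.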

static inline void fixSteps(Size sz, size_t elemSize, size_t& step1, size_t& step2, size_t& step)
{
    if( sz.height == 1 )
        step1 = step2 = step = sz.width*elemSize;
}

static void add8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAdd_8u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0),
           (vBinOp8<uchar, OpAdd<uchar>, IF_SIMD(_VAdd8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void add8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp8<schar, OpAdd<schar>, IF_SIMD(_VAdd8s)>(src1, step1, src2, step2, dst, step, sz);
}

static void add16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAdd_16u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0),
            (vBinOp16<ushort, OpAdd<ushort>, IF_SIMD(_VAdd16u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void add16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAdd_16s_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0),
           (vBinOp16<short, OpAdd<short>, IF_SIMD(_VAdd16s)>(src1, step1, src2, step2, dst, step, sz)));
}

static void add32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32s<OpAdd<int>, IF_SIMD(_VAdd32s)>(src1, step1, src2, step2, dst, step, sz);
}

static void add32f( const float* src1, size_t step1,
                    const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAdd_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
           (vBinOp32f<OpAdd<float>, IF_SIMD(_VAdd32f)>(src1, step1, src2, step2, dst, step, sz)));
}

static void add64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
    vBinOp64f<OpAdd<double>, IF_SIMD(_VAdd64f)>(src1, step1, src2, step2, dst, step, sz);
}

static void sub8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiSub_8u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0),
           (vBinOp8<uchar, OpSub<uchar>, IF_SIMD(_VSub8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void sub8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp8<schar, OpSub<schar>, IF_SIMD(_VSub8s)>(src1, step1, src2, step2, dst, step, sz);
}

static void sub16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiSub_16u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0),
           (vBinOp16<ushort, OpSub<ushort>, IF_SIMD(_VSub16u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void sub16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiSub_16s_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0),
           (vBinOp16<short, OpSub<short>, IF_SIMD(_VSub16s)>(src1, step1, src2, step2, dst, step, sz)));
}

static void sub32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32s<OpSub<int>, IF_SIMD(_VSub32s)>(src1, step1, src2, step2, dst, step, sz);
}

static void sub32f( const float* src1, size_t step1,
                   const float* src2, size_t step2,
                   float* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiSub_32f_C1R(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz),
           (vBinOp32f<OpSub<float>, IF_SIMD(_VSub32f)>(src1, step1, src2, step2, dst, step, sz)));
}

static void sub64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
    vBinOp64f<OpSub<double>, IF_SIMD(_VSub64f)>(src1, step1, src2, step2, dst, step, sz);
}

template<> inline uchar OpMin<uchar>::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar OpMax<uchar>::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); }

static void max8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
  {
    uchar* s1 = (uchar*)src1;
    uchar* s2 = (uchar*)src2;
    uchar* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    for(int i = 0; i < sz.height; i++)
    {
      ippsMaxEvery_8u(s1, s2, d, sz.width);
      s1 += step1;
      s2 += step2;
      d  += step;
    }
  }
#else
  vBinOp8<uchar, OpMax<uchar>, IF_SIMD(_VMax8u)>(src1, step1, src2, step2, dst, step, sz);
#endif

//    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
//           ippiMaxEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
//           (vBinOp8<uchar, OpMax<uchar>, IF_SIMD(_VMax8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void max8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp8<schar, OpMax<schar>, IF_SIMD(_VMax8s)>(src1, step1, src2, step2, dst, step, sz);
}

static void max16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
  {
    ushort* s1 = (ushort*)src1;
    ushort* s2 = (ushort*)src2;
    ushort* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    for(int i = 0; i < sz.height; i++)
    {
      ippsMaxEvery_16u(s1, s2, d, sz.width);
      s1 = (ushort*)((uchar*)s1 + step1);
      s2 = (ushort*)((uchar*)s2 + step2);
      d  = (ushort*)((uchar*)d + step);
    }
  }
#else
  vBinOp16<ushort, OpMax<ushort>, IF_SIMD(_VMax16u)>(src1, step1, src2, step2, dst, step, sz);
#endif

//    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
//           ippiMaxEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
//           (vBinOp16<ushort, OpMax<ushort>, IF_SIMD(_VMax16u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void max16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
    vBinOp16<short, OpMax<short>, IF_SIMD(_VMax16s)>(src1, step1, src2, step2, dst, step, sz);
}

static void max32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32s<OpMax<int>, IF_SIMD(_VMax32s)>(src1, step1, src2, step2, dst, step, sz);
}

static void max32f( const float* src1, size_t step1,
                    const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
  {
    float* s1 = (float*)src1;
    float* s2 = (float*)src2;
    float* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    for(int i = 0; i < sz.height; i++)
    {
      ippsMaxEvery_32f(s1, s2, d, sz.width);
      s1 = (float*)((uchar*)s1 + step1);
      s2 = (float*)((uchar*)s2 + step2);
      d  = (float*)((uchar*)d + step);
    }
  }
#else
  vBinOp32f<OpMax<float>, IF_SIMD(_VMax32f)>(src1, step1, src2, step2, dst, step, sz);
#endif
//    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
//           ippiMaxEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
//           (vBinOp32f<OpMax<float>, IF_SIMD(_VMax32f)>(src1, step1, src2, step2, dst, step, sz)));
}

static void max64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
    vBinOp64f<OpMax<double>, IF_SIMD(_VMax64f)>(src1, step1, src2, step2, dst, step, sz);
}

static void min8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
  {
    uchar* s1 = (uchar*)src1;
    uchar* s2 = (uchar*)src2;
    uchar* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    for(int i = 0; i < sz.height; i++)
    {
      ippsMinEvery_8u(s1, s2, d, sz.width);
      s1 += step1;
      s2 += step2;
      d  += step;
    }
  }
#else
  vBinOp8<uchar, OpMin<uchar>, IF_SIMD(_VMin8u)>(src1, step1, src2, step2, dst, step, sz);
#endif

//    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
//           ippiMinEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
//           (vBinOp8<uchar, OpMin<uchar>, IF_SIMD(_VMin8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void min8s( const schar* src1, size_t step1,
                   const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* )
{
    vBinOp8<schar, OpMin<schar>, IF_SIMD(_VMin8s)>(src1, step1, src2, step2, dst, step, sz);
}

static void min16u( const ushort* src1, size_t step1,
                    const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
  {
    ushort* s1 = (ushort*)src1;
    ushort* s2 = (ushort*)src2;
    ushort* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    for(int i = 0; i < sz.height; i++)
    {
      ippsMinEvery_16u(s1, s2, d, sz.width);
      s1 = (ushort*)((uchar*)s1 + step1);
      s2 = (ushort*)((uchar*)s2 + step2);
      d  = (ushort*)((uchar*)d + step);
    }
  }
#else
  vBinOp16<ushort, OpMin<ushort>, IF_SIMD(_VMin16u)>(src1, step1, src2, step2, dst, step, sz);
#endif

//    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
//           ippiMinEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
//           (vBinOp16<ushort, OpMin<ushort>, IF_SIMD(_VMin16u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void min16s( const short* src1, size_t step1,
                    const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* )
{
    vBinOp16<short, OpMin<short>, IF_SIMD(_VMin16s)>(src1, step1, src2, step2, dst, step, sz);
}

static void min32s( const int* src1, size_t step1,
                    const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* )
{
    vBinOp32s<OpMin<int>, IF_SIMD(_VMin32s)>(src1, step1, src2, step2, dst, step, sz);
}

static void min32f( const float* src1, size_t step1,
                    const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* )
{
#if (ARITHM_USE_IPP == 1)
  {
    float* s1 = (float*)src1;
    float* s2 = (float*)src2;
    float* d  = dst;
    fixSteps(sz, sizeof(dst[0]), step1, step2, step);
    for(int i = 0; i < sz.height; i++)
    {
      ippsMinEvery_32f(s1, s2, d, sz.width);
      s1 = (float*)((uchar*)s1 + step1);
      s2 = (float*)((uchar*)s2 + step2);
      d  = (float*)((uchar*)d + step);
    }
  }
#else
  vBinOp32f<OpMin<float>, IF_SIMD(_VMin32f)>(src1, step1, src2, step2, dst, step, sz);
#endif
//    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
//           ippiMinEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
//           (vBinOp32f<OpMin<float>, IF_SIMD(_VMin32f)>(src1, step1, src2, step2, dst, step, sz)));
}

static void min64f( const double* src1, size_t step1,
                    const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* )
{
    vBinOp64f<OpMin<double>, IF_SIMD(_VMin64f)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff8u( const uchar* src1, size_t step1,
                       const uchar* src2, size_t step2,
                       uchar* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAbsDiff_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
           (vBinOp8<uchar, OpAbsDiff<uchar>, IF_SIMD(_VAbsDiff8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void absdiff8s( const schar* src1, size_t step1,
                       const schar* src2, size_t step2,
                       schar* dst, size_t step, Size sz, void* )
{
    vBinOp8<schar, OpAbsDiff<schar>, IF_SIMD(_VAbsDiff8s)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff16u( const ushort* src1, size_t step1,
                        const ushort* src2, size_t step2,
                        ushort* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAbsDiff_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
           (vBinOp16<ushort, OpAbsDiff<ushort>, IF_SIMD(_VAbsDiff16u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void absdiff16s( const short* src1, size_t step1,
                        const short* src2, size_t step2,
                        short* dst, size_t step, Size sz, void* )
{
    vBinOp16<short, OpAbsDiff<short>, IF_SIMD(_VAbsDiff16s)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff32s( const int* src1, size_t step1,
                        const int* src2, size_t step2,
                        int* dst, size_t step, Size sz, void* )
{
    vBinOp32s<OpAbsDiff<int>, IF_SIMD(_VAbsDiff32s)>(src1, step1, src2, step2, dst, step, sz);
}

static void absdiff32f( const float* src1, size_t step1,
                        const float* src2, size_t step2,
                        float* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAbsDiff_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
           (vBinOp32f<OpAbsDiff<float>, IF_SIMD(_VAbsDiff32f)>(src1, step1, src2, step2, dst, step, sz)));
}

static void absdiff64f( const double* src1, size_t step1,
                        const double* src2, size_t step2,
                        double* dst, size_t step, Size sz, void* )
{
    vBinOp64f<OpAbsDiff<double>, IF_SIMD(_VAbsDiff64f)>(src1, step1, src2, step2, dst, step, sz);
}


static void and8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiAnd_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
           (vBinOp8<uchar, OpAnd<uchar>, IF_SIMD(_VAnd8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void or8u( const uchar* src1, size_t step1,
                  const uchar* src2, size_t step2,
                  uchar* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiOr_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
           (vBinOp8<uchar, OpOr<uchar>, IF_SIMD(_VOr8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void xor8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiXor_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
           (vBinOp8<uchar, OpXor<uchar>, IF_SIMD(_VXor8u)>(src1, step1, src2, step2, dst, step, sz)));
}

static void not8u( const uchar* src1, size_t step1,
                   const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* )
{
    IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
           ippiNot_8u_C1R(src1, (int)step1, dst, (int)step, (IppiSize&)sz),
           (vBinOp8<uchar, OpNot<uchar>, IF_SIMD(_VNot8u)>(src1, step1, src2, step2, dst, step, sz)));
}

/****************************************************************************************\
*                                   logical operations                                   *
\****************************************************************************************/
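
// A Mat may stand in for a scalar operand only if it is a small continuous
// vector: a single element, one element per channel (1 x cn or cn x 1), or
// the 1 x 4 CV_64F layout of cv::Scalar (accepted for up to 4 channels).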

static inline bool checkScalar(const Mat& sc, int atype, int sckind, int akind)
{
    if( sc.dims > 2 || (sc.cols != 1 && sc.rows != 1) || !sc.isContinuous() )
        return false;
    int cn = CV_MAT_CN(atype);
    if( akind == _InputArray::MATX && sckind != _InputArray::MATX )
        return false;
    return sc.size() == Size(1, 1) || sc.size() == Size(1, cn) || sc.size() == Size(cn, 1) ||
        (sc.size() == Size(1, 4) && sc.type() == CV_64F && cn <= 4);
}
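
// Convert the scalar to the working element type and replicate it across
// 'blocksize' elements of scbuf, so that the scalar case can reuse the
// array-op-array kernels on whole blocks.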

static void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t blocksize )
{
    int scn = (int)sc.total(), cn = CV_MAT_CN(buftype);
    size_t esz = CV_ELEM_SIZE(buftype);
    getConvertFunc(sc.depth(), buftype)(sc.data, 0, 0, 0, scbuf, 0, Size(std::min(cn, scn), 1), 0);
    // unroll the scalar
    if( scn < cn )
    {
        CV_Assert( scn == 1 );
        size_t esz1 = CV_ELEM_SIZE1(buftype);
        for( size_t i = esz1; i < esz; i++ )
            scbuf[i] = scbuf[i - esz1];
    }
    for( size_t i = esz; i < blocksize*esz; i++ )
        scbuf[i] = scbuf[i - esz];

}
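
// Shared driver for the bitwise ops and min/max: 'tab' holds a single kernel
// for bitwise ops (which work on raw bytes, so c = elemSize) or one kernel
// per depth otherwise (c = channels). Handles array op array and array op
// scalar; an optional CV_8U mask is applied by computing each block into
// maskbuf and copying it to the destination through copymask.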

void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
               InputArray _mask, const BinaryFunc* tab, bool bitwise)
{
    int kind1 = _src1.kind(), kind2 = _src2.kind();
    Mat src1 = _src1.getMat(), src2 = _src2.getMat();
    bool haveMask = !_mask.empty(), haveScalar = false;
    BinaryFunc func;
    int c;

    if( src1.dims <= 2 && src2.dims <= 2 && kind1 == kind2 &&
        src1.size() == src2.size() && src1.type() == src2.type() && !haveMask )
    {
        _dst.create(src1.size(), src1.type());
        Mat dst = _dst.getMat();
        if( bitwise )
        {
            func = *tab;
            c = (int)src1.elemSize();
        }
        else
        {
            func = tab[src1.depth()];
            c = src1.channels();
        }

        Size sz = getContinuousSize(src1, src2, dst, c);
        func(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, 0);
        return;
    }

    if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 ||
        src1.size != src2.size || src1.type() != src2.type() )
    {
        if( checkScalar(src1, src2.type(), kind1, kind2) )
            // src1 is a scalar; swap it with src2
            swap(src1, src2);
        else if( !checkScalar(src2, src1.type(), kind2, kind1) )
            CV_Error( CV_StsUnmatchedSizes,
                      "The operation is neither 'array op array' (where arrays have the same size and type), "
                      "nor 'array op scalar', nor 'scalar op array'" );
        haveScalar = true;
    }

    size_t esz = src1.elemSize();
    size_t blocksize0 = (BLOCK_SIZE + esz-1)/esz;
    int cn = src1.channels();
    BinaryFunc copymask = 0;
    Mat mask;

    if( haveMask )
    {
        mask = _mask.getMat();
        CV_Assert( (mask.type() == CV_8UC1 || mask.type() == CV_8SC1) );
        CV_Assert( mask.size == src1.size );
        copymask = getCopyMaskFunc(esz);
    }

    AutoBuffer<uchar> _buf;
    uchar *scbuf = 0, *maskbuf = 0;

    _dst.create(src1.dims, src1.size, src1.type());
    Mat dst = _dst.getMat();

    if( bitwise )
    {
        func = *tab;
        c = (int)esz;
    }
    else
    {
        func = tab[src1.depth()];
        c = cn;
    }

    if( !haveScalar )
    {
        const Mat* arrays[] = { &src1, &src2, &dst, &mask, 0 };
        uchar* ptrs[4];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = total;

        if( haveMask )
        {
            blocksize = std::min(blocksize, blocksize0);
            _buf.allocate(blocksize*esz);
            maskbuf = _buf;
        }

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)std::min(total - j, blocksize);

                func( ptrs[0], 0, ptrs[1], 0, haveMask ? maskbuf : ptrs[2], 0, Size(bsz*c, 1), 0 );
                if( haveMask )
                {
                    copymask( maskbuf, 0, ptrs[3], 0, ptrs[2], 0, Size(bsz, 1), &esz );
                    ptrs[3] += bsz;
                }

                bsz *= (int)esz;
                ptrs[0] += bsz; ptrs[1] += bsz; ptrs[2] += bsz;
            }
        }
    }
    else
    {
        const Mat* arrays[] = { &src1, &dst, &mask, 0 };
        uchar* ptrs[3];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = std::min(total, blocksize0);

        _buf.allocate(blocksize*(haveMask ? 2 : 1)*esz + 32);
        scbuf = _buf;
        maskbuf = alignPtr(scbuf + blocksize*esz, 16);

        convertAndUnrollScalar( src2, src1.type(), scbuf, blocksize);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)std::min(total - j, blocksize);

                func( ptrs[0], 0, scbuf, 0, haveMask ? maskbuf : ptrs[1], 0, Size(bsz*c, 1), 0 );
                if( haveMask )
                {
                    copymask( maskbuf, 0, ptrs[2], 0, ptrs[1], 0, Size(bsz, 1), &esz );
                    ptrs[2] += bsz;
                }

                bsz *= (int)esz;
                ptrs[0] += bsz; ptrs[1] += bsz;
            }
        }
    }
}

static BinaryFunc maxTab[] =
{
    (BinaryFunc)max8u, (BinaryFunc)max8s, (BinaryFunc)max16u, (BinaryFunc)max16s,
    (BinaryFunc)max32s, (BinaryFunc)max32f, (BinaryFunc)max64f, 0
};

static BinaryFunc minTab[] =
{
    (BinaryFunc)min8u, (BinaryFunc)min8s, (BinaryFunc)min16u, (BinaryFunc)min16s,
    (BinaryFunc)min32s, (BinaryFunc)min32f, (BinaryFunc)min64f, 0
};

}

void cv::bitwise_and(InputArray a, InputArray b, OutputArray c, InputArray mask)
{
    BinaryFunc f = and8u;
    binary_op(a, b, c, mask, &f, true);
}

void cv::bitwise_or(InputArray a, InputArray b, OutputArray c, InputArray mask)
{
    BinaryFunc f = or8u;
    binary_op(a, b, c, mask, &f, true);
}

void cv::bitwise_xor(InputArray a, InputArray b, OutputArray c, InputArray mask)
{
    BinaryFunc f = xor8u;
    binary_op(a, b, c, mask, &f, true);
}

void cv::bitwise_not(InputArray a, OutputArray c, InputArray mask)
{
    BinaryFunc f = not8u;
    binary_op(a, a, c, mask, &f, true);
}
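
// Usage sketch (public API of these functions); with a mask, only the
// destination elements whose mask value is non-zero are overwritten:
//
//     Mat a, b, c, mask;           // a, b: same size/type; mask: CV_8UC1
//     bitwise_and(a, b, c);        // c = a & b, element-wise
//     bitwise_and(a, b, c, mask);  // masked variant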

void cv::max( InputArray src1, InputArray src2, OutputArray dst )
{
    binary_op(src1, src2, dst, None(), maxTab, false );
}

void cv::min( InputArray src1, InputArray src2, OutputArray dst )
{
    binary_op(src1, src2, dst, None(), minTab, false );
}

void cv::max(const Mat& src1, const Mat& src2, Mat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, None(), maxTab, false );
}

void cv::min(const Mat& src1, const Mat& src2, Mat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, None(), minTab, false );
}

void cv::max(const Mat& src1, double src2, Mat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, None(), maxTab, false );
}

void cv::min(const Mat& src1, double src2, Mat& dst)
{
    OutputArray _dst(dst);
    binary_op(src1, src2, _dst, None(), minTab, false );
}

/****************************************************************************************\
*                                      add/subtract                                      *
\****************************************************************************************/

namespace cv
{

void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
               InputArray _mask, int dtype, BinaryFunc* tab, bool muldiv=false, void* usrdata=0)
{
    int kind1 = _src1.kind(), kind2 = _src2.kind();
    Mat src1 = _src1.getMat(), src2 = _src2.getMat();
    bool haveMask = !_mask.empty();

    if( kind1 == kind2 && src1.dims <= 2 && src2.dims <= 2 &&
        src1.size() == src2.size() && src1.type() == src2.type() &&
        !haveMask && ((!_dst.fixedType() && (dtype < 0 || CV_MAT_DEPTH(dtype) == src1.depth())) ||
                       (_dst.fixedType() && _dst.type() == _src1.type())) )
    {
        _dst.create(src1.size(), src1.type());
        Mat dst = _dst.getMat();
        Size sz = getContinuousSize(src1, src2, dst, src1.channels());
        tab[src1.depth()](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, usrdata);
        return;
    }

    bool haveScalar = false, swapped12 = false;

    if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 ||
        src1.size != src2.size || src1.channels() != src2.channels() )
    {
        if( checkScalar(src1, src2.type(), kind1, kind2) )
        {
            // src1 is a scalar; swap it with src2
            swap(src1, src2);
            swapped12 = true;
        }
        else if( !checkScalar(src2, src1.type(), kind2, kind1) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The operation is neither 'array op array' (where arrays have the same size and the same number of channels), "
                     "nor 'array op scalar', nor 'scalar op array'" );
        haveScalar = true;
    }

    int cn = src1.channels(), depth1 = src1.depth(), depth2 = src2.depth(), wtype;
    BinaryFunc cvtsrc1 = 0, cvtsrc2 = 0, cvtdst = 0;

    if( dtype < 0 )
    {
        if( _dst.fixedType() )
            dtype = _dst.type();
        else
        {
            if( !haveScalar && src1.type() != src2.type() )
                CV_Error(CV_StsBadArg,
                     "When the input arrays in add/subtract/multiply/divide functions have different types, "
                     "the output array type must be explicitly specified");
            dtype = src1.type();
        }
    }
    dtype = CV_MAT_DEPTH(dtype);
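
    // Choose the intermediate "working" depth: wide enough to hold exact
    // results for the input depths (CV_16S for two 8-bit inputs, CV_32S for
    // integer inputs up to 32 bits), never narrower than the requested output
    // depth, and at least CV_32F for multiply/divide.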

    if( depth1 == depth2 && dtype == depth1 )
        wtype = dtype;
    else if( !muldiv )
    {
        wtype = depth1 <= CV_8S && depth2 <= CV_8S ? CV_16S :
                depth1 <= CV_32S && depth2 <= CV_32S ? CV_32S : std::max(depth1, depth2);
        wtype = std::max(wtype, dtype);

        // when the result of addition should be converted to an integer type,
        // and just one of the input arrays is floating-point, it makes sense to convert that input to integer type before the operation,
        // instead of converting the other input to floating-point and then converting the operation result back to integers.
        if( dtype < CV_32F && (depth1 < CV_32F || depth2 < CV_32F) )
            wtype = CV_32S;
    }
    else
    {
        wtype = std::max(depth1, std::max(depth2, CV_32F));
        wtype = std::max(wtype, dtype);
    }

    cvtsrc1 = depth1 == wtype ? 0 : getConvertFunc(depth1, wtype);
    cvtsrc2 = depth2 == depth1 ? cvtsrc1 : depth2 == wtype ? 0 : getConvertFunc(depth2, wtype);
    cvtdst = dtype == wtype ? 0 : getConvertFunc(wtype, dtype);

    dtype = CV_MAKETYPE(dtype, cn);
    wtype = CV_MAKETYPE(wtype, cn);

    size_t esz1 = src1.elemSize(), esz2 = src2.elemSize();
    size_t dsz = CV_ELEM_SIZE(dtype), wsz = CV_ELEM_SIZE(wtype);
    size_t blocksize0 = (size_t)(BLOCK_SIZE + wsz-1)/wsz;
    BinaryFunc copymask = 0;
    Mat mask;

    if( haveMask )
    {
        mask = _mask.getMat();
        CV_Assert( (mask.type() == CV_8UC1 || mask.type() == CV_8SC1) );
        CV_Assert( mask.size == src1.size );
        copymask = getCopyMaskFunc(dsz);
    }

    AutoBuffer<uchar> _buf;
    uchar *buf, *maskbuf = 0, *buf1 = 0, *buf2 = 0, *wbuf = 0;
    size_t bufesz = (cvtsrc1 ? wsz : 0) + (cvtsrc2 || haveScalar ? wsz : 0) + (cvtdst ? wsz : 0) + (haveMask ? dsz : 0);

    _dst.create(src1.dims, src1.size, dtype);
    Mat dst = _dst.getMat();
    BinaryFunc func = tab[CV_MAT_DEPTH(wtype)];

    if( !haveScalar )
    {
        const Mat* arrays[] = { &src1, &src2, &dst, &mask, 0 };
        uchar* ptrs[4];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = total;

        if( haveMask || cvtsrc1 || cvtsrc2 || cvtdst )
            blocksize = std::min(blocksize, blocksize0);

        _buf.allocate(bufesz*blocksize + 64);
        buf = _buf;
        if( cvtsrc1 )
            buf1 = buf, buf = alignPtr(buf + blocksize*wsz, 16);
        if( cvtsrc2 )
            buf2 = buf, buf = alignPtr(buf + blocksize*wsz, 16);
        wbuf = maskbuf = buf;
        if( cvtdst )
            buf = alignPtr(buf + blocksize*wsz, 16);
        if( haveMask )
            maskbuf = buf;

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)std::min(total - j, blocksize);
                Size bszn(bsz*cn, 1);
                const uchar *sptr1 = ptrs[0], *sptr2 = ptrs[1];
                uchar* dptr = ptrs[2];
                if( cvtsrc1 )
                {
                    cvtsrc1( sptr1, 0, 0, 0, buf1, 0, bszn, 0 );
                    sptr1 = buf1;
                }
                if( ptrs[0] == ptrs[1] )
                    sptr2 = sptr1;
                else if( cvtsrc2 )
                {
                    cvtsrc2( sptr2, 0, 0, 0, buf2, 0, bszn, 0 );
                    sptr2 = buf2;
                }

                if( !haveMask && !cvtdst )
                    func( sptr1, 0, sptr2, 0, dptr, 0, bszn, usrdata );
                else
                {
                    func( sptr1, 0, sptr2, 0, wbuf, 0, bszn, usrdata );
                    if( !haveMask )
                        cvtdst( wbuf, 0, 0, 0, dptr, 0, bszn, 0 );
                    else if( !cvtdst )
                    {
                        copymask( wbuf, 0, ptrs[3], 0, dptr, 0, Size(bsz, 1), &dsz );
                        ptrs[3] += bsz;
                    }
                    else
                    {
                        cvtdst( wbuf, 0, 0, 0, maskbuf, 0, bszn, 0 );
                        copymask( maskbuf, 0, ptrs[3], 0, dptr, 0, Size(bsz, 1), &dsz );
                        ptrs[3] += bsz;
                    }
                }
                ptrs[0] += bsz*esz1; ptrs[1] += bsz*esz2; ptrs[2] += bsz*dsz;
            }
        }
    }
    else
    {
        const Mat* arrays[] = { &src1, &dst, &mask, 0 };
        uchar* ptrs[3];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = std::min(total, blocksize0);

        _buf.allocate(bufesz*blocksize + 64);
        buf = _buf;
        if( cvtsrc1 )
            buf1 = buf, buf = alignPtr(buf + blocksize*wsz, 16);
        buf2 = buf; buf = alignPtr(buf + blocksize*wsz, 16);
        wbuf = maskbuf = buf;
        if( cvtdst )
            buf = alignPtr(buf + blocksize*wsz, 16);
        if( haveMask )
            maskbuf = buf;

        convertAndUnrollScalar( src2, wtype, buf2, blocksize);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)std::min(total - j, blocksize);
                Size bszn(bsz*cn, 1);
                const uchar *sptr1 = ptrs[0];
                const uchar* sptr2 = buf2;
                uchar* dptr = ptrs[1];

                if( cvtsrc1 )
                {
                    cvtsrc1( sptr1, 0, 0, 0, buf1, 0, bszn, 0 );
                    sptr1 = buf1;
                }

                if( swapped12 )
                    std::swap(sptr1, sptr2);

                if( !haveMask && !cvtdst )
                    func( sptr1, 0, sptr2, 0, dptr, 0, bszn, usrdata );
                else
                {
                    func( sptr1, 0, sptr2, 0, wbuf, 0, bszn, usrdata );
                    if( !haveMask )
                        cvtdst( wbuf, 0, 0, 0, dptr, 0, bszn, 0 );
                    else if( !cvtdst )
                    {
                        copymask( wbuf, 0, ptrs[2], 0, dptr, 0, Size(bsz, 1), &dsz );
                        ptrs[2] += bsz;
                    }
                    else
                    {
                        cvtdst( wbuf, 0, 0, 0, maskbuf, 0, bszn, 0 );
                        copymask( maskbuf, 0, ptrs[2], 0, dptr, 0, Size(bsz, 1), &dsz );
                        ptrs[2] += bsz;
                    }
                }
                ptrs[0] += bsz*esz1; ptrs[1] += bsz*dsz;
            }
1430 1431 1432
        }
    }
}
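
// Summary of the loop above (descriptive note, not code): each plane is
// processed in blocks of at most `blocksize` elements; a block is promoted to
// the work type (cvtsrc1/cvtsrc2), run through the element-wise kernel `func`,
// converted back to the destination depth (cvtdst), and merged into dst
// through the mask (copymask) when one is present:
//
//   src1 --cvtsrc1--> buf1 \
//                           func --> wbuf --cvtdst--> maskbuf --copymask--> dst
//   src2 --cvtsrc2--> buf2 /
//
// When neither a conversion nor a mask is needed, `func` writes straight
// into dst.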

static BinaryFunc addTab[] =
{
    (BinaryFunc)add8u, (BinaryFunc)add8s, (BinaryFunc)add16u, (BinaryFunc)add16s,
    (BinaryFunc)add32s, (BinaryFunc)add32f, (BinaryFunc)add64f, 0
};

static BinaryFunc subTab[] =
{
    (BinaryFunc)sub8u, (BinaryFunc)sub8s, (BinaryFunc)sub16u, (BinaryFunc)sub16s,
    (BinaryFunc)sub32s, (BinaryFunc)sub32f, (BinaryFunc)sub64f, 0
};

static BinaryFunc absdiffTab[] =
{
    (BinaryFunc)absdiff8u, (BinaryFunc)absdiff8s, (BinaryFunc)absdiff16u,
    (BinaryFunc)absdiff16s, (BinaryFunc)absdiff32s, (BinaryFunc)absdiff32f,
    (BinaryFunc)absdiff64f, 0
};

}

void cv::add( InputArray src1, InputArray src2, OutputArray dst,
          InputArray mask, int dtype )
{
    arithm_op(src1, src2, dst, mask, dtype, addTab );
}
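
// Usage sketch (illustrative only, assumes `using namespace cv`): add()
// saturates to the destination depth unless a wider dtype is requested:
//
//     Mat a(1, 4, CV_8U, Scalar(250)), b(1, 4, CV_8U, Scalar(10)), c;
//     add(a, b, c);                  // CV_8U result: every element is 255
//     add(a, b, c, Mat(), CV_16S);   // CV_16S result: every element is 260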

void cv::subtract( InputArray src1, InputArray src2, OutputArray dst,
               InputArray mask, int dtype )
{
    arithm_op(src1, src2, dst, mask, dtype, subTab );
}

void cv::absdiff( InputArray src1, InputArray src2, OutputArray dst )
{
    arithm_op(src1, src2, dst, None(), -1, absdiffTab);
}
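
// Note: absdiff() keeps the source type, so for two CV_8U arrays the result
// is |a - b| without the wrap-around a plain unsigned subtraction would give,
// e.g. absdiff of 10 and 200 yields 190.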

/****************************************************************************************\
*                                    multiply/divide                                     *
\****************************************************************************************/

namespace cv
{

template<typename T, typename WT> static void
mul_( const T* src1, size_t step1, const T* src2, size_t step2,
      T* dst, size_t step, Size size, WT scale )
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    if( scale == (WT)1. )
    {
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int i;
            for( i = 0; i <= size.width - 4; i += 4 )
            {
                T t0 = saturate_cast<T>(src1[i  ] * src2[i  ]);
                T t1 = saturate_cast<T>(src1[i+1] * src2[i+1]);
                dst[i  ] = t0; dst[i+1] = t1;

                t0 = saturate_cast<T>(src1[i+2] * src2[i+2]);
                t1 = saturate_cast<T>(src1[i+3] * src2[i+3]);
                dst[i+2] = t0; dst[i+3] = t1;
            }

            for( ; i < size.width; i++ )
                dst[i] = saturate_cast<T>(src1[i] * src2[i]);
        }
    }
    else
    {
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int i;
            for( i = 0; i <= size.width - 4; i += 4 )
            {
                T t0 = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
                T t1 = saturate_cast<T>(scale*(WT)src1[i+1]*src2[i+1]);
                dst[i] = t0; dst[i+1] = t1;

                t0 = saturate_cast<T>(scale*(WT)src1[i+2]*src2[i+2]);
                t1 = saturate_cast<T>(scale*(WT)src1[i+3]*src2[i+3]);
                dst[i+2] = t0; dst[i+3] = t1;
            }

            for( ; i < size.width; i++ )
                dst[i] = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
        }
    }
}
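
// div_ below batches the division: for four consecutive divisors it performs
// a single double-precision division and recovers each quotient by
// multiplication (see the comment inside the loop). If any of the four
// divisors is zero, the block falls back to per-element code, which defines
// division by zero as 0.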

template<typename T> static void
div_( const T* src1, size_t step1, const T* src2, size_t step2,
      T* dst, size_t step, Size size, double scale )
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    for( ; size.height--; src1 += step1, src2 += step2, dst += step )
    {
        int i = 0;
        for( ; i <= size.width - 4; i += 4 )
        {
            if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 )
            {
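                // one division for four quotients: with a = s0*s1, b = s2*s3
                // and d = scale/(a*b), b*d == scale/(s0*s1) and
                // a*d == scale/(s2*s3); multiplying by the sibling divisor
                // (e.g. s1 for lane 0) then recovers src1[k]*scale/s_k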
                double a = (double)src2[i] * src2[i+1];
                double b = (double)src2[i+2] * src2[i+3];
                double d = scale/(a * b);
                b *= d;
                a *= d;

                T z0 = saturate_cast<T>(src2[i+1] * ((double)src1[i] * b));
                T z1 = saturate_cast<T>(src2[i] * ((double)src1[i+1] * b));
                T z2 = saturate_cast<T>(src2[i+3] * ((double)src1[i+2] * a));
                T z3 = saturate_cast<T>(src2[i+2] * ((double)src1[i+3] * a));

                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
            else
            {
                T z0 = src2[i] != 0 ? saturate_cast<T>(src1[i]*scale/src2[i]) : 0;
                T z1 = src2[i+1] != 0 ? saturate_cast<T>(src1[i+1]*scale/src2[i+1]) : 0;
                T z2 = src2[i+2] != 0 ? saturate_cast<T>(src1[i+2]*scale/src2[i+2]) : 0;
                T z3 = src2[i+3] != 0 ? saturate_cast<T>(src1[i+3]*scale/src2[i+3]) : 0;

                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
        }

        for( ; i < size.width; i++ )
            dst[i] = src2[i] != 0 ? saturate_cast<T>(src1[i]*scale/src2[i]) : 0;
    }
}

template<typename T> static void
recip_( const T*, size_t, const T* src2, size_t step2,
        T* dst, size_t step, Size size, double scale )
{
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    for( ; size.height--; src2 += step2, dst += step )
    {
        int i = 0;
        for( ; i <= size.width - 4; i += 4 )
        {
            if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 )
            {
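                // same batched-reciprocal trick as in div_ above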
                double a = (double)src2[i] * src2[i+1];
                double b = (double)src2[i+2] * src2[i+3];
                double d = scale/(a * b);
                b *= d;
                a *= d;

                T z0 = saturate_cast<T>(src2[i+1] * b);
                T z1 = saturate_cast<T>(src2[i] * b);
                T z2 = saturate_cast<T>(src2[i+3] * a);
                T z3 = saturate_cast<T>(src2[i+2] * a);

                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
            else
            {
                T z0 = src2[i] != 0 ? saturate_cast<T>(scale/src2[i]) : 0;
                T z1 = src2[i+1] != 0 ? saturate_cast<T>(scale/src2[i+1]) : 0;
                T z2 = src2[i+2] != 0 ? saturate_cast<T>(scale/src2[i+2]) : 0;
                T z3 = src2[i+3] != 0 ? saturate_cast<T>(scale/src2[i+3]) : 0;

                dst[i] = z0; dst[i+1] = z1;
                dst[i+2] = z2; dst[i+3] = z3;
            }
        }

        for( ; i < size.width; i++ )
            dst[i] = src2[i] != 0 ? saturate_cast<T>(scale/src2[i]) : 0;
    }
}


static void mul8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale);
}

static void mul8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                   schar* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale);
}

static void mul16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale);
}

static void mul16s( const short* src1, size_t step1, const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale);
}

static void mul32s( const int* src1, size_t step1, const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void mul32f( const float* src1, size_t step1, const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale);
}

static void mul64f( const double* src1, size_t step1, const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* scale)
{
    mul_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                   uchar* dst, size_t step, Size sz, void* scale)
{
    if( src1 )
        div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
    else
        recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                  schar* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                    ushort* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div16s( const short* src1, size_t step1, const short* src2, size_t step2,
                    short* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div32s( const int* src1, size_t step1, const int* src2, size_t step2,
                    int* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div32f( const float* src1, size_t step1, const float* src2, size_t step2,
                    float* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void div64f( const double* src1, size_t step1, const double* src2, size_t step2,
                    double* dst, size_t step, Size sz, void* scale)
{
    div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                  uchar* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                  schar* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                   ushort* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip16s( const short* src1, size_t step1, const short* src2, size_t step2,
                   short* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip32s( const int* src1, size_t step1, const int* src2, size_t step2,
                   int* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip32f( const float* src1, size_t step1, const float* src2, size_t step2,
                   float* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}

static void recip64f( const double* src1, size_t step1, const double* src2, size_t step2,
                   double* dst, size_t step, Size sz, void* scale)
{
    recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale);
}


static BinaryFunc mulTab[] =
{
    (BinaryFunc)mul8u, (BinaryFunc)mul8s, (BinaryFunc)mul16u,
    (BinaryFunc)mul16s, (BinaryFunc)mul32s, (BinaryFunc)mul32f,
    (BinaryFunc)mul64f, 0
};

static BinaryFunc divTab[] =
{
    (BinaryFunc)div8u, (BinaryFunc)div8s, (BinaryFunc)div16u,
    (BinaryFunc)div16s, (BinaryFunc)div32s, (BinaryFunc)div32f,
    (BinaryFunc)div64f, 0
};

static BinaryFunc recipTab[] =
{
    (BinaryFunc)recip8u, (BinaryFunc)recip8s, (BinaryFunc)recip16u,
    (BinaryFunc)recip16s, (BinaryFunc)recip32s, (BinaryFunc)recip32f,
    (BinaryFunc)recip64f, 0
};

}

void cv::multiply(InputArray src1, InputArray src2,
                  OutputArray dst, double scale, int dtype)
{
    arithm_op(src1, src2, dst, None(), dtype, mulTab, true, &scale);
}

void cv::divide(InputArray src1, InputArray src2,
                OutputArray dst, double scale, int dtype)
{
    arithm_op(src1, src2, dst, None(), dtype, divTab, true, &scale);
}

void cv::divide(double scale, InputArray src2,
                OutputArray dst, int dtype)
{
    arithm_op(src2, src2, dst, None(), dtype, recipTab, true, &scale);
}
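
// Usage sketch (illustrative only, assumes `using namespace cv`):
//
//     Mat a(1, 3, CV_32F, Scalar(2.f)), b(1, 3, CV_32F, Scalar(4.f)), c;
//     multiply(a, b, c, 0.5);   // c = 0.5*a*b, element-wise -> 4
//     divide(a, b, c);          // c = a/b -> 0.5
//     divide(1., b, c);         // c = 1/b -> 0.25 (recipTab path)
//
// Elements where the divisor is zero come out as 0 (see div_/recip_ above).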

/****************************************************************************************\
*                                      addWeighted                                       *
\****************************************************************************************/

namespace cv
{

template<typename T, typename WT> static void
addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2,
              T* dst, size_t step, Size size, void* _scalars )
{
    const double* scalars = (const double*)_scalars;
    WT alpha = (WT)scalars[0], beta = (WT)scalars[1], gamma = (WT)scalars[2];
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    for( ; size.height--; src1 += step1, src2 += step2, dst += step )
    {
        int x = 0;
        for( ; x <= size.width - 4; x += 4 )
        {
            T t0 = saturate_cast<T>(src1[x]*alpha + src2[x]*beta + gamma);
            T t1 = saturate_cast<T>(src1[x+1]*alpha + src2[x+1]*beta + gamma);
            dst[x] = t0; dst[x+1] = t1;

            t0 = saturate_cast<T>(src1[x+2]*alpha + src2[x+2]*beta + gamma);
            t1 = saturate_cast<T>(src1[x+3]*alpha + src2[x+3]*beta + gamma);
            dst[x+2] = t0; dst[x+3] = t1;
        }

        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<T>(src1[x]*alpha + src2[x]*beta + gamma);
    }
}


static void
addWeighted8u( const uchar* src1, size_t step1,
               const uchar* src2, size_t step2,
               uchar* dst, size_t step, Size size,
               void* _scalars )
{
    const double* scalars = (const double*)_scalars;
    float alpha = (float)scalars[0], beta = (float)scalars[1], gamma = (float)scalars[2];

    for( ; size.height--; src1 += step1, src2 += step2, dst += step )
    {
        int x = 0;

#if CV_SSE2
        if( USE_SSE2 )
        {
            __m128 a4 = _mm_set1_ps(alpha), b4 = _mm_set1_ps(beta), g4 = _mm_set1_ps(gamma);
            __m128i z = _mm_setzero_si128();
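
            // process 8 pixels per iteration: widen u8 -> s32 -> f32 in two
            // quads, evaluate alpha*u + beta*v + gamma in single precision,
            // then pack back to u8 with saturation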
            for( ; x <= size.width - 8; x += 8 )
            {
                __m128i u = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(src1 + x)), z);
                __m128i v = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(src2 + x)), z);

                __m128 u0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(u, z));
                __m128 u1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(u, z));
                __m128 v0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v, z));
                __m128 v1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v, z));

                u0 = _mm_add_ps(_mm_mul_ps(u0, a4), _mm_mul_ps(v0, b4));
                u1 = _mm_add_ps(_mm_mul_ps(u1, a4), _mm_mul_ps(v1, b4));
                u0 = _mm_add_ps(u0, g4); u1 = _mm_add_ps(u1, g4);

                u = _mm_packs_epi32(_mm_cvtps_epi32(u0), _mm_cvtps_epi32(u1));
                u = _mm_packus_epi16(u, u);

                _mm_storel_epi64((__m128i*)(dst + x), u);
            }
        }
#endif
        for( ; x <= size.width - 4; x += 4 )
        {
            float t0, t1;
            t0 = CV_8TO32F(src1[x])*alpha + CV_8TO32F(src2[x])*beta + gamma;
            t1 = CV_8TO32F(src1[x+1])*alpha + CV_8TO32F(src2[x+1])*beta + gamma;

            dst[x] = saturate_cast<uchar>(t0);
            dst[x+1] = saturate_cast<uchar>(t1);

            t0 = CV_8TO32F(src1[x+2])*alpha + CV_8TO32F(src2[x+2])*beta + gamma;
            t1 = CV_8TO32F(src1[x+3])*alpha + CV_8TO32F(src2[x+3])*beta + gamma;

            dst[x+2] = saturate_cast<uchar>(t0);
            dst[x+3] = saturate_cast<uchar>(t1);
        }

        for( ; x < size.width; x++ )
        {
            float t0 = CV_8TO32F(src1[x])*alpha + CV_8TO32F(src2[x])*beta + gamma;
            dst[x] = saturate_cast<uchar>(t0);
        }
    }
}

static void addWeighted8s( const schar* src1, size_t step1, const schar* src2, size_t step2,
                           schar* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<schar, float>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                            ushort* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<ushort, float>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted16s( const short* src1, size_t step1, const short* src2, size_t step2,
                            short* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<short, float>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted32s( const int* src1, size_t step1, const int* src2, size_t step2,
                            int* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<int, double>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted32f( const float* src1, size_t step1, const float* src2, size_t step2,
                            float* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<float, double>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static void addWeighted64f( const double* src1, size_t step1, const double* src2, size_t step2,
                            double* dst, size_t step, Size sz, void* scalars )
{
    addWeighted_<double, double>(src1, step1, src2, step2, dst, step, sz, scalars);
}

static BinaryFunc addWeightedTab[] =
{
    (BinaryFunc)addWeighted8u, (BinaryFunc)addWeighted8s, (BinaryFunc)addWeighted16u,
    (BinaryFunc)addWeighted16s, (BinaryFunc)addWeighted32s, (BinaryFunc)addWeighted32f,
    (BinaryFunc)addWeighted64f, 0
};

}

void cv::addWeighted( InputArray src1, double alpha, InputArray src2,
                      double beta, double gamma, OutputArray dst, int dtype )
{
    double scalars[] = {alpha, beta, gamma};
    arithm_op(src1, src2, dst, None(), dtype, addWeightedTab, true, scalars);
}
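
// Usage sketch (illustrative only): a 30/70 cross-fade of two same-size,
// same-type images, dst = 0.3*img1 + 0.7*img2, computed with saturation:
//
//     Mat dst;
//     addWeighted(img1, 0.3, img2, 0.7, 0.0, dst);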


/****************************************************************************************\
*                                          compare                                       *
\****************************************************************************************/

namespace cv
{

template<typename T> static void
cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
     uchar* dst, size_t step, Size size, int code)
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    if( code == CMP_GE || code == CMP_LT )
    {
        std::swap(src1, src2);
        std::swap(step1, step2);
        code = code == CMP_GE ? CMP_LE : CMP_GT;
    }

    if( code == CMP_GT || code == CMP_LE )
    {
        int m = code == CMP_GT ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = 0;
            for( ; x <= size.width - 4; x += 4 )
            {
                int t0, t1;
                t0 = -(src1[x] > src2[x]) ^ m;
                t1 = -(src1[x+1] > src2[x+1]) ^ m;
                dst[x] = (uchar)t0; dst[x+1] = (uchar)t1;
                t0 = -(src1[x+2] > src2[x+2]) ^ m;
                t1 = -(src1[x+3] > src2[x+3]) ^ m;
                dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1;
            }

            for( ; x < size.width; x++ )
                dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m);
        }
    }
    else if( code == CMP_EQ || code == CMP_NE )
    {
        int m = code == CMP_EQ ? 0 : 255;
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = 0;
            for( ; x <= size.width - 4; x += 4 )
            {
                int t0, t1;
                t0 = -(src1[x] == src2[x]) ^ m;
                t1 = -(src1[x+1] == src2[x+1]) ^ m;
                dst[x] = (uchar)t0; dst[x+1] = (uchar)t1;
                t0 = -(src1[x+2] == src2[x+2]) ^ m;
                t1 = -(src1[x+3] == src2[x+3]) ^ m;
                dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1;
            }

            for( ; x < size.width; x++ )
                dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m);
        }
    }
}
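
// Note on the idiom above: -(cond) is either 0 or all-ones, so XOR-ing with
// m == 255 inverts the 8-bit answer. CMP_GE/CMP_LT are first reduced to
// CMP_LE/CMP_GT by swapping the operands, and CMP_LE/CMP_NE then reuse the
// CMP_GT/CMP_EQ kernels with m == 255.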

static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp8s(const schar* src1, size_t step1, const schar* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp16s(const short* src1, size_t step1, const short* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp32s(const int* src1, size_t step1, const int* src2, size_t step2,
                   uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp32f(const float* src1, size_t step1, const float* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static void cmp64f(const double* src1, size_t step1, const double* src2, size_t step2,
                  uchar* dst, size_t step, Size size, void* _cmpop)
{
    cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop);
}

static BinaryFunc cmpTab[] =
{
    (BinaryFunc)cmp8u, (BinaryFunc)cmp8s, (BinaryFunc)cmp16u,
    (BinaryFunc)cmp16s, (BinaryFunc)cmp32s, (BinaryFunc)cmp32f,
    (BinaryFunc)cmp64f, 0
};

static double getMinVal(int depth)
{
    static const double tab[] = {0, -128, 0, -32768, INT_MIN, -FLT_MAX, -DBL_MAX, 0};
    return tab[depth];
}

static double getMaxVal(int depth)
{
    static const double tab[] = {255, 127, 65535, 32767, INT_MAX, FLT_MAX, DBL_MAX, 0};
    return tab[depth];
}

}

void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op)
{
    CV_Assert( op == CMP_LT || op == CMP_LE || op == CMP_EQ ||
               op == CMP_NE || op == CMP_GE || op == CMP_GT );

    int kind1 = _src1.kind(), kind2 = _src2.kind();
    Mat src1 = _src1.getMat(), src2 = _src2.getMat();

    if( kind1 == kind2 && src1.dims <= 2 && src2.dims <= 2 && src1.size() == src2.size() && src1.type() == src2.type() )
    {
        _dst.create(src1.size(), CV_8UC1);
        Mat dst = _dst.getMat();
        Size sz = getContinuousSize(src1, src2, dst, src1.channels());
        cmpTab[src1.depth()](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op);
        return;
    }

    bool haveScalar = false;

    if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 ||
        src1.size != src2.size || src1.type() != src2.type() )
    {
        if( checkScalar(src1, src2.type(), kind1, kind2) )
        {
            // src1 is a scalar; swap it with src2
            swap(src1, src2);
            op = op == CMP_LT ? CMP_GT : op == CMP_LE ? CMP_GE :
                op == CMP_GE ? CMP_LE : op == CMP_GT ? CMP_LT : op;
        }
        else if( !checkScalar(src2, src1.type(), kind2, kind1) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The operation is neither 'array op array' (where arrays have the same size and the same type), "
                     "nor 'array op scalar', nor 'scalar op array'" );
        haveScalar = true;
    }

    int cn = src1.channels(), depth1 = src1.depth(), depth2 = src2.depth();
    if( cn != 1 )
        CV_Error( CV_StsUnsupportedFormat, "compare() can only process single-channel arrays" );

    size_t esz = src1.elemSize();
    size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz;

    _dst.create(src1.dims, src1.size, CV_8U);
    Mat dst = _dst.getMat();
    BinaryFunc func = cmpTab[depth1];

    if( !haveScalar )
    {
        const Mat* arrays[] = { &src1, &src2, &dst, 0 };
        uchar* ptrs[3];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size;

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func( ptrs[0], 0, ptrs[1], 0, ptrs[2], 0, Size((int)total, 1), &op );
    }
    else
    {
        const Mat* arrays[] = { &src1, &dst, 0 };
        uchar* ptrs[2];

        NAryMatIterator it(arrays, ptrs);
        size_t total = it.size, blocksize = std::min(total, blocksize0);

        AutoBuffer<uchar> _buf(blocksize*esz);
        uchar *buf = _buf;

        if( depth1 > CV_32S )
            convertAndUnrollScalar( src2, depth1, buf, blocksize );
        else
        {
            double fval = 0;
            getConvertFunc(depth2, CV_64F)(src2.data, 0, 0, 0, (uchar*)&fval, 0, Size(1,1), 0);
            if( fval < getMinVal(depth1) )
            {
                dst = Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0);
                return;
            }

            if( fval > getMaxVal(depth1) )
            {
                dst = Scalar::all(op == CMP_LT || op == CMP_LE || op == CMP_NE ? 255 : 0);
                return;
            }

            int ival = cvRound(fval);
            if( fval != ival )
            {
                if( op == CMP_LT || op == CMP_GE )
                    ival = cvCeil(fval);
                else if( op == CMP_LE || op == CMP_GT )
                    ival = cvFloor(fval);
                else
                {
                    dst = Scalar::all(op == CMP_NE ? 255 : 0);
                    return;
                }
            }
            convertAndUnrollScalar(Mat(1, 1, CV_32S, &ival), depth1, buf, blocksize);
        }

        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            for( size_t j = 0; j < total; j += blocksize )
            {
                int bsz = (int)std::min(total - j, blocksize);
                func( ptrs[0], 0, buf, 0, ptrs[1], 0, Size(bsz, 1), &op);
                ptrs[0] += bsz*esz;
                ptrs[1] += bsz;
            }
        }
    }
}
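
// Note: when an integer array is compared with a non-integer scalar, the
// scalar is snapped to the integer that preserves the predicate (cvCeil for
// CMP_LT/CMP_GE, cvFloor for CMP_LE/CMP_GT), and scalars outside the range
// of the array type short-circuit to an all-0 or all-255 answer.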

/****************************************************************************************\
*                                        inRange                                         *
\****************************************************************************************/

namespace cv
{

template<typename T> static void
inRange_(const T* src1, size_t step1, const T* src2, size_t step2,
         const T* src3, size_t step3, uchar* dst, size_t step,
         Size size)
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step3 /= sizeof(src3[0]);

    for( ; size.height--; src1 += step1, src2 += step2, src3 += step3, dst += step )
    {
        int x = 0;
        for( ; x <= size.width - 4; x += 4 )
        {
            int t0, t1;
            t0 = src2[x] <= src1[x] && src1[x] <= src3[x];
            t1 = src2[x+1] <= src1[x+1] && src1[x+1] <= src3[x+1];
            dst[x] = (uchar)-t0; dst[x+1] = (uchar)-t1;
            t0 = src2[x+2] <= src1[x+2] && src1[x+2] <= src3[x+2];
            t1 = src2[x+3] <= src1[x+3] && src1[x+3] <= src3[x+3];
            dst[x+2] = (uchar)-t0; dst[x+3] = (uchar)-t1;
        }

        for( ; x < size.width; x++ )
            dst[x] = (uchar)-(src2[x] <= src1[x] && src1[x] <= src3[x]);
    }
}


static void inRange8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                      const uchar* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange8s(const schar* src1, size_t step1, const schar* src2, size_t step2,
                      const schar* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2,
                       const ushort* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange16s(const short* src1, size_t step1, const short* src2, size_t step2,
                       const short* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange32s(const int* src1, size_t step1, const int* src2, size_t step2,
                       const int* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange32f(const float* src1, size_t step1, const float* src2, size_t step2,
                       const float* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRange64f(const double* src1, size_t step1, const double* src2, size_t step2,
                       const double* src3, size_t step3, uchar* dst, size_t step, Size size)
{
    inRange_(src1, step1, src2, step2, src3, step3, dst, step, size);
}

static void inRangeReduce(const uchar* src, uchar* dst, size_t len, int cn)
{
    int k = cn % 4 ? cn % 4 : 4;
    size_t i, j;
    if( k == 1 )
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j];
    else if( k == 2 )
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j] & src[j+1];
    else if( k == 3 )
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j] & src[j+1] & src[j+2];
    else
        for( i = j = 0; i < len; i++, j += cn )
            dst[i] = src[j] & src[j+1] & src[j+2] & src[j+3];

    for( ; k < cn; k += 4 )
    {
        for( i = 0, j = k; i < len; i++, j += cn )
            dst[i] &= src[j] & src[j+1] & src[j+2] & src[j+3];
    }
}
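
// inRangeReduce AND-folds the cn per-channel masks produced by inRange_ into
// a single 8-bit mask per pixel, processing up to four channels at a time.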

typedef void (*InRangeFunc)( const uchar* src1, size_t step1, const uchar* src2, size_t step2,
                             const uchar* src3, size_t step3, uchar* dst, size_t step, Size sz );

static InRangeFunc inRangeTab[] =
{
    (InRangeFunc)inRange8u, (InRangeFunc)inRange8s, (InRangeFunc)inRange16u,
    (InRangeFunc)inRange16s, (InRangeFunc)inRange32s, (InRangeFunc)inRange32f,
    (InRangeFunc)inRange64f, 0
};

}

void cv::inRange(InputArray _src, InputArray _lowerb,
                 InputArray _upperb, OutputArray _dst)
{
    int skind = _src.kind(), lkind = _lowerb.kind(), ukind = _upperb.kind();
    Mat src = _src.getMat(), lb = _lowerb.getMat(), ub = _upperb.getMat();

    bool lbScalar = false, ubScalar = false;

    if( (lkind == _InputArray::MATX && skind != _InputArray::MATX) ||
        src.size != lb.size || src.type() != lb.type() )
    {
        if( !checkScalar(lb, src.type(), lkind, skind) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The lower boundary is neither an array of the same size and same type as src, nor a scalar");
        lbScalar = true;
    }

    if( (ukind == _InputArray::MATX && skind != _InputArray::MATX) ||
        src.size != ub.size || src.type() != ub.type() )
    {
        if( !checkScalar(ub, src.type(), ukind, skind) )
            CV_Error( CV_StsUnmatchedSizes,
                     "The upper boundary is neither an array of the same size and same type as src, nor a scalar");
        ubScalar = true;
    }
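
    // the bounds must be either both scalars or both arrays, not a mix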
    CV_Assert( ((int)lbScalar ^ (int)ubScalar) == 0 );

    int cn = src.channels(), depth = src.depth();

    size_t esz = src.elemSize();
    size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz;

    _dst.create(src.dims, src.size, CV_8U);
    Mat dst = _dst.getMat();
    InRangeFunc func = inRangeTab[depth];

    const Mat* arrays_sc[] = { &src, &dst, 0 };
    const Mat* arrays_nosc[] = { &src, &dst, &lb, &ub, 0 };
    uchar* ptrs[4];

    NAryMatIterator it(lbScalar && ubScalar ? arrays_sc : arrays_nosc, ptrs);
    size_t total = it.size, blocksize = std::min(total, blocksize0);

    AutoBuffer<uchar> _buf(blocksize*(((int)lbScalar + (int)ubScalar)*esz + cn) + 2*cn*sizeof(int) + 128);
    uchar *buf = _buf, *mbuf = buf, *lbuf = 0, *ubuf = 0;
    buf = alignPtr(buf + blocksize*cn, 16);

    if( lbScalar && ubScalar )
    {
        lbuf = buf;
        ubuf = buf = alignPtr(buf + blocksize*esz, 16);

        CV_Assert( lb.type() == ub.type() );
        int scdepth = lb.depth();

        if( scdepth != depth && depth < CV_32S )
        {
            int* ilbuf = (int*)alignPtr(buf + blocksize*esz, 16);
            int* iubuf = ilbuf + cn;

            BinaryFunc sccvtfunc = getConvertFunc(scdepth, CV_32S);
            sccvtfunc(lb.data, 0, 0, 0, (uchar*)ilbuf, 0, Size(cn, 1), 0);
            sccvtfunc(ub.data, 0, 0, 0, (uchar*)iubuf, 0, Size(cn, 1), 0);
            int minval = cvRound(getMinVal(depth)), maxval = cvRound(getMaxVal(depth));

            for( int k = 0; k < cn; k++ )
            {
                if( ilbuf[k] > iubuf[k] || ilbuf[k] > maxval || iubuf[k] < minval )
                    ilbuf[k] = minval+1, iubuf[k] = minval;
            }
            lb = Mat(cn, 1, CV_32S, ilbuf);
            ub = Mat(cn, 1, CV_32S, iubuf);
        }

        convertAndUnrollScalar( lb, src.type(), lbuf, blocksize );
        convertAndUnrollScalar( ub, src.type(), ubuf, blocksize );
    }

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( size_t j = 0; j < total; j += blocksize )
        {
            int bsz = (int)std::min(total - j, blocksize);
            size_t delta = bsz*esz;
            uchar *lptr = lbuf, *uptr = ubuf;
            if( !lbScalar )
            {
                lptr = ptrs[2];
                ptrs[2] += delta;
            }
            if( !ubScalar )
            {
                int idx = !lbScalar ? 3 : 2;
                uptr = ptrs[idx];
                ptrs[idx] += delta;
            }
            func( ptrs[0], 0, lptr, 0, uptr, 0, cn == 1 ? ptrs[1] : mbuf, 0, Size(bsz*cn, 1));
            if( cn > 1 )
                inRangeReduce(mbuf, ptrs[1], bsz, cn);
            ptrs[0] += delta;
            ptrs[1] += bsz;
        }
    }
}
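
// Usage sketch (illustrative only): per-channel interval test producing an
// 8-bit mask that is 255 where lowerb <= src <= upperb holds in every channel:
//
//     Mat hsv = ..., mask;
//     inRange(hsv, Scalar(20, 100, 100), Scalar(30, 255, 255), mask);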

/****************************************************************************************\
*                                Earlier API: cvAdd etc.                                 *
\****************************************************************************************/

CV_IMPL void
cvNot( const CvArr* srcarr, CvArr* dstarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    cv::bitwise_not( src, dst );
}


CV_IMPL void
cvAnd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_and( src1, src2, dst, mask );
}


CV_IMPL void
cvOr( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_or( src1, src2, dst, mask );
}


CV_IMPL void
cvXor( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_xor( src1, src2, dst, mask );
}


CV_IMPL void
cvAndS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_and( src, (const cv::Scalar&)s, dst, mask );
}


CV_IMPL void
cvOrS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_or( src, (const cv::Scalar&)s, dst, mask );
}


CV_IMPL void
cvXorS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::bitwise_xor( src, (const cv::Scalar&)s, dst, mask );
}


CV_IMPL void cvAdd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::add( src1, src2, dst, mask, dst.type() );
}


CV_IMPL void cvSub( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::subtract( src1, src2, dst, mask, dst.type() );
}


CV_IMPL void cvAddS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::add( src1, (const cv::Scalar&)value, dst, mask, dst.type() );
}


CV_IMPL void cvSubRS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::subtract( (const cv::Scalar&)value, src1, dst, mask, dst.type() );
}


CV_IMPL void cvMul( const CvArr* srcarr1, const CvArr* srcarr2,
                    CvArr* dstarr, double scale )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    cv::multiply( src1, src2, dst, scale, dst.type() );
}


CV_IMPL void cvDiv( const CvArr* srcarr1, const CvArr* srcarr2,
                    CvArr* dstarr, double scale )
{
    cv::Mat src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr), mask;
    CV_Assert( src2.size == dst.size && src2.channels() == dst.channels() );

    if( srcarr1 )
        cv::divide( cv::cvarrToMat(srcarr1), src2, dst, scale, dst.type() );
    else
        cv::divide( scale, src2, dst, dst.type() );
}


CV_IMPL void
cvAddWeighted( const CvArr* srcarr1, double alpha,
               const CvArr* srcarr2, double beta,
               double gamma, CvArr* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
        dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() );
    cv::addWeighted( src1, alpha, src2, beta, gamma, dst, dst.type() );
}


CV_IMPL void
cvAbsDiff( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::absdiff( src1, cv::cvarrToMat(srcarr2), dst );
}


CV_IMPL void
cvAbsDiffS( const CvArr* srcarr1, CvArr* dstarr, CvScalar scalar )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::absdiff( src1, (const cv::Scalar&)scalar, dst );
}


CV_IMPL void
cvInRange( const void* srcarr1, const void* srcarr2,
           const void* srcarr3, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::inRange( src1, cv::cvarrToMat(srcarr2), cv::cvarrToMat(srcarr3), dst );
}


CV_IMPL void
cvInRangeS( const void* srcarr1, CvScalar lowerb, CvScalar upperb, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::inRange( src1, (const cv::Scalar&)lowerb, (const cv::Scalar&)upperb, dst );
}


CV_IMPL void
cvCmp( const void* srcarr1, const void* srcarr2, void* dstarr, int cmp_op )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::compare( src1, cv::cvarrToMat(srcarr2), dst, cmp_op );
}


CV_IMPL void
cvCmpS( const void* srcarr1, double value, void* dstarr, int cmp_op )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && dst.type() == CV_8U );

    cv::compare( src1, value, dst, cmp_op );
}


CV_IMPL void
cvMin( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::min( src1, cv::cvarrToMat(srcarr2), dst );
}


CV_IMPL void
cvMax( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::max( src1, cv::cvarrToMat(srcarr2), dst );
}


CV_IMPL void
cvMinS( const void* srcarr1, double value, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::min( src1, value, dst );
}


CV_IMPL void
cvMaxS( const void* srcarr1, double value, void* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );

    cv::max( src1, value, dst );
}

/* End of file. */