Commit b88323af authored by Andrey Kamaev

Merge pull request #91 from taka-no-me/warnings/windows
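The hunks below follow two recurring patterns for getting the code to build warning-free on Windows: suppress diagnostics that cannot reasonably be fixed at the call site (via ocv_warnings_disable in CMake or #pragma warning(disable: ...) in sources), and make narrowing conversions explicit with static_cast where MSVC would otherwise emit C4244/C4305. A minimal standalone sketch of the second pattern (the function and values are illustrative, not taken from the patch):

```cpp
#include <cstdio>

// Illustrative only: MSVC /W4 flags the implicit double->int and int->float
// narrowing below as C4244 unless the conversions are spelled out.
static float right_edge_minus_shift(int width)
{
    int shift = static_cast<int>(width * 0.04);     // explicit, instead of `int shift = width * 0.04;`
    return static_cast<float>(width - shift / 2);   // explicit int -> float, as in the perf-test hunk
}

int main()
{
    std::printf("%.1f\n", right_edge_minus_shift(640));
    return 0;
}
```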

......@@ -95,6 +95,8 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4244) # vs2008
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4267 /wd4305 /wd4306) # vs2008 Win64
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4703) # vs2012
ocv_warnings_disable(CMAKE_C_FLAGS /wd4267 /wd4244 /wd4018)
if(UNIX AND (CMAKE_COMPILER_IS_GNUCXX OR CV_ICC))
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
endif()
......
......@@ -806,6 +806,7 @@ struct Mutex::Impl
int refcount;
};
#ifndef __GNUC__
int _interlockedExchangeAdd(int* addr, int delta)
{
#if defined _MSC_VER && _MSC_VER >= 1500
......@@ -814,6 +815,7 @@ int _interlockedExchangeAdd(int* addr, int delta)
return (int)InterlockedExchangeAdd((long volatile*)addr, delta);
#endif
}
#endif // __GNUC__
#elif defined __APPLE__
......
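The #ifndef __GNUC__ guard above keeps the wrapper MSVC-only; a GCC-compatible compiler on Windows (MinGW) can rely on its atomic builtins instead. A rough standalone sketch of that split (illustrative; not the file's actual #if chain):

```cpp
#if defined _WIN32 && !defined __GNUC__
#  include <windows.h>
#endif

// Sketch: pick the platform's atomic fetch-and-add.
static int atomic_fetch_add(int* addr, int delta)
{
#if defined __GNUC__
    return __sync_fetch_and_add(addr, delta);                         // GCC/Clang/MinGW builtin
#elif defined _WIN32
    return (int)InterlockedExchangeAdd((long volatile*)addr, delta);  // MSVC path, as in the hunk
#else
    int old = *addr; *addr += delta; return old;                      // non-atomic fallback for the sketch
#endif
}

int main()
{
    int counter = 0;
    return atomic_fetch_add(&counter, 1);   // returns the previous value, 0
}
```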
......@@ -44,6 +44,10 @@ The references are:
#include "precomp.hpp"
#include "fast_score.hpp"
#if defined _MSC_VER
# pragma warning( disable : 4127)
#endif
namespace cv
{
......
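C4127 ("conditional expression is constant") typically comes from deliberately constant conditions such as the do { ... } while(0) macro idiom or while(true) scanning loops; the hunk above silences it for the whole translation unit, while a more local alternative is to scope the suppression with push/pop. A standalone sketch of code that trips the warning and of the scoped variant (illustrative only):

```cpp
#include <cstdio>

#if defined _MSC_VER
#  pragma warning( push )
#  pragma warning( disable : 4127 )   // conditional expression is constant
#endif

// The `while (0)` condition is constant, which is exactly what C4127 complains about.
#define REPORT_ONCE(msg) do { std::puts(msg); } while (0)

int main()
{
    REPORT_ONCE("scoped C4127 suppression sketch");
    return 0;
}

#if defined _MSC_VER
#  pragma warning( pop )
#endif
```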
......@@ -120,10 +120,14 @@ PERF_TEST_P( TestWarpPerspectiveNear_t, WarpPerspectiveNear,
resize(src, src, size);
int shift = src.cols*0.04;
Mat srcVertices = (Mat_<Vec2f>(1, 4) << Vec2f(0, 0), Vec2f(size.width-1, 0),
Vec2f(size.width-1, size.height-1), Vec2f(0, size.height-1));
Mat dstVertices = (Mat_<Vec2f>(1, 4) << Vec2f(0, shift), Vec2f(size.width-shift/2, 0),
Vec2f(size.width-shift, size.height-shift), Vec2f(shift/2, size.height-1));
Mat srcVertices = (Mat_<Vec2f>(1, 4) << Vec2f(0, 0),
Vec2f(static_cast<float>(size.width-1), 0),
Vec2f(static_cast<float>(size.width-1), static_cast<float>(size.height-1)),
Vec2f(0, static_cast<float>(size.height-1)));
Mat dstVertices = (Mat_<Vec2f>(1, 4) << Vec2f(0, static_cast<float>(shift)),
Vec2f(static_cast<float>(size.width-shift/2), 0),
Vec2f(static_cast<float>(size.width-shift), static_cast<float>(size.height-shift)),
Vec2f(static_cast<float>(shift/2), static_cast<float>(size.height-1)));
Mat warpMat = getPerspectiveTransform(srcVertices, dstVertices);
Mat dst(size, type);
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
/*======================= KALMAN FILTER AS TRACKER =========================*/
/* State vector is (x,y,w,h,dx,dy,dw,dh). */
/* Measurement is (x,y,w,h) */
/* Dynamic matrix A: */
const float A8[] = { 1, 0, 0, 0, 1, 0, 0, 0,
0, 1, 0, 0, 0, 1, 0, 0,
0, 0, 1, 0, 0, 0, 1, 0,
0, 0, 0, 1, 0, 0, 0, 1,
0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1};
/* Measurement matrix H: */
const float H8[] = { 1, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0};
/* Matrices for zero size velocity: */
/* Dynamic matrix A: */
const float A6[] = { 1, 0, 0, 0, 1, 0,
0, 1, 0, 0, 0, 1,
0, 0, 1, 0, 0, 0,
0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1};
/* Measurement matrix H: */
const float H6[] = { 1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0,
0, 0, 0, 1, 0, 0};
#define STATE_NUM 6
#define A A6
#define H H6
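/* With STATE_NUM set to 6, the tracker below runs the constant-velocity   */
/* model encoded by A6/H6: x += dx and y += dy each frame, w and h are     */
/* propagated unchanged, and H reads the measurable (x,y,w,h) part back    */
/* out of the state vector.                                                */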
class CvBlobTrackerOneKalman:public CvBlobTrackerOne
{
private:
CvBlob m_Blob;
CvKalman* m_pKalman;
int m_Frame;
public:
CvBlobTrackerOneKalman()
{
m_Frame = 0;
m_pKalman = cvCreateKalman(STATE_NUM,4);
memcpy( m_pKalman->transition_matrix->data.fl, A, sizeof(A));
memcpy( m_pKalman->measurement_matrix->data.fl, H, sizeof(H));
cvSetIdentity( m_pKalman->process_noise_cov, cvRealScalar(1e-5) );
cvSetIdentity( m_pKalman->measurement_noise_cov, cvRealScalar(1e-1) );
// CV_MAT_ELEM(*m_pKalman->measurement_noise_cov, float, 2,2) *= (float)pow(20,2);
// CV_MAT_ELEM(*m_pKalman->measurement_noise_cov, float, 3,3) *= (float)pow(20,2);
cvSetIdentity( m_pKalman->error_cov_post, cvRealScalar(1));
cvZero(m_pKalman->state_post);
cvZero(m_pKalman->state_pre);
SetModuleName("Kalman");
}
~CvBlobTrackerOneKalman()
{
cvReleaseKalman(&m_pKalman);
}
virtual void Init(CvBlob* pBlob, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL)
{
m_Blob = pBlob[0];
m_pKalman->state_post->data.fl[0] = CV_BLOB_X(pBlob);
m_pKalman->state_post->data.fl[1] = CV_BLOB_Y(pBlob);
m_pKalman->state_post->data.fl[2] = CV_BLOB_WX(pBlob);
m_pKalman->state_post->data.fl[3] = CV_BLOB_WY(pBlob);
}
virtual CvBlob* Process(CvBlob* pBlob, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL)
{
CvBlob* pBlobRes = &m_Blob;
float Z[4];
CvMat Zmat = cvMat(4,1,CV_32F,Z);
m_Blob = pBlob[0];
if(m_Frame < 2)
{ /* First two calls: bootstrap position and velocity from the blob: */
m_pKalman->state_post->data.fl[0+4] = CV_BLOB_X(pBlob)-m_pKalman->state_post->data.fl[0];
m_pKalman->state_post->data.fl[1+4] = CV_BLOB_Y(pBlob)-m_pKalman->state_post->data.fl[1];
if(m_pKalman->DP>6)
{
m_pKalman->state_post->data.fl[2+4] = CV_BLOB_WX(pBlob)-m_pKalman->state_post->data.fl[2];
m_pKalman->state_post->data.fl[3+4] = CV_BLOB_WY(pBlob)-m_pKalman->state_post->data.fl[3];
}
m_pKalman->state_post->data.fl[0] = CV_BLOB_X(pBlob);
m_pKalman->state_post->data.fl[1] = CV_BLOB_Y(pBlob);
m_pKalman->state_post->data.fl[2] = CV_BLOB_WX(pBlob);
m_pKalman->state_post->data.fl[3] = CV_BLOB_WY(pBlob);
memcpy(m_pKalman->state_pre->data.fl,m_pKalman->state_post->data.fl,sizeof(float)*STATE_NUM);
}
else
{ /* Subsequent calls: regular correct/predict cycle: */
Z[0] = CV_BLOB_X(pBlob);
Z[1] = CV_BLOB_Y(pBlob);
Z[2] = CV_BLOB_WX(pBlob);
Z[3] = CV_BLOB_WY(pBlob);
cvKalmanCorrect(m_pKalman,&Zmat);
cvKalmanPredict(m_pKalman,0);
cvMatMulAdd(m_pKalman->measurement_matrix, m_pKalman->state_pre, NULL, &Zmat);
CV_BLOB_X(pBlobRes) = Z[0];
CV_BLOB_Y(pBlobRes) = Z[1];
CV_BLOB_WX(pBlobRes) = Z[2];
CV_BLOB_WY(pBlobRes) = Z[3];
}
m_Frame++;
return pBlobRes;
}
virtual void Release()
{
delete this;
}
}; /* class CvBlobTrackerOneKalman */
#if 0
static CvBlobTrackerOne* cvCreateModuleBlobTrackerOneKalman()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneKalman;
}
CvBlobTracker* cvCreateBlobTrackerKalman()
{
return cvCreateBlobTrackerList(cvCreateModuleBlobTrackerOneKalman);
}
#endif
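For reference, a minimal standalone sketch of the legacy CvKalman calls that Process() issues once per frame, assuming the OpenCV 2.x C API headers are available; the measurement values are made up, and the transition matrix is left at cvCreateKalman's identity default rather than the A6 model the tracker memcpy's in:

```cpp
#include <opencv2/video/tracking.hpp>   // legacy C API: CvKalman, cvKalmanPredict/Correct

int main()
{
    CvKalman* kf = cvCreateKalman(6, 4);     // 6 state params (x,y,w,h,dx,dy), 4 measured
    cvSetIdentity(kf->measurement_matrix);   // read (x,y,w,h) out of the state, like H6 above

    float z[4] = { 10.f, 20.f, 32.f, 48.f }; // measured x, y, w, h of the current blob
    CvMat zmat = cvMat(4, 1, CV_32F, z);

    cvKalmanCorrect(kf, &zmat);              // measurement update into state_post
    cvKalmanPredict(kf, 0);                  // time update into state_pre (no control input)

    cvReleaseKalman(&kf);
    return 0;
}
```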
......@@ -54,6 +54,9 @@
#if CV_AVX
# define CV_HAAR_USE_AVX 1
# if defined _MSC_VER
# pragma warning( disable : 4752 )
# endif
#else
# if CV_SSE2 || CV_SSE3
# define CV_HAAR_USE_SSE 1
......@@ -412,6 +415,9 @@ icvCreateHidHaarClassifierCascade( CvHaarClassifierCascade* cascade )
#define calc_sum(rect,offset) \
((rect).p0[offset] - (rect).p1[offset] - (rect).p2[offset] + (rect).p3[offset])
#define calc_sumf(rect,offset) \
static_cast<float>((rect).p0[offset] - (rect).p1[offset] - (rect).p2[offset] + (rect).p3[offset])
CV_IMPL void
cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* _cascade,
......@@ -652,7 +658,7 @@ double icvEvalHidHaarClassifierAVX( CvHidHaarClassifier* classifier,
nodes[6] = (classifier+6)->node + idxV[6];
nodes[7] = (classifier+7)->node + idxV[7];
__m256 t = _mm256_set1_ps(variance_norm_factor);
__m256 t = _mm256_set1_ps(static_cast<float>(variance_norm_factor));
t = _mm256_mul_ps(t, _mm256_set_ps(nodes[7]->threshold,
nodes[6]->threshold,
......@@ -663,14 +669,14 @@ double icvEvalHidHaarClassifierAVX( CvHidHaarClassifier* classifier,
nodes[1]->threshold,
nodes[0]->threshold));
__m256 offset = _mm256_set_ps(calc_sum(nodes[7]->feature.rect[0], p_offset),
calc_sum(nodes[6]->feature.rect[0], p_offset),
calc_sum(nodes[5]->feature.rect[0], p_offset),
calc_sum(nodes[4]->feature.rect[0], p_offset),
calc_sum(nodes[3]->feature.rect[0], p_offset),
calc_sum(nodes[2]->feature.rect[0], p_offset),
calc_sum(nodes[1]->feature.rect[0], p_offset),
calc_sum(nodes[0]->feature.rect[0], p_offset));
__m256 offset = _mm256_set_ps(calc_sumf(nodes[7]->feature.rect[0], p_offset),
calc_sumf(nodes[6]->feature.rect[0], p_offset),
calc_sumf(nodes[5]->feature.rect[0], p_offset),
calc_sumf(nodes[4]->feature.rect[0], p_offset),
calc_sumf(nodes[3]->feature.rect[0], p_offset),
calc_sumf(nodes[2]->feature.rect[0], p_offset),
calc_sumf(nodes[1]->feature.rect[0], p_offset),
calc_sumf(nodes[0]->feature.rect[0], p_offset));
__m256 weight = _mm256_set_ps(nodes[7]->feature.rect[0].weight,
nodes[6]->feature.rect[0].weight,
......@@ -683,14 +689,14 @@ double icvEvalHidHaarClassifierAVX( CvHidHaarClassifier* classifier,
__m256 sum = _mm256_mul_ps(offset, weight);
offset = _mm256_set_ps(calc_sum(nodes[7]->feature.rect[1], p_offset),
calc_sum(nodes[6]->feature.rect[1], p_offset),
calc_sum(nodes[5]->feature.rect[1], p_offset),
calc_sum(nodes[4]->feature.rect[1], p_offset),
calc_sum(nodes[3]->feature.rect[1], p_offset),
calc_sum(nodes[2]->feature.rect[1], p_offset),
calc_sum(nodes[1]->feature.rect[1], p_offset),
calc_sum(nodes[0]->feature.rect[1], p_offset));
offset = _mm256_set_ps(calc_sumf(nodes[7]->feature.rect[1], p_offset),
calc_sumf(nodes[6]->feature.rect[1], p_offset),
calc_sumf(nodes[5]->feature.rect[1], p_offset),
calc_sumf(nodes[4]->feature.rect[1], p_offset),
calc_sumf(nodes[3]->feature.rect[1], p_offset),
calc_sumf(nodes[2]->feature.rect[1], p_offset),
calc_sumf(nodes[1]->feature.rect[1], p_offset),
calc_sumf(nodes[0]->feature.rect[1], p_offset));
weight = _mm256_set_ps(nodes[7]->feature.rect[1].weight,
nodes[6]->feature.rect[1].weight,
......@@ -704,21 +710,21 @@ double icvEvalHidHaarClassifierAVX( CvHidHaarClassifier* classifier,
sum = _mm256_add_ps(sum, _mm256_mul_ps(offset, weight));
if( nodes[0]->feature.rect[2].p0 )
tmp[0] = calc_sum(nodes[0]->feature.rect[2], p_offset) * nodes[0]->feature.rect[2].weight;
tmp[0] = calc_sumf(nodes[0]->feature.rect[2], p_offset) * nodes[0]->feature.rect[2].weight;
if( nodes[1]->feature.rect[2].p0 )
tmp[1] = calc_sum(nodes[1]->feature.rect[2], p_offset) * nodes[1]->feature.rect[2].weight;
tmp[1] = calc_sumf(nodes[1]->feature.rect[2], p_offset) * nodes[1]->feature.rect[2].weight;
if( nodes[2]->feature.rect[2].p0 )
tmp[2] = calc_sum(nodes[2]->feature.rect[2], p_offset) * nodes[2]->feature.rect[2].weight;
tmp[2] = calc_sumf(nodes[2]->feature.rect[2], p_offset) * nodes[2]->feature.rect[2].weight;
if( nodes[3]->feature.rect[2].p0 )
tmp[3] = calc_sum(nodes[3]->feature.rect[2], p_offset) * nodes[3]->feature.rect[2].weight;
tmp[3] = calc_sumf(nodes[3]->feature.rect[2], p_offset) * nodes[3]->feature.rect[2].weight;
if( nodes[4]->feature.rect[2].p0 )
tmp[4] = calc_sum(nodes[4]->feature.rect[2], p_offset) * nodes[4]->feature.rect[2].weight;
tmp[4] = calc_sumf(nodes[4]->feature.rect[2], p_offset) * nodes[4]->feature.rect[2].weight;
if( nodes[5]->feature.rect[2].p0 )
tmp[5] = calc_sum(nodes[5]->feature.rect[2], p_offset) * nodes[5]->feature.rect[2].weight;
tmp[5] = calc_sumf(nodes[5]->feature.rect[2], p_offset) * nodes[5]->feature.rect[2].weight;
if( nodes[6]->feature.rect[2].p0 )
tmp[6] = calc_sum(nodes[6]->feature.rect[2], p_offset) * nodes[6]->feature.rect[2].weight;
tmp[6] = calc_sumf(nodes[6]->feature.rect[2], p_offset) * nodes[6]->feature.rect[2].weight;
if( nodes[7]->feature.rect[2].p0 )
tmp[7] = calc_sum(nodes[7]->feature.rect[2], p_offset) * nodes[7]->feature.rect[2].weight;
tmp[7] = calc_sumf(nodes[7]->feature.rect[2], p_offset) * nodes[7]->feature.rect[2].weight;
sum = _mm256_add_ps(sum,_mm256_load_ps(tmp));
......@@ -918,7 +924,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
classifiers[7] = cascade->stage_classifier[i].classifier + j + 7;
nodes[7] = classifiers[7]->node;
__m256 t = _mm256_set1_ps(variance_norm_factor);
__m256 t = _mm256_set1_ps(static_cast<float>(variance_norm_factor));
t = _mm256_mul_ps(t, _mm256_set_ps(nodes[7]->threshold,
nodes[6]->threshold,
nodes[5]->threshold,
......@@ -928,14 +934,14 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
nodes[1]->threshold,
nodes[0]->threshold));
__m256 offset = _mm256_set_ps(calc_sum(nodes[7]->feature.rect[0], p_offset),
calc_sum(nodes[6]->feature.rect[0], p_offset),
calc_sum(nodes[5]->feature.rect[0], p_offset),
calc_sum(nodes[4]->feature.rect[0], p_offset),
calc_sum(nodes[3]->feature.rect[0], p_offset),
calc_sum(nodes[2]->feature.rect[0], p_offset),
calc_sum(nodes[1]->feature.rect[0], p_offset),
calc_sum(nodes[0]->feature.rect[0], p_offset));
__m256 offset = _mm256_set_ps(calc_sumf(nodes[7]->feature.rect[0], p_offset),
calc_sumf(nodes[6]->feature.rect[0], p_offset),
calc_sumf(nodes[5]->feature.rect[0], p_offset),
calc_sumf(nodes[4]->feature.rect[0], p_offset),
calc_sumf(nodes[3]->feature.rect[0], p_offset),
calc_sumf(nodes[2]->feature.rect[0], p_offset),
calc_sumf(nodes[1]->feature.rect[0], p_offset),
calc_sumf(nodes[0]->feature.rect[0], p_offset));
__m256 weight = _mm256_set_ps(nodes[7]->feature.rect[0].weight,
nodes[6]->feature.rect[0].weight,
......@@ -948,14 +954,14 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
__m256 sum = _mm256_mul_ps(offset, weight);
offset = _mm256_set_ps(calc_sum(nodes[7]->feature.rect[1], p_offset),
calc_sum(nodes[6]->feature.rect[1], p_offset),
calc_sum(nodes[5]->feature.rect[1], p_offset),
calc_sum(nodes[4]->feature.rect[1], p_offset),
calc_sum(nodes[3]->feature.rect[1], p_offset),
calc_sum(nodes[2]->feature.rect[1], p_offset),
calc_sum(nodes[1]->feature.rect[1], p_offset),
calc_sum(nodes[0]->feature.rect[1], p_offset));
offset = _mm256_set_ps(calc_sumf(nodes[7]->feature.rect[1], p_offset),
calc_sumf(nodes[6]->feature.rect[1], p_offset),
calc_sumf(nodes[5]->feature.rect[1], p_offset),
calc_sumf(nodes[4]->feature.rect[1], p_offset),
calc_sumf(nodes[3]->feature.rect[1], p_offset),
calc_sumf(nodes[2]->feature.rect[1], p_offset),
calc_sumf(nodes[1]->feature.rect[1], p_offset),
calc_sumf(nodes[0]->feature.rect[1], p_offset));
weight = _mm256_set_ps(nodes[7]->feature.rect[1].weight,
nodes[6]->feature.rect[1].weight,
......@@ -1023,7 +1029,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
classifiers[7] = cascade->stage_classifier[i].classifier + j + 7;
nodes[7] = classifiers[7]->node;
__m256 t = _mm256_set1_ps(variance_norm_factor);
__m256 t = _mm256_set1_ps(static_cast<float>(variance_norm_factor));
t = _mm256_mul_ps(t, _mm256_set_ps(nodes[7]->threshold,
nodes[6]->threshold,
......@@ -1034,14 +1040,14 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
nodes[1]->threshold,
nodes[0]->threshold));
__m256 offset = _mm256_set_ps(calc_sum(nodes[7]->feature.rect[0], p_offset),
calc_sum(nodes[6]->feature.rect[0], p_offset),
calc_sum(nodes[5]->feature.rect[0], p_offset),
calc_sum(nodes[4]->feature.rect[0], p_offset),
calc_sum(nodes[3]->feature.rect[0], p_offset),
calc_sum(nodes[2]->feature.rect[0], p_offset),
calc_sum(nodes[1]->feature.rect[0], p_offset),
calc_sum(nodes[0]->feature.rect[0], p_offset));
__m256 offset = _mm256_set_ps(calc_sumf(nodes[7]->feature.rect[0], p_offset),
calc_sumf(nodes[6]->feature.rect[0], p_offset),
calc_sumf(nodes[5]->feature.rect[0], p_offset),
calc_sumf(nodes[4]->feature.rect[0], p_offset),
calc_sumf(nodes[3]->feature.rect[0], p_offset),
calc_sumf(nodes[2]->feature.rect[0], p_offset),
calc_sumf(nodes[1]->feature.rect[0], p_offset),
calc_sumf(nodes[0]->feature.rect[0], p_offset));
__m256 weight = _mm256_set_ps(nodes[7]->feature.rect[0].weight,
nodes[6]->feature.rect[0].weight,
......@@ -1054,14 +1060,14 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
__m256 sum = _mm256_mul_ps(offset, weight);
offset = _mm256_set_ps(calc_sum(nodes[7]->feature.rect[1], p_offset),
calc_sum(nodes[6]->feature.rect[1], p_offset),
calc_sum(nodes[5]->feature.rect[1], p_offset),
calc_sum(nodes[4]->feature.rect[1], p_offset),
calc_sum(nodes[3]->feature.rect[1], p_offset),
calc_sum(nodes[2]->feature.rect[1], p_offset),
calc_sum(nodes[1]->feature.rect[1], p_offset),
calc_sum(nodes[0]->feature.rect[1], p_offset));
offset = _mm256_set_ps(calc_sumf(nodes[7]->feature.rect[1], p_offset),
calc_sumf(nodes[6]->feature.rect[1], p_offset),
calc_sumf(nodes[5]->feature.rect[1], p_offset),
calc_sumf(nodes[4]->feature.rect[1], p_offset),
calc_sumf(nodes[3]->feature.rect[1], p_offset),
calc_sumf(nodes[2]->feature.rect[1], p_offset),
calc_sumf(nodes[1]->feature.rect[1], p_offset),
calc_sumf(nodes[0]->feature.rect[1], p_offset));
weight = _mm256_set_ps(nodes[7]->feature.rect[1].weight,
nodes[6]->feature.rect[1].weight,
......@@ -1075,21 +1081,21 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
sum = _mm256_add_ps(sum, _mm256_mul_ps(offset, weight));
if( nodes[0]->feature.rect[2].p0 )
tmp[0] = calc_sum(nodes[0]->feature.rect[2],p_offset) * nodes[0]->feature.rect[2].weight;
tmp[0] = calc_sumf(nodes[0]->feature.rect[2],p_offset) * nodes[0]->feature.rect[2].weight;
if( nodes[1]->feature.rect[2].p0 )
tmp[1] = calc_sum(nodes[1]->feature.rect[2],p_offset) * nodes[1]->feature.rect[2].weight;
tmp[1] = calc_sumf(nodes[1]->feature.rect[2],p_offset) * nodes[1]->feature.rect[2].weight;
if( nodes[2]->feature.rect[2].p0 )
tmp[2] = calc_sum(nodes[2]->feature.rect[2],p_offset) * nodes[2]->feature.rect[2].weight;
tmp[2] = calc_sumf(nodes[2]->feature.rect[2],p_offset) * nodes[2]->feature.rect[2].weight;
if( nodes[3]->feature.rect[2].p0 )
tmp[3] = calc_sum(nodes[3]->feature.rect[2],p_offset) * nodes[3]->feature.rect[2].weight;
tmp[3] = calc_sumf(nodes[3]->feature.rect[2],p_offset) * nodes[3]->feature.rect[2].weight;
if( nodes[4]->feature.rect[2].p0 )
tmp[4] = calc_sum(nodes[4]->feature.rect[2],p_offset) * nodes[4]->feature.rect[2].weight;
tmp[4] = calc_sumf(nodes[4]->feature.rect[2],p_offset) * nodes[4]->feature.rect[2].weight;
if( nodes[5]->feature.rect[2].p0 )
tmp[5] = calc_sum(nodes[5]->feature.rect[2],p_offset) * nodes[5]->feature.rect[2].weight;
tmp[5] = calc_sumf(nodes[5]->feature.rect[2],p_offset) * nodes[5]->feature.rect[2].weight;
if( nodes[6]->feature.rect[2].p0 )
tmp[6] = calc_sum(nodes[6]->feature.rect[2],p_offset) * nodes[6]->feature.rect[2].weight;
tmp[6] = calc_sumf(nodes[6]->feature.rect[2],p_offset) * nodes[6]->feature.rect[2].weight;
if( nodes[7]->feature.rect[2].p0 )
tmp[7] = calc_sum(nodes[7]->feature.rect[2],p_offset) * nodes[7]->feature.rect[2].weight;
tmp[7] = calc_sumf(nodes[7]->feature.rect[2],p_offset) * nodes[7]->feature.rect[2].weight;
sum = _mm256_add_ps(sum, _mm256_load_ps(tmp));
......
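The switch from calc_sum to calc_sumf in the AVX blocks above exists because _mm256_set_ps takes eight float arguments, so the integer-valued calc_sum would be converted implicitly in every lane, which MSVC flags as C4244; the float-returning macro makes that conversion explicit without changing the arithmetic. A standalone sketch of the same idea with simplified macros (compile with AVX enabled; these macros are illustrative, not the ones in haar.cpp):

```cpp
#include <immintrin.h>

// Integer-valued sum (like calc_sum) and its explicitly float-valued twin (like calc_sumf).
#define SUM2(p, off)   ((p)[(off)] - (p)[(off) + 1])
#define SUM2F(p, off)  static_cast<float>((p)[(off)] - (p)[(off) + 1])

int main()
{
    int s[16] = { 40, 15, 30, 5, 70, 20, 90, 10, 3, 1, 8, 2, 12, 6, 25, 9 };

    // Each lane would otherwise need an implicit int -> float conversion; SUM2F spells it out.
    __m256 v = _mm256_set_ps(SUM2F(s, 14), SUM2F(s, 12), SUM2F(s, 10), SUM2F(s, 8),
                             SUM2F(s, 6),  SUM2F(s, 4),  SUM2F(s, 2),  SUM2F(s, 0));

    float out[8];
    _mm256_storeu_ps(out, v);
    return out[0] > 0.f ? 0 : 1;
}
```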
......@@ -628,7 +628,7 @@ bool DpSeamFinder::getSeamTips(int comp1, int comp2, Point &p1, Point &p2)
{
for (int j = i+1; j < nlabels; ++j)
{
double size1 = points[i].size(), size2 = points[j].size();
double size1 = static_cast<double>(points[i].size()), size2 = static_cast<double>(points[j].size());
double cx1 = cvRound(sum[i].x / size1), cy1 = cvRound(sum[i].y / size1);
double cx2 = cvRound(sum[j].x / size2), cy2 = cvRound(sum[j].y / size2);
......@@ -648,7 +648,7 @@ bool DpSeamFinder::getSeamTips(int comp1, int comp2, Point &p1, Point &p2)
for (int i = 0; i < 2; ++i)
{
double size = points[idx[i]].size();
double size = static_cast<double>(points[idx[i]].size());
double cx = cvRound(sum[idx[i]].x / size);
double cy = cvRound(sum[idx[i]].y / size);
......@@ -1036,7 +1036,7 @@ void DpSeamFinder::updateLabelsUsingSeam(
for (map<int, int>::iterator itr = connect2.begin(); itr != connect2.end(); ++itr)
{
double len = contours_[comp1].size();
double len = static_cast<double>(contours_[comp1].size());
isAdjComp[itr->first] = itr->second / len > 0.05 && connectOther.find(itr->first)->second / len < 0.1;
}
......
......@@ -6352,7 +6352,9 @@ namespace internal {
// Valid only for fast death tests. Indicates the code is running in the
// child process of a fast style death test.
# if !GTEST_OS_WINDOWS
static bool g_in_fast_death_test_child = false;
# endif
// Returns a Boolean value indicating whether the caller is currently
// executing in the context of the death test child process. Tools such as
......