提交 aebffb71 编写于 作者: Andrey Kamaev 提交者: OpenCV Buildbot

Merge pull request #726 from jet47:fix-pvs-studio-warnings

......@@ -459,14 +459,14 @@ void CV_StereoMatchingTest::run(int)
continue;
}
int dispScaleFactor = datasetsParams[datasetName].dispScaleFactor;
Mat tmp; trueLeftDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor ); trueLeftDisp = tmp; tmp.release();
trueLeftDisp.convertTo( trueLeftDisp, CV_32FC1, 1.f/dispScaleFactor );
if( !trueRightDisp.empty() )
trueRightDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor ); trueRightDisp = tmp; tmp.release();
trueRightDisp.convertTo( trueRightDisp, CV_32FC1, 1.f/dispScaleFactor );
Mat leftDisp, rightDisp;
int ignBorder = max(runStereoMatchingAlgorithm(leftImg, rightImg, leftDisp, rightDisp, ci), EVAL_IGNORE_BORDER);
leftDisp.convertTo( tmp, CV_32FC1 ); leftDisp = tmp; tmp.release();
rightDisp.convertTo( tmp, CV_32FC1 ); rightDisp = tmp; tmp.release();
leftDisp.convertTo( leftDisp, CV_32FC1 );
rightDisp.convertTo( rightDisp, CV_32FC1 );
int tempCode = processStereoMatchingResults( resFS, ci, isWrite,
leftImg, rightImg, trueLeftDisp, trueRightDisp, leftDisp, rightDisp, QualityEvalParams(ignBorder));
......
......@@ -380,6 +380,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::initDepthValues(IplImage *maskImage,
{
if (*depthData)
{
d = *depthData;
m1 += d;
if (d < mind)
mind = d;
......
......@@ -431,7 +431,7 @@ template<typename _Tp> inline _Tp* Mat::ptr(int y)
template<typename _Tp> inline const _Tp* Mat::ptr(int y) const
{
CV_DbgAssert( y == 0 || (data && dims >= 1 && data && (unsigned)y < (unsigned)size.p[0]) );
CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );
return (const _Tp*)(data + step.p[0]*y);
}
......
......@@ -319,7 +319,7 @@ void MatOp::augAssignXor(const MatExpr& expr, Mat& m) const
{
Mat temp;
expr.op->assign(expr, temp);
m /= temp;
m ^= temp;
}
......
......@@ -184,7 +184,7 @@ static void finalizeHdr(Mat& m)
void Mat::create(int d, const int* _sizes, int _type)
{
int i;
CV_Assert(0 <= d && _sizes && d <= CV_MAX_DIM && _sizes);
CV_Assert(0 <= d && d <= CV_MAX_DIM && _sizes);
_type = CV_MAT_TYPE(_type);
if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
......
......@@ -192,7 +192,7 @@ struct KeypointResponseGreater
void KeyPointsFilter::retainBest(vector<KeyPoint>& keypoints, int n_points)
{
//this is only necessary if the keypoints size is greater than the number of desired points.
if( n_points > 0 && keypoints.size() > (size_t)n_points )
if( n_points >= 0 && keypoints.size() > (size_t)n_points )
{
if (n_points==0)
{
......
......@@ -406,7 +406,7 @@ bool CvCaptureCAM_VFW::open( int wIndex )
fourcc = (DWORD)-1;
memset( &caps, 0, sizeof(caps));
capDriverGetCaps( hWndC, &caps, sizeof(&caps));
capDriverGetCaps( hWndC, &caps, sizeof(caps));
::MoveWindow( hWndC, 0, 0, 320, 240, TRUE );
capSetUserData( hWndC, (size_t)this );
capSetCallbackOnFrame( hWndC, frameCallback );
......
......@@ -3669,7 +3669,7 @@ cv2DRotationMatrix( CvPoint2D32f center, double angle,
double scale, CvMat* matrix )
{
cv::Mat M0 = cv::cvarrToMat(matrix), M = cv::getRotationMatrix2D(center, angle, scale);
CV_Assert( M.size() == M.size() );
CV_Assert( M.size() == M0.size() );
M.convertTo(M0, M0.type());
return matrix;
}
......@@ -3682,7 +3682,7 @@ cvGetPerspectiveTransform( const CvPoint2D32f* src,
{
cv::Mat M0 = cv::cvarrToMat(matrix),
M = cv::getPerspectiveTransform((const cv::Point2f*)src, (const cv::Point2f*)dst);
CV_Assert( M.size() == M.size() );
CV_Assert( M.size() == M0.size() );
M.convertTo(M0, M0.type());
return matrix;
}
......
......@@ -429,10 +429,11 @@ void CvBlobTrackerAuto1::Process(IplImage* pImg, IplImage* pMask)
for(i=0; i<NewBlobList.GetBlobNum(); ++i)
{
CvBlob* pBN = NewBlobList.GetBlob(i);
pBN->ID = m_NextBlobID;
if(pBN && pBN->w >= CV_BLOB_MINW && pBN->h >= CV_BLOB_MINH)
{
pBN->ID = m_NextBlobID;
CvBlob* pB = m_pBT->AddBlob(pBN, pImg, pmask );
if(pB)
{
......
......@@ -235,7 +235,7 @@ void CvCalibFilter::SetCameraCount( int count )
cvReleaseMat( &rectMap[i][1] );
}
memset( latestCounts, 0, sizeof(latestPoints) );
memset( latestCounts, 0, sizeof(latestCounts) );
maxPoints = 0;
cameraCount = count;
}
......
......@@ -2115,7 +2115,7 @@ CV_IMPL IplImage* icvCreateIsometricImage( IplImage* src, IplImage* dst,
if( !dst || dst->depth != desired_depth ||
dst->nChannels != desired_num_channels ||
dst_size.width != src_size.width ||
dst_size.height != dst_size.height )
dst_size.height != src_size.height )
{
cvReleaseImage( &dst );
dst = cvCreateImage( src_size, desired_depth, desired_num_channels );
......
......@@ -2627,7 +2627,7 @@ void HOGDescriptor::readALTModel(std::string modelfile)
detector.push_back((float)-linearbias);
setSVMDetector(detector);
delete linearwt;
delete [] linearwt;
} else {
throw Exception();
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册