Commit b476bf2a authored by Vadim Pisarevsky

Merge pull request #3294 from mshabunin:fix-ios-warnings-24
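Editor's note: this merge cleans up warnings that Clang emits when building OpenCV for iOS. The recurring fixes in the hunks below are explicit (int) casts where a size_t or long value is stored in an int (-Wshorten-64-to-32), (void) casts that consume unused parameters, removal of unused private members and constants, preprocessor guards around code that only exists in some configurations, and simply disabling the warning for bundled third-party code such as zlib. A minimal, hypothetical illustration of the dominant cast pattern (the names below are not from OpenCV): std::vector::size() returns size_t, which is 64-bit on arm64, so assigning it to an int warns unless the narrowing is explicit.

#include <cstddef>
#include <vector>

// Hypothetical illustration of the -Wshorten-64-to-32 fix pattern used in
// this commit: size() returns size_t (64-bit on iOS/arm64), so storing it in
// an int needs an explicit cast to silence the truncation warning.
static int countObjects(const std::vector<int>& trackedObjects)
{
    int n = (int)trackedObjects.size();  // explicit narrowing, warning-free
    return n;
}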

......@@ -82,7 +82,7 @@ if(UNIX)
endif()
endif()
ocv_warnings_disable(CMAKE_C_FLAGS -Wattributes -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations)
ocv_warnings_disable(CMAKE_C_FLAGS -Wshorten-64-to-32 -Wattributes -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations)
set_target_properties(${ZLIB_LIBRARY} PROPERTIES
OUTPUT_NAME ${ZLIB_LIBRARY}
......
......@@ -112,6 +112,10 @@ if(CMAKE_COMPILER_IS_GNUCXX)
add_extra_compiler_option(-march=i686)
endif()
if(APPLE)
add_extra_compiler_option(-Wno-semicolon-before-method-body)
endif()
# Other optimizations
if(ENABLE_OMIT_FRAME_POINTER)
add_extra_compiler_option(-fomit-frame-pointer)
......
......@@ -491,7 +491,7 @@ void DetectionBasedTracker::process(const Mat& imageGray)
} else {
LOGD("DetectionBasedTracker::process: get _rectsWhereRegions from previous positions");
for(size_t i = 0; i < trackedObjects.size(); i++) {
int n = trackedObjects[i].lastPositions.size();
int n = (int)trackedObjects[i].lastPositions.size();
CV_Assert(n > 0);
Rect r = trackedObjects[i].lastPositions[n-1];
......@@ -535,7 +535,7 @@ void DetectionBasedTracker::getObjects(std::vector<cv::Rect>& result) const
result.clear();
for(size_t i=0; i < trackedObjects.size(); i++) {
Rect r=calcTrackedObjectPositionToShow(i);
Rect r=calcTrackedObjectPositionToShow((int)i);
if (r.area()==0) {
continue;
}
......@@ -549,7 +549,7 @@ void DetectionBasedTracker::getObjects(std::vector<Object>& result) const
result.clear();
for(size_t i=0; i < trackedObjects.size(); i++) {
Rect r=calcTrackedObjectPositionToShow(i);
Rect r=calcTrackedObjectPositionToShow((int)i);
if (r.area()==0) {
continue;
}
......@@ -581,8 +581,8 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
INTERSECTED_RECTANGLE=-2
};
int N1=trackedObjects.size();
int N2=detectedObjects.size();
int N1=(int)trackedObjects.size();
int N2=(int)detectedObjects.size();
LOGD("DetectionBasedTracker::updateTrackedObjects: N1=%d, N2=%d", N1, N2);
for(int i=0; i < N1; i++) {
......@@ -600,7 +600,7 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
int bestIndex=-1;
int bestArea=-1;
int numpositions=curObject.lastPositions.size();
int numpositions=(int)curObject.lastPositions.size();
CV_Assert(numpositions > 0);
Rect prevRect=curObject.lastPositions[numpositions-1];
LOGD("DetectionBasedTracker::updateTrackedObjects: prevRect[%d]={%d, %d, %d x %d}", i, prevRect.x, prevRect.y, prevRect.width, prevRect.height);
......@@ -682,7 +682,7 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
)
)
{
int numpos=it->lastPositions.size();
int numpos=(int)it->lastPositions.size();
CV_Assert(numpos > 0);
Rect r = it->lastPositions[numpos-1];
LOGD("DetectionBasedTracker::updateTrackedObjects: deleted object {%d, %d, %d x %d}",
......@@ -711,7 +711,7 @@ Rect DetectionBasedTracker::calcTrackedObjectPositionToShow(int i) const
const TrackedObject::PositionsVector& lastPositions=trackedObjects[i].lastPositions;
int N=lastPositions.size();
int N=(int)lastPositions.size();
if (N<=0) {
LOGE("DetectionBasedTracker::calcTrackedObjectPositionToShow: ERROR: no positions for i=%d", i);
return Rect();
......
......@@ -499,8 +499,8 @@ void CvFuzzyMeanShiftTracker::SearchWindow::extractInfo(IplImage *maskImage, Ipl
if (m00 > 0)
{
xGc = (m10 / m00);
yGc = (m01 / m00);
xGc = (int)(m10 / m00);
yGc = (int)(m01 / m00);
double a, b, c, e1, e2, e3;
a = ((double)m20/(double)m00)-(xGc * xGc);
......
......@@ -523,8 +523,6 @@ private:
RetinaColor _colorEngine;
ImageLogPolProjection *_photoreceptorsLogSampling;
bool _useMinimalMemoryForToneMappingONLY;
bool _normalizeParvoOutput_0_maxOutputValue;
bool _normalizeMagnoOutput_0_maxOutputValue;
float _maxOutputValue;
......
......@@ -327,7 +327,7 @@ void StereoVar::FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
void StereoVar::autoParams()
{
int maxD = MAX(labs(maxDisp), labs(minDisp));
int maxD = (int)MAX(labs(maxDisp), labs(minDisp));
if (!maxD) pyrScale = 0.85;
else if (maxD < 8) pyrScale = 0.5;
......@@ -351,7 +351,7 @@ void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
{
CV_Assert(left.size() == right.size() && left.type() == right.type());
CvSize imgSize = left.size();
int MaxD = MAX(labs(minDisp), labs(maxDisp));
int MaxD = (int)MAX(labs(minDisp), labs(maxDisp));
int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1;
if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;}
......
......@@ -167,7 +167,7 @@ class CV_EXPORTS GlArrays
{
public:
inline GlArrays()
: vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), bgra_(true), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER)
: vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER)
{
}
......@@ -194,7 +194,6 @@ public:
private:
GlBuffer vertex_;
GlBuffer color_;
bool bgra_;
GlBuffer normal_;
GlBuffer texCoord_;
};
......@@ -287,35 +286,6 @@ public:
void setupProjectionMatrix() const;
void setupModelViewMatrix() const;
private:
Point3d eye_;
Point3d center_;
Point3d up_;
Point3d pos_;
double yaw_;
double pitch_;
double roll_;
bool useLookAtParams_;
Point3d scale_;
Mat projectionMatrix_;
double fov_;
double aspect_;
double left_;
double right_;
double bottom_;
double top_;
double zNear_;
double zFar_;
bool perspectiveProjection_;
};
inline void GlBuffer::create(Size _size, int _type, Usage _usage) { create(_size.height, _size.width, _type, _usage); }
......
......@@ -46,7 +46,6 @@
namespace cv
{
static const int MAX_BLOCK_SIZE = 1024;
typedef void (*MathFunc)(const void* src, void* dst, int len);
static const float atan2_p1 = 0.9997878412794807f*(float)(180/CV_PI);
......
......@@ -264,18 +264,7 @@ void cv::render(const string&, const Ptr<GlFont>&, Scalar, Point2d)
////////////////////////////////////////////////////////////////////////
// GlCamera
cv::GlCamera::GlCamera() :
eye_(0.0, 0.0, -5.0), center_(0.0, 0.0, 0.0), up_(0.0, 1.0, 0.0),
pos_(0.0, 0.0, -5.0), yaw_(0.0), pitch_(0.0), roll_(0.0),
useLookAtParams_(false),
scale_(1.0, 1.0, 1.0),
projectionMatrix_(),
fov_(45.0), aspect_(0.0),
left_(0.0), right_(1.0), bottom_(1.0), top_(0.0),
zNear_(-1.0), zFar_(1.0),
perspectiveProjection_(false)
cv::GlCamera::GlCamera()
{
CV_Error(CV_StsNotImplemented, "This function in deprecated, do not use it");
}
......
......@@ -177,7 +177,7 @@ namespace
static void block_function(void* context, size_t index)
{
ProxyLoopBody* ptr_body = static_cast<ProxyLoopBody*>(context);
(*ptr_body)(cv::Range(index, index + 1));
(*ptr_body)(cv::Range((int)index, (int)index + 1));
}
#elif defined HAVE_CONCURRENCY
class ProxyLoopBody : public ParallelLoopBodyWrapper
......
......@@ -1208,7 +1208,7 @@ force_int:
int val, is_hex = d == 'x';
c = ptr[3];
ptr[3] = '\0';
val = strtol( ptr + is_hex, &endptr, is_hex ? 8 : 16 );
val = (int)strtol( ptr + is_hex, &endptr, is_hex ? 8 : 16 );
ptr[3] = c;
if( endptr == ptr + is_hex )
buf[len++] = 'x';
......@@ -2819,7 +2819,7 @@ cvOpenFileStorage( const char* filename, CvMemStorage* dststorage, int flags, co
// find the last occurence of </opencv_storage>
for(;;)
{
int line_offset = ftell( fs->file );
int line_offset = (int)ftell( fs->file );
char* ptr0 = icvGets( fs, xml_buf, xml_buf_size ), *ptr;
if( !ptr0 )
break;
......
......@@ -44,8 +44,6 @@
namespace cv
{
static const double FREAK_SQRT2 = 1.4142135623731;
static const double FREAK_INV_SQRT2 = 1.0 / FREAK_SQRT2;
static const double FREAK_LOG2 = 0.693147180559945;
static const int FREAK_NB_ORIENTATION = 256;
static const int FREAK_NB_POINTS = 43;
......
......@@ -233,7 +233,7 @@ MSERNewHistory( MSERConnectedComp* comp, MSERGrowHistory* history )
history->shortcut = comp->history->shortcut;
history->stable = comp->history->stable;
}
history->val = comp->grey_level;
history->val = (int)comp->grey_level;
history->size = comp->size;
comp->history = history;
}
......@@ -263,7 +263,7 @@ MSERMergeComp( MSERConnectedComp* comp1,
}
if ( NULL != comp2->history && comp2->history->stable > history->stable )
history->stable = comp2->history->stable;
history->val = comp1->grey_level;
history->val = (int)comp1->grey_level;
history->size = comp1->size;
// put comp1 to history
comp->var = comp1->var;
......@@ -288,7 +288,7 @@ MSERMergeComp( MSERConnectedComp* comp1,
}
if ( NULL != comp1->history && comp1->history->stable > history->stable )
history->stable = comp1->history->stable;
history->val = comp2->grey_level;
history->val = (int)comp2->grey_level;
history->size = comp2->size;
// put comp2 to history
comp->var = comp2->var;
......@@ -312,7 +312,7 @@ static float
MSERVariationCalc( MSERConnectedComp* comp, int delta )
{
MSERGrowHistory* history = comp->history;
int val = comp->grey_level;
int val = (int)comp->grey_level;
if ( NULL != history )
{
MSERGrowHistory* shortcut = history->shortcut;
......
......@@ -43,7 +43,6 @@ namespace cv
{
const float HARRIS_K = 0.04f;
const int DESCRIPTOR_SIZE = 32;
/**
* Function that computes the Harris responses in a
......
......@@ -185,7 +185,6 @@ class CvVideoWriter_AVFoundation : public CvVideoWriter{
AVAssetWriterInput* mMovieWriterInput;
AVAssetWriterInputPixelBufferAdaptor* mMovieWriterAdaptor;
unsigned char* imagedata;
NSString* path;
NSString* codec;
NSString* fileType;
......@@ -494,15 +493,15 @@ double CvCaptureCAM::getProperty(int property_id){
CMFormatDescriptionRef format = [[ports objectAtIndex:0] formatDescription];
CGSize s1 = CMVideoFormatDescriptionGetPresentationDimensions(format, YES, YES);
int width=(int)s1.width, height=(int)s1.height;
int w=(int)s1.width, h=(int)s1.height;
[localpool drain];
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
return width;
return w;
case CV_CAP_PROP_FRAME_HEIGHT:
return height;
return h;
case CV_CAP_PROP_IOS_DEVICE_FOCUS:
return mCaptureDevice.focusMode;
......@@ -660,7 +659,8 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection{
(void)captureOutput;
(void)connection;
// Failed
// connection.videoOrientation = AVCaptureVideoOrientationPortrait;
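Editor's note: the capture callback above does not use all of its parameters; the added (void) casts are the conventional way to mark the omission as intentional and silence -Wunused-parameter. A small stand-alone sketch of the same idiom, with made-up names:

#include <cstdio>

// Hypothetical delegate-style callback: only 'sampleBuffer' is used, so the
// other parameters are cast to void to suppress -Wunused-parameter.
static void onSampleBuffer(void* captureOutput, void* sampleBuffer, void* connection)
{
    (void)captureOutput;
    (void)connection;
    std::printf("received sample buffer at %p\n", sampleBuffer);
}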
......@@ -714,26 +714,26 @@ fromConnection:(AVCaptureConnection *)connection{
memcpy(imagedata, baseaddress, currSize);
if (image == NULL) {
image = cvCreateImageHeader(cvSize(width,height), IPL_DEPTH_8U, 4);
image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 4);
}
image->width =width;
image->height = height;
image->width = (int)width;
image->height = (int)height;
image->nChannels = 4;
image->depth = IPL_DEPTH_8U;
image->widthStep = (int)rowBytes;
image->imageData = imagedata;
image->imageSize = currSize;
image->imageSize = (int)currSize;
if (bgr_image == NULL) {
bgr_image = cvCreateImageHeader(cvSize(width,height), IPL_DEPTH_8U, 3);
bgr_image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 3);
}
bgr_image->width =width;
bgr_image->height = height;
bgr_image->width = (int)width;
bgr_image->height = (int)height;
bgr_image->nChannels = 3;
bgr_image->depth = IPL_DEPTH_8U;
bgr_image->widthStep = (int)rowBytes;
bgr_image->imageData = bgr_imagedata;
bgr_image->imageSize = currSize;
bgr_image->imageSize = (int)currSize;
cvCvtColor(image, bgr_image, CV_BGRA2BGR);
......@@ -742,7 +742,7 @@ fromConnection:(AVCaptureConnection *)connection{
// iOS provides hardware accelerated rotation through AVCaptureConnection class
// I can't get it work.
if (bgr_image_r90 == NULL){
bgr_image_r90 = cvCreateImage(cvSize(height, width), IPL_DEPTH_8U, 3);
bgr_image_r90 = cvCreateImage(cvSize((int)height, (int)width), IPL_DEPTH_8U, 3);
}
cvTranspose(bgr_image, bgr_image_r90);
cvFlip(bgr_image_r90, NULL, 1);
......@@ -942,29 +942,29 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
memcpy(imagedata, baseaddress, currSize);
if (image == NULL) {
image = cvCreateImageHeader(cvSize(width,height), IPL_DEPTH_8U, 4);
image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 4);
}
image->width =width;
image->height = height;
image->width = (int)width;
image->height = (int)height;
image->nChannels = 4;
image->depth = IPL_DEPTH_8U;
image->widthStep = rowBytes;
image->widthStep = (int)rowBytes;
image->imageData = imagedata;
image->imageSize = currSize;
image->imageSize = (int)currSize;
if (bgr_image == NULL) {
bgr_image = cvCreateImageHeader(cvSize(width,height), IPL_DEPTH_8U, 3);
bgr_image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 3);
}
bgr_image->width =width;
bgr_image->height = height;
bgr_image->width = (int)width;
bgr_image->height = (int)height;
bgr_image->nChannels = 3;
bgr_image->depth = IPL_DEPTH_8U;
bgr_image->widthStep = rowBytes;
bgr_image->widthStep = (int)rowBytes;
bgr_image->imageData = bgr_imagedata;
bgr_image->imageSize = currSize;
bgr_image->imageSize = (int)currSize;
cvCvtColor(image, bgr_image,CV_BGRA2BGR);
......@@ -1014,7 +1014,7 @@ double CvCaptureFile::getFPS() {
}
double CvCaptureFile::getProperty(int property_id){
(void)property_id;
/*
if (mCaptureSession == nil) return 0;
......@@ -1055,7 +1055,8 @@ double CvCaptureFile::getProperty(int property_id){
}
bool CvCaptureFile::setProperty(int property_id, double value) {
(void)property_id;
(void)value;
/*
if (mCaptureSession == nil) return false;
......@@ -1265,7 +1266,7 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
// writer status check
if (![mMovieWriterInput isReadyForMoreMediaData] || mMovieWriter.status != AVAssetWriterStatusWriting ) {
NSLog(@"[mMovieWriterInput isReadyForMoreMediaData] Not ready for media data or ...");
NSLog(@"mMovieWriter.status: %d. Error: %@", mMovieWriter.status, [mMovieWriter.error localizedDescription]);
NSLog(@"mMovieWriter.status: %d. Error: %@", (int)mMovieWriter.status, [mMovieWriter.error localizedDescription]);
[localpool drain];
return false;
}
......
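Editor's note: mMovieWriter.status is an NSInteger-backed enum, which is 64-bit on arm64, so printing it through a %d specifier triggers a format warning unless it is narrowed to int as above. The same rule applies to plain printf-style logging; a hypothetical C++ equivalent:

#include <cstdio>

// Hypothetical: a 64-bit status value logged through a %d specifier must be
// cast to int explicitly (or the specifier changed to %ld) to avoid -Wformat.
static void logWriterStatus(long status)
{
    std::printf("writer status: %d\n", (int)status);
}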
......@@ -150,6 +150,7 @@
{
[[NSNotificationCenter defaultCenter] removeObserver:self];
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
[super dealloc];
}
......@@ -234,6 +235,7 @@
- (void)deviceOrientationDidChange:(NSNotification*)notification
{
(void)notification;
UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;
switch (orientation)
......@@ -250,7 +252,7 @@
default:
break;
}
NSLog(@"deviceOrientationDidChange: %d", orientation);
NSLog(@"deviceOrientationDidChange: %d", (int)orientation);
[self updateOrientation];
}
......@@ -324,7 +326,7 @@
// support for autofocus
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
NSError *error = nil;
error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
......
......@@ -101,7 +101,7 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
[super start];
if (self.recordVideo == YES) {
NSError* error;
NSError* error = nil;
if ([[NSFileManager defaultManager] fileExistsAtPath:[self videoFileString]]) {
[[NSFileManager defaultManager] removeItemAtPath:[self videoFileString] error:&error];
}
......@@ -424,6 +424,8 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
(void)captureOutput;
(void)connection;
if (self.delegate) {
// convert from Core Media to Core Video
......@@ -462,9 +464,8 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
}
// delegate image processing to the delegate
cv::Mat image(height, width, format_opencv, bufferAddress, bytesPerRow);
cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);
cv::Mat* result = NULL;
CGImage* dstImage;
if ([self.delegate respondsToSelector:@selector(processImage:)]) {
......@@ -473,7 +474,7 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
// check if matrix data pointer or dimensions were changed by the delegate
bool iOSimage = false;
if (height == image.rows && width == image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
if (height == (size_t)image.rows && width == (size_t)image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
iOSimage = true;
}
......@@ -591,7 +592,7 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
if ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:[self videoFileURL]]) {
[library writeVideoAtPathToSavedPhotosAlbum:[self videoFileURL]
completionBlock:^(NSURL *assetURL, NSError *error){}];
completionBlock:^(NSURL *assetURL, NSError *error){ (void)assetURL; (void)error; }];
}
}
......
......@@ -58,10 +58,10 @@
namespace cv
{
static const char fmtSignTiffII[] = "II\x2a\x00";
static const char fmtSignTiffMM[] = "MM\x00\x2a";
#ifdef HAVE_TIFF
static const char fmtSignTiffMM[] = "MM\x00\x2a";
static int grfmt_tiff_err_handler_init = 0;
static void GrFmtSilentTIFFErrorHandler( const char*, const char*, va_list ) {}
......
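Editor's note: the MM byte-order signature is only referenced by the TIFF decoder, so moving it inside the HAVE_TIFF block keeps builds without libtiff from warning about an unused constant. A generic sketch of the pattern, with hypothetical names:

// Hypothetical: declare a constant inside the same feature guard as its only
// user, so configurations without the feature never see an unused variable.
#ifdef HAVE_FOO_CODEC
static const char fooSignature[] = "FOO\x2a";

static bool matchesFooSignature(const char* header)
{
    return header[0] == fooSignature[0] && header[1] == fooSignature[1];
}
#endif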
......@@ -43,6 +43,9 @@
#import "opencv2/highgui/cap_ios.h"
#include "precomp.hpp"
UIImage* MatToUIImage(const cv::Mat& image);
void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist);
UIImage* MatToUIImage(const cv::Mat& image) {
NSData *data = [NSData dataWithBytes:image.data
......
......@@ -355,7 +355,7 @@ type* prefix##get_##type(CVPOS pos)\
}\
int prefix##get_count_##type(_CVLIST* list)\
{\
return list->m_size;\
return (int)list->m_size;\
}
#define DECLARE_AND_IMPLEMENT_LIST(type, prefix)\
......
......@@ -43,10 +43,10 @@
#include "../precomp.hpp"
#include "imgwarp_avx2.hpp"
#if CV_AVX2
const int INTER_RESIZE_COEF_BITS=11;
const int INTER_RESIZE_COEF_SCALE=1 << INTER_RESIZE_COEF_BITS;
#if CV_AVX2
int VResizeLinearVec_32s8u_avx2(const uchar** _src, uchar* dst, const uchar* _beta, int width )
{
const int** src = (const int**)_src;
......
......@@ -2973,10 +2973,10 @@ class RemapInvoker :
{
public:
RemapInvoker(const Mat& _src, Mat& _dst, const Mat *_m1,
const Mat *_m2, int _interpolation, int _borderType, const Scalar &_borderValue,
const Mat *_m2, int _borderType, const Scalar &_borderValue,
int _planar_input, RemapNNFunc _nnfunc, RemapFunc _ifunc, const void *_ctab) :
ParallelLoopBody(), src(&_src), dst(&_dst), m1(_m1), m2(_m2),
interpolation(_interpolation), borderType(_borderType), borderValue(_borderValue),
borderType(_borderType), borderValue(_borderValue),
planar_input(_planar_input), nnfunc(_nnfunc), ifunc(_ifunc), ctab(_ctab)
{
}
......@@ -3162,7 +3162,7 @@ private:
const Mat* src;
Mat* dst;
const Mat *m1, *m2;
int interpolation, borderType;
int borderType;
Scalar borderValue;
int planar_input;
RemapNNFunc nnfunc;
......@@ -3263,7 +3263,7 @@ void cv::remap( InputArray _src, OutputArray _dst,
planar_input = map1.channels() == 1;
}
RemapInvoker invoker(src, dst, m1, m2, interpolation,
RemapInvoker invoker(src, dst, m1, m2,
borderType, borderValue, planar_input, nnfunc, ifunc,
ctab);
parallel_for_(Range(0, dst.rows), invoker, dst.total()/(double)(1<<16));
......
......@@ -699,7 +699,6 @@ public:
private:
Mat src;
Mat dst;
int nStripes;
double thresh;
double maxval;
......
......@@ -364,7 +364,7 @@ inline void ReallocImage(IplImage** ppImage, CvSize sz, long lChNum)
cvReleaseImage( &pImage );
}
if( pImage == NULL )
pImage = cvCreateImage( sz, IPL_DEPTH_8U, lChNum);
pImage = cvCreateImage( sz, IPL_DEPTH_8U, (int)lChNum);
*ppImage = pImage;
}
......@@ -385,9 +385,9 @@ inline BoostingFaceTemplate::BoostingFaceTemplate(long lNumber,CvRect rect):Face
long EyeWidth = rect.width/5;
long EyeHeight = EyeWidth;
CvRect LeftEyeRect = cvRect(rect.x + EyeWidth,rect.y + rect.height/2 - EyeHeight,EyeWidth,EyeHeight);
CvRect RightEyeRect = cvRect(rect.x + 3*EyeWidth,rect.y + rect.height/2 - EyeHeight,EyeWidth,EyeHeight);
CvRect MouthRect = cvRect(rect.x + 3*EyeWidth/2,rect.y + 3*rect.height/4 - EyeHeight/2,2*EyeWidth,EyeHeight);
CvRect LeftEyeRect = cvRect((int)(rect.x + EyeWidth),(int)(rect.y + rect.height/2 - EyeHeight),(int)EyeWidth,(int)EyeHeight);
CvRect RightEyeRect = cvRect((int)(rect.x + 3*EyeWidth),(int)(rect.y + rect.height/2 - EyeHeight),(int)EyeWidth,(int)EyeHeight);
CvRect MouthRect = cvRect((int)(rect.x + 3*EyeWidth/2),(int)(rect.y + 3*rect.height/4 - EyeHeight/2),(int)(2*EyeWidth),(int)EyeHeight);
CvRect * lpMouthRect = new CvRect();
*lpMouthRect = MouthRect;
......
......@@ -156,7 +156,7 @@ inline void ReallocImage(IplImage** ppImage, CvSize sz, long lChNum)
cvReleaseImage( &pImage );
}
if( pImage == NULL )
pImage = cvCreateImage( sz, IPL_DEPTH_8U, lChNum);
pImage = cvCreateImage( sz, IPL_DEPTH_8U, (int)lChNum);
*ppImage = pImage;
}
......
......@@ -63,10 +63,8 @@ private:
float m_FVMin[MAX_FV_SIZE];
float m_FVVar[MAX_FV_SIZE];
int m_Dim;
CvBlob m_BlobSeq[BLOB_NUM];
int m_Frame;
int m_State;
int m_LastFrame;
int m_ClearFlag;
void Clear()
{
......@@ -251,10 +249,8 @@ private:
float m_FVMin[MAX_FV_SIZE];
float m_FVVar[MAX_FV_SIZE];
int m_Dim;
CvBlob m_BlobSeq[BLOB_NUM];
int m_Frame;
int m_State;
int m_LastFrame;
int m_ClearFlag;
void Clear()
{
......@@ -1132,7 +1128,6 @@ class CvBlobTrackAnalysisSVM : public CvBlobTrackAnalysis
{
/*---------------- Internal functions: --------------------*/
private:
CvMemStorage* m_pMem;
int m_TrackNum;
int m_Frame;
char m_DataFileName[1024];
......
......@@ -44,6 +44,7 @@
/* State vector is (x,y,w,h,dx,dy,dw,dh). */
/* Measurement is (x,y,w,h). */
#if 0
/* Dynamic matrix A: */
const float A8[] = { 1, 0, 0, 0, 1, 0, 0, 0,
0, 1, 0, 0, 0, 1, 0, 0,
......@@ -60,6 +61,12 @@ const float H8[] = { 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0};
#define STATE_NUM 8
#define A A8
#define H H8
#else
/* Matrices for zero size velocity: */
/* Dinamic matrix A: */
const float A6[] = { 1, 0, 0, 0, 1, 0,
......@@ -79,6 +86,8 @@ const float H6[] = { 1, 0, 0, 0, 0, 0,
#define A A6
#define H H6
#endif
class CvBlobTrackPostProcKalman:public CvBlobTrackPostProcOne
{
......
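Editor's note: only the 6-state motion model is actually used, so the 8-state matrices A8/H8 and their macros are compiled out with an #if 0 ... #else ... #endif block rather than deleted; the reference definitions stay in the source without producing unused-variable warnings. A reduced, hypothetical sketch of the structure:

// Hypothetical: keep an alternative model in the file but compile only the
// active one, so the inactive arrays never trigger unused-variable warnings.
#if 0
static const float A_FULL[] = { 1, 0, 1, 0,
                                0, 1, 0, 1 };
#define STATE_DIM 4
#define A A_FULL
#else
static const float A_SMALL[] = { 1, 1,
                                 0, 1 };
#define STATE_DIM 2
#define A A_SMALL
#endif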
......@@ -588,7 +588,9 @@ private:
float m_param_roi_scale;
int m_param_only_roi;
#ifdef USE_OBJECT_DETECTOR
CvObjectDetector* m_split_detector;
#endif
CvSize m_min_window_size;
int m_max_border;
......@@ -603,7 +605,9 @@ CvBlobDetector* cvCreateBlobDetectorCC(){return new CvBlobDetectorCC;}
/* Constructor for BlobDetector: */
CvBlobDetectorCC::CvBlobDetectorCC() :
#ifdef USE_OBJECT_DETECTOR
m_split_detector(0),
#endif
m_detected_blob_seq(sizeof(CvDetectedBlob)),
m_roi_seq(0),
m_debug_blob_seq(sizeof(CvDetectedBlob))
......
......@@ -154,7 +154,7 @@ bool RFace::CheckElem(void * lpCandidat,void * lpIdeal)
long x = Rect.x + cvRound(Rect.width/2);
long y = Rect.y + cvRound(Rect.height/2);
if ( isPointInRect(cvPoint(x,y),IdealRect) )
if ( isPointInRect(cvPoint((int)x,(int)y),IdealRect) )
return true;
// if ( isPointInRect(cvPoint(Rect.x,Rect.y),UpRect) &&
......@@ -329,24 +329,24 @@ inline void RFace::ResizeRect(CvRect Rect,CvRect * lpRect,long lDir,long lD)
{
if (lDir == UP_SCALE)
{
lpRect->x = Rect.x - lD;
lpRect->y = Rect.y - lD;
lpRect->width = Rect.width + 2*lD;
lpRect->height = Rect.height + 2*lD;
lpRect->x = Rect.x - (int)lD;
lpRect->y = Rect.y - (int)lD;
lpRect->width = Rect.width + (int)(2*lD);
lpRect->height = Rect.height + (int)(2*lD);
}
if (lDir == DOWN_SCALE)
{
lpRect->x = Rect.x + lD;
lpRect->y = Rect.y + lD;
lpRect->x = Rect.x + (int)lD;
lpRect->y = Rect.y + (int)lD;
if (Rect.width - 2*lD >= 0)
{
lpRect->width = Rect.width - 2*lD;
lpRect->width = Rect.width - (int)(2*lD);
}else
lpRect->width = 0;
if (Rect.height - 2*lD >= 0)
{
lpRect->height = Rect.height - 2*lD;
lpRect->height = Rect.height - (int)(2*lD);
}else
lpRect->height = 0;
}
......
......@@ -491,7 +491,7 @@ FaceDetectionList::~FaceDetectionList()
int FaceDetectionList::AddElem(Face * pFace)
{
new FaceDetectionListElem(pFace,m_pHead);
return m_FacesCount++;
return (int)m_FacesCount++;
}//FaceDetectionList::AddElem(Face * pFace)
Face * FaceDetectionList::GetData()
......
......@@ -111,21 +111,6 @@ namespace cv
/******************************* Defs and macros *****************************/
// default number of sampled intervals per octave
static const int SIFT_INTVLS = 3;
// default sigma for initial gaussian smoothing
static const float SIFT_SIGMA = 1.6f;
// default threshold on keypoint contrast |D(x)|
static const float SIFT_CONTR_THR = 0.04f;
// default threshold on keypoint ratio of principle curvatures
static const float SIFT_CURV_THR = 10.f;
// double image size before pyramid construction?
static const bool SIFT_IMG_DBL = true;
// default width of descriptor histogram array
static const int SIFT_DESCR_WIDTH = 4;
......
......@@ -52,6 +52,7 @@ unsigned char ccblk[256] = { 34,17,2,17,19,19,2,17,36,36,2,36,19,19,2,17,51,51,2
36,19,19,2,32,66,66,2,66,19,19,2,66,36,36,2,36,19,19,2,66,51,51,2,51,19,19,2,51,36,36,2,36,19,19,2,32,49,49,2,49,
19,19,2,49,36,36,2,36,19,19,2,49,51,51,2,51,19,19,2,51,36,36,2,36,19,19,2,49,66,66,2,66,19,19,2,66,36,36,2,36,19,
19,2,66,51,51,2,51,19,19,2,51,36,36,2,36,19,19,2,34 };
#if CV_SSE2
static const CvPoint pickup[64] = { {7,6},{8,6},{7,5},{8,5},{1,5},{7,4},{8,4},{1,4},{1,8},{2,8},{1,7},{2,7},{3,7},
{1,6},{2,6},{3,6},{3,2},{4,2},{3,1},{4,1},{5,1},{3,8},{4,8},{5,8},{6,1},{7,1},{6,8},{7,8},{8,8},{6,7},{7,7},{8,7},
{4,7},{5,7},{4,6},{5,6},{6,6},{4,5},{5,5},{6,5},{2,5},{3,5},{2,4},{3,4},{4,4},{2,3},{3,3},{4,3},{8,3},{1,3},{8,2},
......@@ -76,23 +77,19 @@ static const uchar Log[256] = { (uchar)-255,255,1,240,2,225,241,53,3,38,226,133,
157,143,169,82,72,182,215,191,251,47,178,89,151,101,94,160,123,26,112,232,21,51,238,208,131,
58,69,148,18,15,16,68,17,121,149,129,19,155,59,249,70,214,250,168,71,201,156,64,60,237,130,
111,20,93,122,177,150 };
#endif
#define dethresh 0.92f
#define eincO (2 * dethresh) // e increment orthogonal
#define eincD (1.414f * dethresh) // e increment diagonal
static const float eincs[] = {
eincO, eincD,
eincO, eincD,
eincO, eincD,
eincO, eincD,
999 };
#define Ki(x) _mm_set_epi32((x),(x),(x),(x))
#define Kf(x) _mm_set_ps((x),(x),(x),(x))
#if CV_SSE2
static const int CV_DECL_ALIGNED(16) absmask[] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
#define _mm_abs_ps(x) _mm_and_ps((x), *(const __m128*)absmask)
#endif
static void writexy(CvMat *m, int r, CvPoint p)
{
......
......@@ -1977,7 +1977,7 @@ cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size )
if( !f )
CV_Error( CV_StsError, "" );
fseek( f, 0, SEEK_END );
size = ftell( f );
size = (int)ftell( f );
fseek( f, 0, SEEK_SET );
size_t elements_read = fread( ptr, 1, size, f );
CV_Assert(elements_read == (size_t)(size));
......
......@@ -122,8 +122,10 @@ private:
gpu::SURF_GPU surf_;
gpu::GpuMat keypoints_;
gpu::GpuMat descriptors_;
#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
int num_octaves_, num_layers_;
int num_octaves_descr_, num_layers_descr_;
#endif
};
#endif
......
......@@ -232,8 +232,16 @@ class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public P
public:
GraphCutSeamFinderGpu(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,
float bad_region_penalty = 1000.f)
: cost_type_(cost_type), terminal_cost_(terminal_cost),
bad_region_penalty_(bad_region_penalty) {}
#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
: cost_type_(cost_type),
terminal_cost_(terminal_cost),
bad_region_penalty_(bad_region_penalty)
#endif
{
(void)cost_type;
(void)terminal_cost;
(void)bad_region_penalty;
}
void find(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners,
std::vector<cv::Mat> &masks);
......@@ -246,9 +254,11 @@ private:
const cv::Mat &dy1, const cv::Mat &dy2, const cv::Mat &mask1, const cv::Mat &mask2,
cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom);
std::vector<Mat> dx_, dy_;
#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
int cost_type_;
float terminal_cost_;
float bad_region_penalty_;
#endif
};
} // namespace detail
......
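Editor's note: when GPU support is compiled out, the cost and penalty members no longer exist, so both the member declarations and the constructor's initializer list are wrapped in the same preprocessor condition, while the constructor body consumes the arguments with (void) casts so neither configuration warns. A self-contained, hypothetical sketch of that pattern:

// Hypothetical class following the same pattern: members and their
// initializers exist only when the optional backend is available, and the
// constructor parameters are always consumed to avoid unused warnings.
class SeamFinderSketch
{
public:
    SeamFinderSketch(int costType, float terminalCost)
#if defined(HAVE_BACKEND)
        : costType_(costType), terminalCost_(terminalCost)
#endif
    {
        (void)costType;
        (void)terminalCost;
    }

private:
#if defined(HAVE_BACKEND)
    int costType_;
    float terminalCost_;
#endif
};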
......@@ -15,7 +15,7 @@ Script will create <outputdir>, if it's missing, and a few its subdirectories:
build/
iPhoneOS-*/
[cmake-generated build tree for an iOS device target]
iPhoneSimulator/
iPhoneSimulator-*/
[cmake-generated build tree for iOS simulator]
opencv2.framework/
[the framework content]
......@@ -25,7 +25,18 @@ The script should handle minor OpenCV updates efficiently
However, opencv2.framework directory is erased and recreated on each run.
"""
import glob, re, os, os.path, shutil, string, sys
import glob, re, os, os.path, shutil, string, sys, exceptions, subprocess
def execute(cmd):
try:
print >>sys.stderr, "Executing:", cmd
retcode = subprocess.call(cmd, shell=True)
if retcode < 0:
raise Exception("Child was terminated by signal:", -retcode)
elif retcode > 0:
raise Exception("Child returned:", retcode)
except OSError as e:
raise Exception("Execution failed:", e)
def build_opencv(srcroot, buildroot, target, arch):
"builds OpenCV for device or simulator"
......@@ -42,19 +53,23 @@ def build_opencv(srcroot, buildroot, target, arch):
"-DBUILD_opencv_world=ON " +
"-DCMAKE_C_FLAGS=\"-Wno-implicit-function-declaration\" " +
"-DCMAKE_INSTALL_PREFIX=install") % (srcroot, target)
# if cmake cache exists, just rerun cmake to update OpenCV.xproj if necessary
if arch.startswith("armv"):
cmakeargs += " -DENABLE_NEON=ON"
# if cmake cache exists, just rerun cmake to update OpenCV.xcodeproj if necessary
if os.path.isfile(os.path.join(builddir, "CMakeCache.txt")):
os.system("cmake %s ." % (cmakeargs,))
execute("cmake %s ." % (cmakeargs,))
else:
os.system("cmake %s %s" % (cmakeargs, srcroot))
execute("cmake %s %s" % (cmakeargs, srcroot))
for wlib in [builddir + "/modules/world/UninstalledProducts/libopencv_world.a",
builddir + "/lib/Release/libopencv_world.a"]:
if os.path.isfile(wlib):
os.remove(wlib)
os.system("xcodebuild IPHONEOS_DEPLOYMENT_TARGET=6.0 -parallelizeTargets ARCHS=%s -jobs 8 -sdk %s -configuration Release -target ALL_BUILD" % (arch, target.lower()))
os.system("xcodebuild IPHONEOS_DEPLOYMENT_TARGET=6.0 ARCHS=%s -sdk %s -configuration Release -target install install" % (arch, target.lower()))
execute("xcodebuild IPHONEOS_DEPLOYMENT_TARGET=6.0 -parallelizeTargets ARCHS=%s -jobs 8 -sdk %s -configuration Release -target ALL_BUILD" % (arch, target.lower()))
execute("xcodebuild IPHONEOS_DEPLOYMENT_TARGET=6.0 ARCHS=%s -sdk %s -configuration Release -target install install" % (arch, target.lower()))
os.chdir(currdir)
def put_framework_together(srcroot, dstroot):
......@@ -82,7 +97,7 @@ def put_framework_together(srcroot, dstroot):
# make universal static lib
wlist = " ".join(["../build/" + t + "/lib/Release/libopencv_world.a" for t in targetlist])
os.system("lipo -create " + wlist + " -o " + dstdir + "/opencv2")
execute("lipo -create " + wlist + " -o " + dstdir + "/opencv2")
# copy Info.plist
shutil.copyfile(tdir0 + "/ios/Info.plist", dstdir + "/Resources/Info.plist")
......@@ -97,10 +112,13 @@ def put_framework_together(srcroot, dstroot):
def build_framework(srcroot, dstroot):
"main function to do all the work"
targets = ["iPhoneOS", "iPhoneOS", "iPhoneOS", "iPhoneSimulator", "iPhoneSimulator"]
archs = ["armv7", "armv7s", "arm64", "i386", "x86_64"]
for i in range(len(targets)):
build_opencv(srcroot, os.path.join(dstroot, "build"), targets[i], archs[i])
targets = [("armv7", "iPhoneOS"),
("armv7s", "iPhoneOS"),
("arm64", "iPhoneOS"),
("i386", "iPhoneSimulator"),
("x86_64", "iPhoneSimulator")]
for t in targets:
build_opencv(srcroot, os.path.join(dstroot, "build"), t[1], t[0])
put_framework_together(srcroot, dstroot)
......@@ -110,4 +128,8 @@ if __name__ == "__main__":
print "Usage:\n\t./build_framework.py <outputdir>\n\n"
sys.exit(0)
build_framework(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../..")), os.path.abspath(sys.argv[1]))
try:
build_framework(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../..")), os.path.abspath(sys.argv[1]))
except Exception as e:
print >>sys.stderr, e
sys.exit(1)
......@@ -39,8 +39,9 @@ set (CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSI
set (CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}")
# Hidden visibilty is required for cxx on iOS
set (CMAKE_C_FLAGS "")
set (CMAKE_CXX_FLAGS "-stdlib=libc++ -headerpad_max_install_names -fvisibility=hidden -fvisibility-inlines-hidden")
set (no_warn "-Wno-unused-function -Wno-overloaded-virtual")
set (CMAKE_C_FLAGS "${no_warn}")
set (CMAKE_CXX_FLAGS "-stdlib=libc++ -fvisibility=hidden -fvisibility-inlines-hidden ${no_warn}")
set (CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG -O3 -fomit-frame-pointer -ffast-math")
......