提交 f4e33ea0 编写于 作者: A Andrey Kamaev

Fix build of samples

上级 7225f89e
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <stdio.h>
#include <string>
#include <vector>
using namespace std;
using namespace cv;
const string WindowName = "Face Detection example";
// Adapter that exposes a cv::CascadeClassifier through the
// DetectionBasedTracker::IDetector interface so the tracker can drive it.
class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
// Takes shared ownership (via cv::Ptr) of a loaded cascade classifier.
// The cascade must have been loaded successfully (non-empty).
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
IDetector(),
Detector(detector)
{
CV_Assert(!detector.empty());
}
// Runs the cascade over Image and fills `objects` with detections.
// scaleFactor/minNeighbours/minObjSize/maxObjSize appear to be protected
// members inherited from IDetector (configured by the tracker) —
// TODO confirm against the contrib header, which is not visible here.
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
{
Detector->detectMultiScale(Image, objects, scaleFactor, minNeighbours, 0, minObjSize, maxObjSize);
}
virtual ~CascadeDetectorAdapter()
{}
private:
CascadeDetectorAdapter(); // not default-constructible: a cascade is required
cv::Ptr<cv::CascadeClassifier> Detector;
};
int main(int , char** )
{
namedWindow(WindowName);
VideoCapture VideoStream(0);
if (!VideoStream.isOpened())
{
printf("Error: Cannot open video stream from camera\n");
return 1;
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker Detector(MainDetector, TrackingDetector, params);
if (!Detector.run())
{
printf("Error: Detector initialization failed\n");
return 2;
}
Mat ReferenceFrame;
Mat GrayFrame;
vector<Rect> Faces;
while(true)
{
VideoStream >> ReferenceFrame;
cvtColor(ReferenceFrame, GrayFrame, COLOR_RGB2GRAY);
Detector.process(GrayFrame);
Detector.getObjects(Faces);
for (size_t i = 0; i < Faces.size(); i++)
{
rectangle(ReferenceFrame, Faces[i], CV_RGB(0,255,0));
}
imshow(WindowName, ReferenceFrame);
if (cvWaitKey(30) >= 0) break;
}
Detector.stop();
return 0;
}
#else
#include <stdio.h>
// Fallback entry point for platforms where the DetectionBasedTracker
// sample is not built (see the #if guard at the top of the file:
// only Linux/Apple/Android are supported).
int main()
{
printf("This sample works for UNIX or ANDROID only\n");
return 0;
}
#endif
......@@ -43,6 +43,8 @@
#define LOGE(...) do{} while(0)
#endif
using namespace cv;
using namespace std;
......@@ -61,31 +63,9 @@ static void usage()
LOGE0("\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" ");
}
// Adapter exposing cv::CascadeClassifier through the
// DetectionBasedTracker::IDetector interface (Android test-app variant).
class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
// Wraps an already-loaded cascade; asserts it is non-empty.
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
Detector(detector)
{
CV_Assert(!detector.empty());
}
// Runs detection with hard-coded scale factor 1.1 and 3 min-neighbours;
// minObjSize/maxObjSize appear to be inherited from IDetector —
// TODO confirm against the contrib header (not visible here).
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
{
Detector->detectMultiScale(Image, objects, 1.1, 3, 0, minObjSize, maxObjSize);
}
virtual ~CascadeDetectorAdapter()
{}
private:
CascadeDetectorAdapter(); // a cascade instance is mandatory
cv::Ptr<cv::CascadeClassifier> Detector;
};
static int test_FaceDetector(int argc, char *argv[])
{
if (argc < 4)
{
if (argc < 4) {
usage();
return -1;
}
......@@ -100,14 +80,12 @@ static int test_FaceDetector(int argc, char *argv[])
vector<Mat> images;
{
char filename[256];
for(int n=1; ; n++)
{
for(int n=1; ; n++) {
snprintf(filename, sizeof(filename), filepattern, n);
LOGD("filename='%s'", filename);
Mat m0;
m0=imread(filename);
if (m0.empty())
{
if (m0.empty()) {
LOGI0("Cannot read the file --- break");
break;
}
......@@ -116,15 +94,10 @@ static int test_FaceDetector(int argc, char *argv[])
LOGD("read %d images", (int)images.size());
}
DetectionBasedTracker::Parameters params;
std::string cascadeFrontalfilename=cascadefile;
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker fd(MainDetector, TrackingDetector, params);
DetectionBasedTracker fd(cascadeFrontalfilename, params);
fd.run();
......@@ -135,13 +108,12 @@ static int test_FaceDetector(int argc, char *argv[])
double freq=getTickFrequency();
int num_images=images.size();
for(int n=1; n <= num_images; n++)
{
for(int n=1; n <= num_images; n++) {
int64 tcur=getTickCount();
int64 dt=tcur-tprev;
tprev=tcur;
double t_ms=((double)dt)/freq * 1000.0;
LOGD("\n\nSTEP n=%d from prev step %f ms\n", n, t_ms);
LOGD("\n\nSTEP n=%d from prev step %f ms\n\n", n, t_ms);
m=images[n-1];
CV_Assert(! m.empty());
cvtColor(m, gray, CV_BGR2GRAY);
......@@ -151,8 +123,11 @@ static int test_FaceDetector(int argc, char *argv[])
vector<Rect> result;
fd.getObjects(result);
for(size_t i=0; i < result.size(); i++)
{
for(size_t i=0; i < result.size(); i++) {
Rect r=result[i];
CV_Assert(r.area() > 0);
Point tl=r.tl();
......@@ -161,14 +136,14 @@ static int test_FaceDetector(int argc, char *argv[])
rectangle(m, tl, br, color, 3);
}
}
char outfilename[256];
for(int n=1; n <= num_images; n++)
{
snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
LOGD("outfilename='%s'", outfilename);
m=images[n-1];
imwrite(outfilename, m);
char outfilename[256];
for(int n=1; n <= num_images; n++) {
snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
LOGD("outfilename='%s'", outfilename);
m=images[n-1];
imwrite(outfilename, m);
}
}
fd.stop();
......@@ -176,6 +151,8 @@ static int test_FaceDetector(int argc, char *argv[])
return 0;
}
int main(int argc, char *argv[])
{
return test_FaceDetector(argc, argv);
......
......@@ -26,41 +26,41 @@ static Mat loadImage(const string& name)
int main(int argc, const char* argv[])
{
CommandLineParser cmd(argc, argv,
"{ image i | pic1.png | input image }"
"{ template t | templ.png | template image }"
"{ scale s | | estimate scale }"
"{ rotation r | | estimate rotation }"
"{ gpu | | use gpu version }"
"{ minDist | 100 | minimum distance between the centers of the detected objects }"
"{ levels | 360 | R-Table levels }"
"{ votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
"{ angleThresh | 10000 | angle votes treshold }"
"{ scaleThresh | 1000 | scale votes treshold }"
"{ posThresh | 100 | position votes threshold }"
"{ dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
"{ minScale | 0.5 | minimal scale to detect }"
"{ maxScale | 2 | maximal scale to detect }"
"{ scaleStep | 0.05 | scale step }"
"{ minAngle | 0 | minimal rotation angle to detect in degrees }"
"{ maxAngle | 360 | maximal rotation angle to detect in degrees }"
"{ angleStep | 1 | angle step in degrees }"
"{ maxSize | 1000 | maximal size of inner buffers }"
"{ help h ? | | print help message }"
"{ i | image | pic1.png | input image }"
"{ t | template | templ.png | template image }"
"{ s | scale | | estimate scale }"
"{ r | rotation | | estimate rotation }"
"{ | gpu | | use gpu version }"
"{ | minDist | 100 | minimum distance between the centers of the detected objects }"
"{ | levels | 360 | R-Table levels }"
"{ | votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
"{ | angleThresh | 10000 | angle votes treshold }"
"{ | scaleThresh | 1000 | scale votes treshold }"
"{ | posThresh | 100 | position votes threshold }"
"{ | dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
"{ | minScale | 0.5 | minimal scale to detect }"
"{ | maxScale | 2 | maximal scale to detect }"
"{ | scaleStep | 0.05 | scale step }"
"{ | minAngle | 0 | minimal rotation angle to detect in degrees }"
"{ | maxAngle | 360 | maximal rotation angle to detect in degrees }"
"{ | angleStep | 1 | angle step in degrees }"
"{ | maxSize | 1000 | maximal size of inner buffers }"
"{ h | help | | print help message }"
);
cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform.");
//cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform.");
if (cmd.has("help"))
if (cmd.get<bool>("help"))
{
cmd.printMessage();
cmd.printParams();
return 0;
}
const string templName = cmd.get<string>("template");
const string imageName = cmd.get<string>("image");
const bool estimateScale = cmd.has("scale");
const bool estimateRotation = cmd.has("rotation");
const bool useGpu = cmd.has("gpu");
const bool estimateScale = cmd.get<bool>("scale");
const bool estimateRotation = cmd.get<bool>("rotation");
const bool useGpu = cmd.get<bool>("gpu");
const double minDist = cmd.get<double>("minDist");
const int levels = cmd.get<int>("levels");
const int votesThreshold = cmd.get<int>("votesThreshold");
......@@ -76,12 +76,6 @@ int main(int argc, const char* argv[])
const double angleStep = cmd.get<double>("angleStep");
const int maxSize = cmd.get<int>("maxSize");
if (!cmd.check())
{
cmd.printErrors();
return -1;
}
Mat templ = loadImage(templName);
Mat image = loadImage(imageName);
......
......@@ -364,7 +364,7 @@ TEST(BruteForceMatcher)
// Init GPU matcher
gpu::BFMatcher_GPU d_matcher(NORM_L2);
gpu::BruteForceMatcher_GPU_base d_matcher(gpu::BruteForceMatcher_GPU_base::L2Dist);
gpu::GpuMat d_query(query);
gpu::GpuMat d_train(train);
......
......@@ -57,7 +57,7 @@ int main(int argc, char* argv[])
cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
// matching descriptors
BFMatcher_GPU matcher(NORM_L2);
gpu::BruteForceMatcher_GPU_base matcher(gpu::BruteForceMatcher_GPU_base::L2Dist);
GpuMat trainIdx, distance;
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
......@@ -69,7 +69,7 @@ int main(int argc, char* argv[])
surf.downloadKeypoints(keypoints2GPU, keypoints2);
surf.downloadDescriptors(descriptors1GPU, descriptors1);
surf.downloadDescriptors(descriptors2GPU, descriptors2);
BFMatcher_GPU::matchDownload(trainIdx, distance, matches);
BruteForceMatcher_GPU_base::matchDownload(trainIdx, distance, matches);
// drawing the results
Mat img_matches;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册