Commit 49f6dad1 authored by Andrey Kamaev

Move cv::KeyPoint and cv::DMatch to core

Parent 8eff34ee
ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_features2d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui)
ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui)
......@@ -826,6 +826,96 @@ public:
int start, end;
};
/////////////////////////////// KeyPoint ////////////////////////////////
/*!
The KeyPoint class
An instance of the class stores a keypoint, i.e. a point feature found by one of the many available keypoint detectors, such as
the Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector, etc.
A keypoint is characterized by its 2D position, scale
(proportional to the diameter of the neighborhood that needs to be taken into account),
orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor
(usually represented as a feature vector). Keypoints representing the same object in different images can then be matched using
cv::KDTree or another method.
*/
class CV_EXPORTS_W_SIMPLE KeyPoint
{
public:
//! the default constructor
CV_WRAP KeyPoint();
//! the full constructor
KeyPoint(Point2f _pt, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1);
//! another form of the full constructor
CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1);
size_t hash() const;
//! converts vector of keypoints to vector of points
static void convert(const std::vector<KeyPoint>& keypoints,
CV_OUT std::vector<Point2f>& points2f,
const std::vector<int>& keypointIndexes=std::vector<int>());
//! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation
static void convert(const std::vector<Point2f>& points2f,
CV_OUT std::vector<KeyPoint>& keypoints,
float size=1, float response=1, int octave=0, int class_id=-1);
//! computes overlap for a pair of keypoints;
//! overlap is the ratio between the area of the keypoint regions' intersection and
//! the area of their union (currently a keypoint region is a circle)
static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);
CV_PROP_RW Point2f pt; //!< coordinates of the keypoint
CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood
CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable);
//!< it lies in [0,360) degrees and is measured relative to the
//!< image coordinate system, i.e. clockwise.
CV_PROP_RW float response; //!< the response by which the strongest keypoints have been selected. Can be used for further sorting or subsampling
CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted
CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to)
};
inline KeyPoint::KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {}
inline KeyPoint::KeyPoint(Point2f _pt, float _size, float _angle, float _response, int _octave, int _class_id)
: pt(_pt), size(_size), angle(_angle), response(_response), octave(_octave), class_id(_class_id) {}
inline KeyPoint::KeyPoint(float x, float y, float _size, float _angle, float _response, int _octave, int _class_id)
: pt(x, y), size(_size), angle(_angle), response(_response), octave(_octave), class_id(_class_id) {}
//////////////////////////////// DMatch /////////////////////////////////
/*
* Struct for matching: query descriptor index, train descriptor index, train image index and distance between descriptors.
*/
struct CV_EXPORTS_W_SIMPLE DMatch
{
CV_WRAP DMatch();
CV_WRAP DMatch(int _queryIdx, int _trainIdx, float _distance);
CV_WRAP DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance);
CV_PROP_RW int queryIdx; // query descriptor index
CV_PROP_RW int trainIdx; // train descriptor index
CV_PROP_RW int imgIdx; // train image index
CV_PROP_RW float distance;
// less is better
bool operator<(const DMatch &m) const;
};
inline DMatch::DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {}
inline DMatch::DMatch(int _queryIdx, int _trainIdx, float _distance)
: queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}
inline DMatch::DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance)
: queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}
inline bool DMatch::operator<(const DMatch &m) const { return distance < m.distance; }
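A small sketch of how operator< lets matches be ranked by distance (the indices and distances here are illustrative, not from the commit):
// assumes #include <opencv2/core.hpp> and <algorithm>
std::vector<cv::DMatch> matches;
matches.push_back(cv::DMatch(0, 3, 0.91f));   // queryIdx, trainIdx, distance
matches.push_back(cv::DMatch(1, 7, 0.12f));
matches.push_back(cv::DMatch(2, 5, 0.47f));
std::sort(matches.begin(), matches.end());    // operator< => smallest distance (best match) first
// matches.front() is now the (1, 7, 0.12f) match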
/////////////////////////////// DataType ////////////////////////////////
/*!
......
......@@ -2947,6 +2947,9 @@ static inline void read(const FileNode& node, String& value, const String& defau
CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() );
CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() );
CV_EXPORTS void read(const FileNode& node, std::vector<KeyPoint>& keypoints);
CV_EXPORTS void write(FileStorage& fs, const String& objname, const std::vector<KeyPoint>& keypoints);
inline FileNode::operator int() const
{
int value;
......
......@@ -5522,6 +5522,37 @@ void read( const FileNode& node, SparseMat& mat, const SparseMat& default_mat )
SparseMat(m).copyTo(mat);
}
void write(FileStorage& fs, const String& objname, const std::vector<KeyPoint>& keypoints)
{
WriteStructContext ws(fs, objname, CV_NODE_SEQ + CV_NODE_FLOW);
int i, npoints = (int)keypoints.size();
for( i = 0; i < npoints; i++ )
{
const KeyPoint& kpt = keypoints[i];
cv::write(fs, kpt.pt.x);
cv::write(fs, kpt.pt.y);
cv::write(fs, kpt.size);
cv::write(fs, kpt.angle);
cv::write(fs, kpt.response);
cv::write(fs, kpt.octave);
cv::write(fs, kpt.class_id);
}
}
void read(const FileNode& node, std::vector<KeyPoint>& keypoints)
{
keypoints.resize(0);
FileNodeIterator it = node.begin(), it_end = node.end();
for( ; it != it_end; )
{
KeyPoint kpt;
it >> kpt.pt.x >> kpt.pt.y >> kpt.size >> kpt.angle >> kpt.response >> kpt.octave >> kpt.class_id;
keypoints.push_back(kpt);
}
}
}
/* End of file. */
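A sketch of a round trip through the read/write overloads added above; the file name keypoints.yml is made up for illustration:
// assumes #include <opencv2/core.hpp>
std::vector<cv::KeyPoint> kps(1, cv::KeyPoint(1.f, 2.f, 3.f)), restored;
{
    cv::FileStorage fs("keypoints.yml", cv::FileStorage::WRITE);
    cv::write(fs, "keypoints", kps);      // stored as a flat flow sequence:
}                                         // x y size angle response octave class_id ...
{
    cv::FileStorage fs("keypoints.yml", cv::FileStorage::READ);
    cv::read(fs["keypoints"], restored);  // reads 7 values back per keypoint
}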
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
size_t KeyPoint::hash() const
{
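// FNV-1-style hash: start from the 32-bit offset basis 2166136261 and, for each field,
// multiply by the FNV prime 16777619 and XOR in the field's bits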
size_t _Val = 2166136261U, scale = 16777619U;
Cv32suf u;
u.f = pt.x; _Val = (scale * _Val) ^ u.u;
u.f = pt.y; _Val = (scale * _Val) ^ u.u;
u.f = size; _Val = (scale * _Val) ^ u.u;
u.f = angle; _Val = (scale * _Val) ^ u.u;
u.f = response; _Val = (scale * _Val) ^ u.u;
_Val = (scale * _Val) ^ ((size_t) octave);
_Val = (scale * _Val) ^ ((size_t) class_id);
return _Val;
}
void KeyPoint::convert(const std::vector<KeyPoint>& keypoints, std::vector<Point2f>& points2f,
const std::vector<int>& keypointIndexes)
{
if( keypointIndexes.empty() )
{
points2f.resize( keypoints.size() );
for( size_t i = 0; i < keypoints.size(); i++ )
points2f[i] = keypoints[i].pt;
}
else
{
points2f.resize( keypointIndexes.size() );
for( size_t i = 0; i < keypointIndexes.size(); i++ )
{
int idx = keypointIndexes[i];
if( idx >= 0 )
points2f[i] = keypoints[idx].pt;
else
{
CV_Error( CV_StsBadArg, "keypointIndexes has element < 0. TODO: process this case" );
//points2f[i] = Point2f(-1, -1);
}
}
}
}
void KeyPoint::convert( const std::vector<Point2f>& points2f, std::vector<KeyPoint>& keypoints,
float size, float response, int octave, int class_id )
{
keypoints.resize(points2f.size());
for( size_t i = 0; i < points2f.size(); i++ )
keypoints[i] = KeyPoint(points2f[i], size, -1, response, octave, class_id);
}
float KeyPoint::overlap( const KeyPoint& kp1, const KeyPoint& kp2 )
{
float a = kp1.size * 0.5f;
float b = kp2.size * 0.5f;
float a_2 = a * a;
float b_2 = b * b;
Point2f p1 = kp1.pt;
Point2f p2 = kp2.pt;
float c = (float)norm( p1 - p2 );
float ovrl = 0.f;
// one circle is completely covered by the other => no intersection points!
if( std::min( a, b ) + c <= std::max( a, b ) )
return std::min( a_2, b_2 ) / std::max( a_2, b_2 );
if( c < a + b ) // circles intersect
{
float c_2 = c * c;
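// law of cosines: kp1.size == 2*a and kp2.size == 2*b, so the denominators below are 2*b*c and 2*a*c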
float cosAlpha = ( b_2 + c_2 - a_2 ) / ( kp2.size * c );
float cosBeta = ( a_2 + c_2 - b_2 ) / ( kp1.size * c );
float alpha = acos( cosAlpha );
float beta = acos( cosBeta );
float sinAlpha = sin(alpha);
float sinBeta = sin(beta);
float segmentAreaA = a_2 * beta;
float segmentAreaB = b_2 * alpha;
float triangleAreaA = a_2 * sinBeta * cosBeta;
float triangleAreaB = b_2 * sinAlpha * cosAlpha;
float intersectionArea = segmentAreaA + segmentAreaB - triangleAreaA - triangleAreaB;
float unionArea = (a_2 + b_2) * (float)CV_PI - intersectionArea;
ovrl = intersectionArea / unionArea;
}
return ovrl;
}
} // cv
\ No newline at end of file
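A quick illustrative check of overlap() under two simple configurations (not part of the commit):
// two identical keypoints overlap completely; far-apart ones not at all
cv::KeyPoint k1(0.f, 0.f, 10.f), k2(0.f, 0.f, 10.f), k3(100.f, 0.f, 10.f);
float full = cv::KeyPoint::overlap(k1, k2);   // 1.0f: intersection equals union
float none = cv::KeyPoint::overlap(k1, k3);   // 0.0f: circles of radius 5, centres 100 apart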
......@@ -54,64 +54,10 @@ namespace cv
CV_EXPORTS bool initModule_features2d();
/*!
The KeyPoint class
An instance of the class stores a keypoint, i.e. a point feature found by one of the many available keypoint detectors, such as
the Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector, etc.
A keypoint is characterized by its 2D position, scale
(proportional to the diameter of the neighborhood that needs to be taken into account),
orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor
(usually represented as a feature vector). Keypoints representing the same object in different images can then be matched using
cv::KDTree or another method.
*/
class CV_EXPORTS_W_SIMPLE KeyPoint
{
public:
//! the default constructor
CV_WRAP KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {}
//! the full constructor
KeyPoint(Point2f _pt, float _size, float _angle=-1,
float _response=0, int _octave=0, int _class_id=-1)
: pt(_pt), size(_size), angle(_angle),
response(_response), octave(_octave), class_id(_class_id) {}
//! another form of the full constructor
CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1,
float _response=0, int _octave=0, int _class_id=-1)
: pt(x, y), size(_size), angle(_angle),
response(_response), octave(_octave), class_id(_class_id) {}
size_t hash() const;
//! converts vector of keypoints to vector of points
static void convert(const std::vector<KeyPoint>& keypoints,
CV_OUT std::vector<Point2f>& points2f,
const std::vector<int>& keypointIndexes=std::vector<int>());
//! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation
static void convert(const std::vector<Point2f>& points2f,
CV_OUT std::vector<KeyPoint>& keypoints,
float size=1, float response=1, int octave=0, int class_id=-1);
//! computes overlap for a pair of keypoints;
//! overlap is the ratio between the area of the keypoint regions' intersection and
//! the area of their union (currently a keypoint region is a circle)
static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);
CV_PROP_RW Point2f pt; //!< coordinates of the keypoint
CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood
CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable);
//!< it lies in [0,360) degrees and is measured relative to the
//!< image coordinate system, i.e. clockwise.
CV_PROP_RW float response; //!< the response by which the strongest keypoints have been selected. Can be used for further sorting or subsampling
CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted
CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to)
};
//! writes vector of keypoints to the file storage
CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
//! reads vector of keypoints from the specified file storage node
CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);
// //! writes vector of keypoints to the file storage
// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
// //! reads vector of keypoints from the specified file storage node
// CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);
/*
* A class that filters a vector of keypoints.
......@@ -1028,33 +974,6 @@ template<int cellsize> struct CV_EXPORTS HammingMultilevel
}
};
/****************************************************************************************\
* DMatch *
\****************************************************************************************/
/*
* Struct for matching: query descriptor index, train descriptor index, train image index and distance between descriptors.
*/
struct CV_EXPORTS_W_SIMPLE DMatch
{
CV_WRAP DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {}
CV_WRAP DMatch( int _queryIdx, int _trainIdx, float _distance ) :
queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}
CV_WRAP DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) :
queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}
CV_PROP_RW int queryIdx; // query descriptor index
CV_PROP_RW int trainIdx; // train descriptor index
CV_PROP_RW int imgIdx; // train image index
CV_PROP_RW float distance;
// less is better
bool operator<( const DMatch &m ) const
{
return distance < m.distance;
}
};
/****************************************************************************************\
* DescriptorMatcher *
\****************************************************************************************/
......
......@@ -44,129 +44,6 @@
namespace cv
{
size_t KeyPoint::hash() const
{
size_t _Val = 2166136261U, scale = 16777619U;
Cv32suf u;
u.f = pt.x; _Val = (scale * _Val) ^ u.u;
u.f = pt.y; _Val = (scale * _Val) ^ u.u;
u.f = size; _Val = (scale * _Val) ^ u.u;
u.f = angle; _Val = (scale * _Val) ^ u.u;
u.f = response; _Val = (scale * _Val) ^ u.u;
_Val = (scale * _Val) ^ ((size_t) octave);
_Val = (scale * _Val) ^ ((size_t) class_id);
return _Val;
}
void write(FileStorage& fs, const String& objname, const std::vector<KeyPoint>& keypoints)
{
WriteStructContext ws(fs, objname, CV_NODE_SEQ + CV_NODE_FLOW);
int i, npoints = (int)keypoints.size();
for( i = 0; i < npoints; i++ )
{
const KeyPoint& kpt = keypoints[i];
write(fs, kpt.pt.x);
write(fs, kpt.pt.y);
write(fs, kpt.size);
write(fs, kpt.angle);
write(fs, kpt.response);
write(fs, kpt.octave);
write(fs, kpt.class_id);
}
}
void read(const FileNode& node, std::vector<KeyPoint>& keypoints)
{
keypoints.resize(0);
FileNodeIterator it = node.begin(), it_end = node.end();
for( ; it != it_end; )
{
KeyPoint kpt;
it >> kpt.pt.x >> kpt.pt.y >> kpt.size >> kpt.angle >> kpt.response >> kpt.octave >> kpt.class_id;
keypoints.push_back(kpt);
}
}
void KeyPoint::convert(const std::vector<KeyPoint>& keypoints, std::vector<Point2f>& points2f,
const std::vector<int>& keypointIndexes)
{
if( keypointIndexes.empty() )
{
points2f.resize( keypoints.size() );
for( size_t i = 0; i < keypoints.size(); i++ )
points2f[i] = keypoints[i].pt;
}
else
{
points2f.resize( keypointIndexes.size() );
for( size_t i = 0; i < keypointIndexes.size(); i++ )
{
int idx = keypointIndexes[i];
if( idx >= 0 )
points2f[i] = keypoints[idx].pt;
else
{
CV_Error( CV_StsBadArg, "keypointIndexes has element < 0. TODO: process this case" );
//points2f[i] = Point2f(-1, -1);
}
}
}
}
void KeyPoint::convert( const std::vector<Point2f>& points2f, std::vector<KeyPoint>& keypoints,
float size, float response, int octave, int class_id )
{
keypoints.resize(points2f.size());
for( size_t i = 0; i < points2f.size(); i++ )
keypoints[i] = KeyPoint(points2f[i], size, -1, response, octave, class_id);
}
float KeyPoint::overlap( const KeyPoint& kp1, const KeyPoint& kp2 )
{
float a = kp1.size * 0.5f;
float b = kp2.size * 0.5f;
float a_2 = a * a;
float b_2 = b * b;
Point2f p1 = kp1.pt;
Point2f p2 = kp2.pt;
float c = (float)norm( p1 - p2 );
float ovrl = 0.f;
// one circle is completely covered by the other => no intersection points!
if( std::min( a, b ) + c <= std::max( a, b ) )
return std::min( a_2, b_2 ) / std::max( a_2, b_2 );
if( c < a + b ) // circles intersect
{
float c_2 = c * c;
float cosAlpha = ( b_2 + c_2 - a_2 ) / ( kp2.size * c );
float cosBeta = ( a_2 + c_2 - b_2 ) / ( kp1.size * c );
float alpha = acos( cosAlpha );
float beta = acos( cosBeta );
float sinAlpha = sin(alpha);
float sinBeta = sin(beta);
float segmentAreaA = a_2 * beta;
float segmentAreaB = b_2 * alpha;
float triangleAreaA = a_2 * sinBeta * cosBeta;
float triangleAreaB = b_2 * sinAlpha * cosAlpha;
float intersectionArea = segmentAreaA + segmentAreaB - triangleAreaA - triangleAreaB;
float unionArea = (a_2 + b_2) * (float)CV_PI - intersectionArea;
ovrl = intersectionArea / unionArea;
}
return ovrl;
}
struct KeypointResponseGreaterThanThreshold
{
KeypointResponseGreaterThanThreshold(float _value) :
......
......@@ -47,8 +47,8 @@
#include "opencv2/core/types_c.h"
#include "opencv2/core.hpp"
#include "opencv2/flann/flann_base.hpp"
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/flann/flann_base.hpp"
namespace cvflann
{
......
......@@ -19,8 +19,8 @@ import org.opencv.core.Point3;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.features2d.DMatch;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.highgui.Highgui;
import android.util.Log;
......
package org.opencv.test.features2d;
package org.opencv.test.core;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import junit.framework.TestCase;
......
package org.opencv.test.features2d;
package org.opencv.test.core;
import org.opencv.core.Point;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
public class KeyPointTest extends OpenCVTestCase {
......
......@@ -7,7 +7,7 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -11,11 +11,11 @@ import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -10,7 +10,7 @@ import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
......
......@@ -10,7 +10,7 @@ import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
......
......@@ -10,11 +10,11 @@ import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -10,11 +10,11 @@ import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -9,7 +9,7 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -12,12 +12,12 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Range;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.Features2d;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.highgui.Highgui;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -11,11 +11,11 @@ import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -7,7 +7,7 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -7,7 +7,7 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -9,7 +9,7 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -7,7 +7,7 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -13,7 +13,7 @@ import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
......
......@@ -5,8 +5,8 @@ import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Point3;
import org.opencv.core.Rect;
import org.opencv.features2d.DMatch;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.utils.Converters;
......
......@@ -10,7 +10,7 @@ except:
class_ignore_list = (
#core
"FileNode", "FileStorage", "KDTree",
"FileNode", "FileStorage", "KDTree", "KeyPoint", "DMatch",
#highgui
"VideoWriter", "VideoCapture",
)
......
package org.opencv.features2d;
package org.opencv.core;
import org.opencv.core.Point;
......
......@@ -3,7 +3,7 @@ package org.opencv.core;
import java.util.Arrays;
import java.util.List;
import org.opencv.features2d.DMatch;
import org.opencv.core.DMatch;
public class MatOfDMatch extends Mat {
// 32FC4
......
......@@ -3,7 +3,7 @@ package org.opencv.core;
import java.util.Arrays;
import java.util.List;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.KeyPoint;
public class MatOfKeyPoint extends Mat {
// 32FC7
......
......@@ -14,8 +14,8 @@ import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Point;
import org.opencv.core.Point3;
import org.opencv.core.Rect;
import org.opencv.features2d.DMatch;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
public class Converters {
......
......@@ -23,8 +23,8 @@ import org.opencv.core.Point3;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.features2d.DMatch;
import org.opencv.features2d.KeyPoint;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.highgui.Highgui;
public class OpenCVTestCase extends TestCase {
......
......@@ -3,5 +3,5 @@ if(NOT HAVE_OPENCL)
endif()
set(the_description "OpenCL-accelerated Computer Vision")
ocv_define_module(ocl opencv_core opencv_imgproc opencv_features2d opencv_objdetect opencv_video)
ocv_define_module(ocl opencv_core opencv_imgproc opencv_objdetect opencv_video)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow)
......@@ -50,7 +50,7 @@
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/features2d.hpp"
//#include "opencv2/features2d.hpp"
namespace cv
{
......
......@@ -16,7 +16,7 @@ endif()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
ocv_add_module(ts opencv_core opencv_features2d)
ocv_add_module(ts opencv_core opencv_imgproc opencv_highgui)
ocv_glob_module_sources()
ocv_module_include_directories()
......
......@@ -2,7 +2,6 @@
#define __OPENCV_TS_PERF_HPP__
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "ts_gtest.h"
#ifdef HAVE_TBB
......