Commit a3bde36c authored by: Roman Donchenko

Merge remote-tracking branch 'origin/2.4' into merge-2.4

Conflicts:
	modules/calib3d/include/opencv2/calib3d/calib3d.hpp
	modules/contrib/doc/facerec/facerec_api.rst
	modules/contrib/include/opencv2/contrib/contrib.hpp
	modules/contrib/src/facerec.cpp
	modules/core/include/opencv2/core/mat.hpp
	modules/features2d/include/opencv2/features2d/features2d.hpp
	modules/highgui/src/loadsave.cpp
	modules/imgproc/src/pyramids.cpp
	modules/ocl/include/opencv2/ocl/cl_runtime/cl_runtime.hpp
	modules/python/src2/gen.py
	modules/python/test/test.py
	modules/superres/test/test_superres.cpp
	samples/cpp/facerec_demo.cpp
......@@ -5,7 +5,7 @@
Introduction into Android Development
*************************************
This guide was designed to help you in learning Android development basics and seting up your
This guide was designed to help you in learning Android development basics and setting up your
working environment quickly. It was written with Windows 7 in mind, though it would work with Linux
(Ubuntu), Mac OS X and any other OS supported by Android SDK.
......
This diff is suppressed by .gitattributes.
......@@ -46,6 +46,7 @@
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/core/affine.hpp"
namespace cv
{
......@@ -411,6 +412,66 @@ CV_EXPORTS_W Ptr<StereoSGBM> createStereoSGBM(int minDisparity, int numDispariti
int speckleWindowSize = 0, int speckleRange = 0,
int mode = StereoSGBM::MODE_SGBM);
namespace fisheye
{
enum{
CALIB_USE_INTRINSIC_GUESS = 1,
CALIB_RECOMPUTE_EXTRINSIC = 2,
CALIB_CHECK_COND = 4,
CALIB_FIX_SKEW = 8,
CALIB_FIX_K1 = 16,
CALIB_FIX_K2 = 32,
CALIB_FIX_K3 = 64,
CALIB_FIX_K4 = 128,
CALIB_FIX_INTRINSIC = 256
};
//! projects 3D points using fisheye model
CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
//! projects points using fisheye model
CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
//! distorts 2D points using fisheye model
CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
//! undistorts 2D points using fisheye model
CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted,
InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray());
//! computing undistortion and rectification maps for image transform by cv::remap()
//! If D is empty, zero distortion is used; if R or P is empty, identity matrices are used
CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);
//! undistorts an image, optionally changing resolution and camera matrix. If Knew is empty, the identity matrix is used
CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted,
InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
//! estimates new camera matrix for undistortion or rectification
CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);
//! performs camera calibration
CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
//! stereo rectification estimation
CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
double balance = 0.0, double fov_scale = 1.0);
//! performs stereo calibration
CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
}
} // cv
#endif
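As a rough illustration of how the fisheye API declared above can be used (a hypothetical sketch, not part of this commit): the caller is assumed to have collected per-view chessboard corners in objectPoints/imagePoints already, and all variable names are illustrative only.

#include "opencv2/core.hpp"
#include "opencv2/calib3d.hpp"

// Hypothetical usage of cv::fisheye (illustrative only, not part of the diff above).
void calibrateAndUndistortFisheye(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                                  const std::vector<std::vector<cv::Point2f> >& imagePoints,
                                  const cv::Size& imageSize, const cv::Mat& distortedFrame)
{
    cv::Matx33d K;                        // estimated camera matrix
    cv::Vec4d D;                          // fisheye distortion coefficients k1..k4
    std::vector<cv::Vec3d> rvecs, tvecs;  // per-view extrinsics

    // Estimate intrinsics and extrinsics from the detected corners.
    cv::fisheye::calibrate(objectPoints, imagePoints, imageSize, K, D, rvecs, tvecs,
                           cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW);

    // Undistort a frame with the estimated model; passing K as Knew keeps the original camera matrix.
    cv::Mat undistorted;
    cv::fisheye::undistortImage(distortedFrame, undistorted, K, D, K);
}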
This diff is collapsed.
#ifndef FISHEYE_INTERNAL_H
#define FISHEYE_INTERNAL_H
#include "precomp.hpp"
namespace cv { namespace internal {
struct CV_EXPORTS IntrinsicParams
{
Vec2d f;
Vec2d c;
Vec4d k;
double alpha;
std::vector<int> isEstimate;
IntrinsicParams();
IntrinsicParams(Vec2d f, Vec2d c, Vec4d k, double alpha = 0);
IntrinsicParams operator+(const Mat& a);
IntrinsicParams& operator =(const Mat& a);
void Init(const cv::Vec2d& f, const cv::Vec2d& c, const cv::Vec4d& k = Vec4d(0,0,0,0), const double& alpha = 0);
};
void projectPoints(cv::InputArray objectPoints, cv::OutputArray imagePoints,
cv::InputArray _rvec,cv::InputArray _tvec,
const IntrinsicParams& param, cv::OutputArray jacobian);
void ComputeExtrinsicRefine(const Mat& imagePoints, const Mat& objectPoints, Mat& rvec,
Mat& tvec, Mat& J, const int MaxIter,
const IntrinsicParams& param, const double thresh_cond);
CV_EXPORTS Mat ComputeHomography(Mat m, Mat M);
CV_EXPORTS Mat NormalizePixels(const Mat& imagePoints, const IntrinsicParams& param);
void InitExtrinsics(const Mat& _imagePoints, const Mat& _objectPoints, const IntrinsicParams& param, Mat& omckk, Mat& Tckk);
void CalibrateExtrinsics(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
const IntrinsicParams& param, const int check_cond,
const double thresh_cond, InputOutputArray omc, InputOutputArray Tc);
void ComputeJacobians(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
const IntrinsicParams& param, InputArray omc, InputArray Tc,
const int& check_cond, const double& thresh_cond, Mat& JJ2_inv, Mat& ex3);
CV_EXPORTS void EstimateUncertainties(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
const IntrinsicParams& params, InputArray omc, InputArray Tc,
IntrinsicParams& errors, Vec2d& std_err, double thresh_cond, int check_cond, double& rms);
void dAB(cv::InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB);
void JRodriguesMatlab(const Mat& src, Mat& dst);
void compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2,
Mat& om3, Mat& T3, Mat& dom3dom1, Mat& dom3dT1, Mat& dom3dom2,
Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2);
double median(const Mat& row);
Vec3d median3d(InputArray m);
}}
#endif
This diff is collapsed.
......@@ -1122,7 +1122,7 @@ CV_INLINE CvSetElem* cvSetNew( CvSet* set_header )
set_header->active_count++;
}
else
cvSetAdd( set_header, NULL, (CvSetElem**)&elem );
cvSetAdd( set_header, NULL, &elem );
return elem;
}
......
......@@ -643,7 +643,8 @@ inline void Mat::release()
deallocate();
u = NULL;
data = datastart = dataend = datalimit = 0;
size.p[0] = 0;
for(int i = 0; i < dims; i++)
size.p[i] = 0;
}
inline
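The loop above matters for multi-dimensional matrices: previously only size.p[0] was reset, so stale sizes survived release(). A small hypothetical check of the fixed behaviour (not from the commit):

#include "opencv2/core.hpp"

// Hypothetical check (illustrative only): every dimension is zeroed after release(), not just the first.
void releaseZeroesAllDims()
{
    int sz[] = {4, 5, 6};
    cv::Mat m(3, sz, CV_8UC1);
    m.release();
    CV_Assert(m.size[0] == 0 && m.size[1] == 0 && m.size[2] == 0);
}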
......@@ -2733,7 +2734,7 @@ SparseMatConstIterator_<_Tp>& SparseMatConstIterator_<_Tp>::operator ++()
template<typename _Tp> inline
SparseMatConstIterator_<_Tp> SparseMatConstIterator_<_Tp>::operator ++(int)
{
SparseMatConstIterator it = *this;
SparseMatConstIterator_<_Tp> it = *this;
SparseMatConstIterator::operator ++();
return it;
}
......@@ -2785,7 +2786,7 @@ SparseMatIterator_<_Tp>& SparseMatIterator_<_Tp>::operator ++()
template<typename _Tp> inline
SparseMatIterator_<_Tp> SparseMatIterator_<_Tp>::operator ++(int)
{
SparseMatIterator it = *this;
SparseMatIterator_<_Tp> it = *this;
SparseMatConstIterator::operator ++();
return it;
}
......
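Both hunks fix the post-increment operators so that the copy handed back to the caller is the typed iterator rather than a sliced SparseMatConstIterator. A hypothetical sketch of the pattern this enables (not part of the commit):

#include "opencv2/core.hpp"

// Hypothetical sketch (illustrative only): post-increment of a typed sparse-matrix iterator
// now copies the correctly typed iterator instead of the untyped base class.
void sparseIteratorExample()
{
    int sz[] = {10, 10};
    cv::SparseMat_<float> sm(2, sz);
    sm.ref(3, 4) = 1.f;

    for (cv::SparseMatConstIterator_<float> it = sm.begin(); it != sm.end(); )
    {
        float v = *it++;   // post-increment returns SparseMatConstIterator_<float>
        (void)v;
    }
}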
......@@ -548,7 +548,7 @@ CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value )
else
{
assert( type == CV_64FC1 );
((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = (double)value;
((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = value;
}
}
......
......@@ -1543,17 +1543,17 @@ CV_EXPORTS void evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& im
/*
* Abstract base class for training of a 'bag of visual words' vocabulary from a set of descriptors
*/
class CV_EXPORTS BOWTrainer
class CV_EXPORTS_W BOWTrainer
{
public:
BOWTrainer();
virtual ~BOWTrainer();
void add( const Mat& descriptors );
const std::vector<Mat>& getDescriptors() const;
int descriptorsCount() const;
CV_WRAP void add( const Mat& descriptors );
CV_WRAP const std::vector<Mat>& getDescriptors() const;
CV_WRAP int descriptorsCount() const;
virtual void clear();
CV_WRAP virtual void clear();
/*
* Train visual words vocabulary, that is cluster training descriptors and
......@@ -1562,8 +1562,8 @@ public:
*
* descriptors Training descriptors computed on images keypoints.
*/
virtual Mat cluster() const = 0;
virtual Mat cluster( const Mat& descriptors ) const = 0;
CV_WRAP virtual Mat cluster() const = 0;
CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;
protected:
std::vector<Mat> descriptors;
......@@ -1573,16 +1573,16 @@ protected:
/*
* This is BOWTrainer using cv::kmeans to get vocabulary.
*/
class CV_EXPORTS BOWKMeansTrainer : public BOWTrainer
class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer
{
public:
BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
int attempts=3, int flags=KMEANS_PP_CENTERS );
virtual ~BOWKMeansTrainer();
// Returns trained vocabulary (i.e. cluster centers).
virtual Mat cluster() const;
virtual Mat cluster( const Mat& descriptors ) const;
CV_WRAP virtual Mat cluster() const;
CV_WRAP virtual Mat cluster( const Mat& descriptors ) const;
protected:
......@@ -1595,24 +1595,27 @@ protected:
/*
* Class to compute image descriptor using bag of visual words.
*/
class CV_EXPORTS BOWImgDescriptorExtractor
class CV_EXPORTS_W BOWImgDescriptorExtractor
{
public:
BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,
CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,
const Ptr<DescriptorMatcher>& dmatcher );
BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& dmatcher );
virtual ~BOWImgDescriptorExtractor();
void setVocabulary( const Mat& vocabulary );
const Mat& getVocabulary() const;
CV_WRAP void setVocabulary( const Mat& vocabulary );
CV_WRAP const Mat& getVocabulary() const;
void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
// compute() is not constant because DescriptorMatcher::match is not constant
int descriptorSize() const;
int descriptorType() const;
CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )
{ compute(image,keypoints,imgDescriptor); }
CV_WRAP int descriptorSize() const;
CV_WRAP int descriptorType() const;
protected:
Mat vocabulary;
......
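The CV_EXPORTS_W/CV_WRAP annotations added above only expose the existing calls to the generated Python bindings; the C++ usage is unchanged. A rough end-to-end sketch of the bag-of-visual-words pipeline these classes implement (hypothetical, not part of the commit): random matrices stand in for real descriptors so the sketch stays self-contained, and it exercises the matcher-only constructor and the descriptor-based compute() overload introduced in this hunk.

#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"

// Hypothetical bag-of-visual-words sketch (illustrative only, not part of the diff above).
void bagOfWordsExample()
{
    // Training descriptors would normally come from a feature extractor;
    // random CV_32F data keeps the sketch self-contained.
    cv::Mat trainDescriptors(500, 32, CV_32F);
    cv::randu(trainDescriptors, cv::Scalar::all(0), cv::Scalar::all(1));

    // Cluster the training descriptors into a 50-word vocabulary.
    cv::BOWKMeansTrainer trainer(50);
    trainer.add(trainDescriptors);
    cv::Mat vocabulary = trainer.cluster();

    // Describe a new image by the histogram of its descriptors over the vocabulary.
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce");
    cv::BOWImgDescriptorExtractor bowExtractor(matcher);
    bowExtractor.setVocabulary(vocabulary);

    cv::Mat imageDescriptors(120, 32, CV_32F), bowHistogram;
    cv::randu(imageDescriptors, cv::Scalar::all(0), cv::Scalar::all(1));
    bowExtractor.compute(imageDescriptors, bowHistogram);   // 1 x 50 normalized histogram
}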
......@@ -837,6 +837,66 @@ typename Distance::ResultType ensureSquareDistance( typename Distance::ResultTyp
return dummy( dist );
}
/*
 * ...and a template to ensure the user gets the plain (non-squared) distance,
 * without losing processing time calling sqrt(ensureSquareDistance),
 * which would actually compute sqrt(dist*dist) for the L1 distance, for instance.
*/
template <typename Distance, typename ElementType>
struct simpleDistance
{
typedef typename Distance::ResultType ResultType;
ResultType operator()( ResultType dist ) { return dist; }
};
template <typename ElementType>
struct simpleDistance<L2_Simple<ElementType>, ElementType>
{
typedef typename L2_Simple<ElementType>::ResultType ResultType;
ResultType operator()( ResultType dist ) { return sqrt(dist); }
};
template <typename ElementType>
struct simpleDistance<L2<ElementType>, ElementType>
{
typedef typename L2<ElementType>::ResultType ResultType;
ResultType operator()( ResultType dist ) { return sqrt(dist); }
};
template <typename ElementType>
struct simpleDistance<MinkowskiDistance<ElementType>, ElementType>
{
typedef typename MinkowskiDistance<ElementType>::ResultType ResultType;
ResultType operator()( ResultType dist ) { return sqrt(dist); }
};
template <typename ElementType>
struct simpleDistance<HellingerDistance<ElementType>, ElementType>
{
typedef typename HellingerDistance<ElementType>::ResultType ResultType;
ResultType operator()( ResultType dist ) { return sqrt(dist); }
};
template <typename ElementType>
struct simpleDistance<ChiSquareDistance<ElementType>, ElementType>
{
typedef typename ChiSquareDistance<ElementType>::ResultType ResultType;
ResultType operator()( ResultType dist ) { return sqrt(dist); }
};
template <typename Distance>
typename Distance::ResultType ensureSimpleDistance( typename Distance::ResultType dist )
{
typedef typename Distance::ElementType ElementType;
simpleDistance<Distance, ElementType> dummy;
return dummy( dist );
}
}
#endif //OPENCV_FLANN_DIST_H_
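In other words, generic code can always ask for the "plain" distance and only pays for a sqrt when the underlying metric is stored squared. A hypothetical illustration (not part of the diff):

#include "opencv2/flann.hpp"

// Hypothetical illustration (illustrative only): sqrt is applied for metrics stored squared
// (L2 and friends); L1 values pass through unchanged.
static void simpleDistanceExample()
{
    float fromL2 = cvflann::ensureSimpleDistance< cvflann::L2<float> >(9.0f);  // 3.0f
    float fromL1 = cvflann::ensureSimpleDistance< cvflann::L1<float> >(9.0f);  // 9.0f, unchanged
    (void)fromL2; (void)fromL1;
}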
......@@ -109,10 +109,22 @@ public:
*/
void buildIndex()
{
std::vector<size_t> indices(feature_size_ * CHAR_BIT);
tables_.resize(table_number_);
for (unsigned int i = 0; i < table_number_; ++i) {
//re-initialize the random indices table that the LshTable will use to pick its sub-dimensions
if( (indices.size() == feature_size_ * CHAR_BIT) || (indices.size() < key_size_) )
{
indices.resize( feature_size_ * CHAR_BIT );
for (size_t j = 0; j < feature_size_ * CHAR_BIT; ++j)
indices[j] = j;
std::random_shuffle(indices.begin(), indices.end());
}
lsh::LshTable<ElementType>& table = tables_[i];
table = lsh::LshTable<ElementType>(feature_size_, key_size_);
table = lsh::LshTable<ElementType>(feature_size_, key_size_, indices);
// Add the features to the table
table.add(dataset_);
......
......@@ -153,7 +153,7 @@ public:
* @param feature_size is the size of the feature (considered as a ElementType[])
* @param key_size is the number of bits that are turned on in the feature
*/
LshTable(unsigned int /*feature_size*/, unsigned int /*key_size*/)
LshTable(unsigned int /*feature_size*/, unsigned int /*key_size*/, std::vector<size_t> & /*indices*/)
{
std::cerr << "LSH is not implemented for that type" << std::endl;
assert(0);
......@@ -339,20 +339,20 @@ private:
// Specialization for unsigned char
template<>
inline LshTable<unsigned char>::LshTable(unsigned int feature_size, unsigned int subsignature_size)
inline LshTable<unsigned char>::LshTable( unsigned int feature_size,
unsigned int subsignature_size,
std::vector<size_t> & indices )
{
initialize(subsignature_size);
// Allocate the mask
mask_ = std::vector<size_t>((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0);
// A bit brutal but fast to code
std::vector<size_t> indices(feature_size * CHAR_BIT);
for (size_t i = 0; i < feature_size * CHAR_BIT; ++i) indices[i] = i;
std::random_shuffle(indices.begin(), indices.end());
// Generate a random set of order of subsignature_size_ bits
for (unsigned int i = 0; i < key_size_; ++i) {
size_t index = indices[i];
//Ensure the Nth bit will be selected only once among the different LshTables
//to avoid having two different tables with signatures sharing many dimensions/many bits
size_t index = indices[0];
indices.erase( indices.begin() );
// Set that bit in the mask
size_t divisor = CHAR_BIT * sizeof(size_t);
......
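Taken together, the two hunks above make all LSH tables share one shuffled pool of bit indices: each table consumes key_size entries from the front, so a bit is reused only after the pool has been exhausted and refilled. A standalone hypothetical sketch of that scheme (not the FLANN code itself; all names are illustrative):

#include <algorithm>
#include <cstddef>
#include <vector>

// Standalone sketch of the shared-index scheme used above (illustrative only).
void sharedIndexPoolSketch()
{
    const size_t feature_bits = 64, key_size = 16, table_number = 6;
    std::vector<size_t> pool;

    for (size_t t = 0; t < table_number; ++t)
    {
        // Refill and reshuffle only when the pool cannot supply one more table.
        if (pool.size() < key_size)
        {
            pool.resize(feature_bits);
            for (size_t j = 0; j < feature_bits; ++j) pool[j] = j;
            std::random_shuffle(pool.begin(), pool.end());
        }
        // Each table takes key_size distinct bit positions from the front of the pool.
        std::vector<size_t> table_bits(pool.begin(), pool.begin() + key_size);
        pool.erase(pool.begin(), pool.begin() + key_size);
        (void)table_bits;
    }
}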
......@@ -158,12 +158,12 @@ the symptoms were damaged image and 'Corrupt JPEG data: premature end of data se
11th patch: Apr 13, 2010, Filipe Almeida filipe.almeida@ist.utl.pt
- Tries to setup all properties first through v4l2_ioctl call.
- Allows seting up all Video4Linux properties through cvSetCaptureProperty instead of only CV_CAP_PROP_BRIGHTNESS, CV_CAP_PROP_CONTRAST, CV_CAP_PROP_SATURATION, CV_CAP_PROP_HUE, CV_CAP_PROP_GAIN and CV_CAP_PROP_EXPOSURE.
- Allows setting up all Video4Linux properties through cvSetCaptureProperty instead of only CV_CAP_PROP_BRIGHTNESS, CV_CAP_PROP_CONTRAST, CV_CAP_PROP_SATURATION, CV_CAP_PROP_HUE, CV_CAP_PROP_GAIN and CV_CAP_PROP_EXPOSURE.
12th patch: Apr 16, 2010, Filipe Almeida filipe.almeida@ist.utl.pt
- CvCaptureCAM_V4L structure cleanup (no longer needs <PROPERTY>_{min,max,} variables)
- Introduction of v4l2_ctrl_range - minimum and maximum allowed values for v4l controls
- Allows seting up all Video4Linux properties through cvSetCaptureProperty using input values between 0.0 and 1.0
- Allows setting up all Video4Linux properties through cvSetCaptureProperty using input values between 0.0 and 1.0
- Gets v4l properties first through v4l2_ioctl call (ignores capture->is_v4l2_device)
- cvGetCaptureProperty adjusted to support the changes
- Returns device properties to initial values after device closes
......
......@@ -138,9 +138,9 @@ static ImageDecoder findDecoder( const Mat& buf )
maxlen = std::max(maxlen, len);
}
String signature(maxlen, ' ');
size_t bufSize = buf.rows*buf.cols*buf.elemSize();
maxlen = std::min(maxlen, bufSize);
String signature(maxlen, ' ');
memcpy( (void*)signature.c_str(), buf.data, maxlen );
for( i = 0; i < codecs.decoders.size(); i++ )
......
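Clamping maxlen to the buffer size keeps findDecoder from reading past the end of a very small input buffer while probing codec signatures. A hypothetical illustration (not part of the commit; the header location assumes imread/imdecode still live in highgui at this point in the merge):

#include <vector>
#include "opencv2/highgui.hpp"

// Hypothetical sketch (illustrative only): a buffer shorter than the longest codec signature
// no longer triggers an out-of-bounds read; decoding simply fails to find a decoder.
void decodeTinyBuffer()
{
    std::vector<uchar> tiny(3, 0);
    cv::Mat img = cv::imdecode(tiny, cv::IMREAD_UNCHANGED);
    CV_Assert(img.empty());
}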
......@@ -504,6 +504,8 @@ Constructs the Gaussian pyramid for an image.
:param maxlevel: 0-based index of the last (the smallest) pyramid layer. It must be non-negative.
:param borderType: Pixel extrapolation method (BORDER_CONSTANT is not supported). See :ocv:func:`borderInterpolate` for details.
The function constructs a vector of images and builds the Gaussian pyramid by recursively applying
:ocv:func:`pyrDown` to the previously built pyramid layers, starting from ``dst[0]==src`` .
......@@ -1256,12 +1258,16 @@ Blurs an image and downsamples it.
:param dst: output image; it has the specified size and the same type as ``src``.
:param dstsize: size of the output image; by default, it is computed as ``Size((src.cols+1)/2, (src.rows+1)/2)``, but in any case, the following conditions should be satisfied:
:param dstsize: size of the output image.
.. math::
:param borderType: Pixel extrapolation method (BORDER_CONSTANT is not supported). See :ocv:func:`borderInterpolate` for details.
By default, size of the output image is computed as ``Size((src.cols+1)/2, (src.rows+1)/2)``, but in any case, the following conditions should be satisfied:
\begin{array}{l}
| \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}
.. math::
\begin{array}{l}
| \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}
The function performs the downsampling step of the Gaussian pyramid construction. First, it convolves the source image with the kernel:
......@@ -1271,8 +1277,6 @@ The function performs the downsampling step of the Gaussian pyramid construction
Then, it downsamples the image by rejecting even rows and columns.
pyrUp
-----
Upsamples an image and then blurs it.
......@@ -1287,12 +1291,16 @@ Upsamples an image and then blurs it.
:param dst: output image. It has the specified size and the same type as ``src`` .
:param dstsize: size of the output image; by default, it is computed as ``Size(src.cols*2, (src.rows*2)``, but in any case, the following conditions should be satisfied:
:param dstsize: size of the output image.
.. math::
:param borderType: Pixel extrapolation method (only BORDER_DEFAULT supported). See :ocv:func:`borderInterpolate` for details.
By default, size of the output image is computed as ``Size(src.cols*2, src.rows*2)``, but in any case, the following conditions should be satisfied:
.. math::
\begin{array}{l}
| \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}
\begin{array}{l}
| \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}
The function performs the upsampling step of the Gaussian pyramid construction, though it can actually be used to construct the Laplacian pyramid. First, it upsamples the source image by injecting even zero rows and columns and then convolves the result with the same kernel as in
:ocv:func:`pyrDown` multiplied by 4.
......
......@@ -502,6 +502,8 @@ static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int
void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
CV_Assert(borderType != BORDER_CONSTANT);
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_pyrDown(_src, _dst, _dsz, borderType))
......@@ -571,6 +573,8 @@ void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borde
void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
CV_Assert(borderType == BORDER_DEFAULT);
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_pyrUp(_src, _dst, _dsz, borderType))
......@@ -640,6 +644,8 @@ void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderT
void cv::buildPyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel, int borderType )
{
CV_Assert(borderType != BORDER_CONSTANT);
if (_src.dims() <= 2 && _dst.isUMatVector())
{
UMat src = _src.getUMat();
......
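The new assertions make the documented border-type restrictions explicit: pyrDown and buildPyramid reject BORDER_CONSTANT, and pyrUp accepts only BORDER_DEFAULT. A hypothetical usage sketch satisfying those constraints (not part of the commit):

#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

// Hypothetical sketch (illustrative only): default border types satisfy the new assertions.
void pyramidExample(const cv::Mat& src)
{
    cv::Mat down, up;
    cv::pyrDown(src, down);                // default dstsize Size((src.cols+1)/2, (src.rows+1)/2)
    cv::pyrUp(down, up, src.size());       // explicit dstsize within the documented bounds

    std::vector<cv::Mat> pyramid;
    cv::buildPyramid(src, pyramid, 4);     // 5 images: pyramid[0] == src plus 4 downsampled levels

    // cv::pyrDown(src, down, cv::Size(), cv::BORDER_CONSTANT);  // would now trigger the CV_Assert
}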
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
// Authors:
// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com
//
// This workaround code was taken from PCL library(www.pointclouds.org)
//
// Modified by Jasper Shemilt to work with VTK 6.2
// The fix was needed because GetCocoaServer has been moved from
// vtkCocoaRenderWindowInteractor to vtkCocoaRenderWindow in VTK 6.2.
// This alteration to VTK happened almost a year ago according to the gitHub
// commit a3e9fc9.
//
//M*/
#import <Cocoa/Cocoa.h>
#include <vtkCocoaRenderWindow.h>
......@@ -118,14 +124,14 @@
[application stop:application];
NSEvent *event = [NSEvent otherEventWithType:NSApplicationDefined
location:NSMakePoint(0.0,0.0)
modifierFlags:0
timestamp:0
windowNumber:-1
context:nil
subtype:0
data1:0
data2:0];
location:NSMakePoint(0.0,0.0)
modifierFlags:0
timestamp:0
windowNumber:-1
context:nil
subtype:0
data1:0
data2:0];
[application postEvent:event atStart:YES];
}
......@@ -154,30 +160,121 @@
//----------------------------------------------------------------------------
#if VTK_MAJOR_VERSION >= 6 && VTK_MINOR_VERSION >=2
namespace cv { namespace viz
{
class vtkCocoaRenderWindowInteractorFix : public vtkCocoaRenderWindowInteractor
{
public:
static vtkCocoaRenderWindowInteractorFix *New ();
vtkTypeMacro (vtkCocoaRenderWindowInteractorFix, vtkCocoaRenderWindowInteractor)
virtual void Start ();
virtual void TerminateApp ();
protected:
vtkCocoaRenderWindowInteractorFix () {}
~vtkCocoaRenderWindowInteractorFix () {}
private:
vtkCocoaRenderWindowInteractorFix (const vtkCocoaRenderWindowInteractorFix&); // Not implemented.
void operator = (const vtkCocoaRenderWindowInteractorFix&); // Not implemented.
};
vtkStandardNewMacro (vtkCocoaRenderWindowInteractorFix)
vtkSmartPointer<vtkRenderWindowInteractor> vtkCocoaRenderWindowInteractorNew();
class vtkCocoaRenderWindowFix : public vtkCocoaRenderWindow
{
public:
static vtkCocoaRenderWindowFix *New ();
vtkTypeMacro ( vtkCocoaRenderWindowFix, vtkCocoaRenderWindow)
virtual vtkCocoaServerFix * GetCocoaServer ();
virtual void SetCocoaServer (void* );
protected:
vtkCocoaRenderWindowFix () {}
~vtkCocoaRenderWindowFix () {}
private:
vtkCocoaRenderWindowFix (const vtkCocoaRenderWindowInteractorFix&); // Not implemented.
void operator = (const vtkCocoaRenderWindowFix&); // Not implemented.
};
vtkStandardNewMacro (vtkCocoaRenderWindowFix)
vtkSmartPointer<vtkRenderWindow> vtkCocoaRenderWindowNew();
}}
vtkCocoaServerFix * cv::viz::vtkCocoaRenderWindowFix::GetCocoaServer ()
{
return reinterpret_cast<vtkCocoaServerFix*> (this->GetCocoaServer ());
}
void cv::viz::vtkCocoaRenderWindowFix::SetCocoaServer (void* server)
{
class vtkCocoaRenderWindowInteractorFix : public vtkCocoaRenderWindowInteractor
this->SetCocoaServer (server);
}
void cv::viz::vtkCocoaRenderWindowInteractorFix::Start ()
{
vtkCocoaRenderWindowFix* renWin = vtkCocoaRenderWindowFix::SafeDownCast(this->GetRenderWindow ());
if (renWin != NULL)
{
public:
static vtkCocoaRenderWindowInteractorFix *New ();
vtkTypeMacro (vtkCocoaRenderWindowInteractorFix, vtkCocoaRenderWindowInteractor)
vtkCocoaServerFix *server = reinterpret_cast<vtkCocoaServerFix*> (renWin->GetCocoaServer ());
if (!renWin->GetCocoaServer ())
{
server = [vtkCocoaServerFix cocoaServerWithRenderWindow:renWin];
renWin->SetCocoaServer (reinterpret_cast<void*> (server));
}
[server start];
}
}
void cv::viz::vtkCocoaRenderWindowInteractorFix::TerminateApp ()
{
vtkCocoaRenderWindowFix *renWin = vtkCocoaRenderWindowFix::SafeDownCast (this->RenderWindow);
if (renWin)
{
vtkCocoaServerFix *server = reinterpret_cast<vtkCocoaServerFix*> (renWin->GetCocoaServer ());
[server stop];
}
}
vtkSmartPointer<vtkRenderWindowInteractor> cv::viz::vtkCocoaRenderWindowInteractorNew()
{
return vtkSmartPointer<vtkCocoaRenderWindowInteractorFix>::New();
}
#else
namespace cv { namespace viz
{
class vtkCocoaRenderWindowInteractorFix : public vtkCocoaRenderWindowInteractor
{
public:
static vtkCocoaRenderWindowInteractorFix *New ();
vtkTypeMacro (vtkCocoaRenderWindowInteractorFix, vtkCocoaRenderWindowInteractor)
virtual void Start ();
virtual void TerminateApp ();
virtual void Start ();
virtual void TerminateApp ();
protected:
vtkCocoaRenderWindowInteractorFix () {}
~vtkCocoaRenderWindowInteractorFix () {}
protected:
vtkCocoaRenderWindowInteractorFix () {}
~vtkCocoaRenderWindowInteractorFix () {}
private:
vtkCocoaRenderWindowInteractorFix (const vtkCocoaRenderWindowInteractorFix&); // Not implemented.
void operator = (const vtkCocoaRenderWindowInteractorFix&); // Not implemented.
};
private:
vtkCocoaRenderWindowInteractorFix (const vtkCocoaRenderWindowInteractorFix&); // Not implemented.
void operator = (const vtkCocoaRenderWindowInteractorFix&); // Not implemented.
};
vtkStandardNewMacro (vtkCocoaRenderWindowInteractorFix)
vtkStandardNewMacro (vtkCocoaRenderWindowInteractorFix)
vtkSmartPointer<vtkRenderWindowInteractor> vtkCocoaRenderWindowInteractorNew();
}}
vtkSmartPointer<vtkRenderWindowInteractor> vtkCocoaRenderWindowInteractorNew();
}}
void cv::viz::vtkCocoaRenderWindowInteractorFix::Start ()
{
......@@ -209,3 +306,5 @@ vtkSmartPointer<vtkRenderWindowInteractor> cv::viz::vtkCocoaRenderWindowInteract
{
return vtkSmartPointer<vtkCocoaRenderWindowInteractorFix>::New();
}
#endif
......@@ -56,7 +56,7 @@ public class Puzzle15Activity extends Activity implements CvCameraViewListener,
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
Log.d(TAG, "Creating and seting view");
Log.d(TAG, "Creating and setting view");
mOpenCvCameraView = (CameraBridgeViewBase) new JavaCameraView(this, -1);
setContentView(mOpenCvCameraView);
mOpenCvCameraView.setCvCameraViewListener(this);
......