提交 02d34bda 编写于 作者: A Alexey Spizhevoy

Refactored videostab module

上级 79e20706
...@@ -57,8 +57,6 @@ ...@@ -57,8 +57,6 @@
#include "opencv2/gpu/gpu.hpp" #include "opencv2/gpu/gpu.hpp"
#endif #endif
// TODO remove code duplications (in cpp too)
namespace cv namespace cv
{ {
namespace videostab namespace videostab
...@@ -73,10 +71,65 @@ CV_EXPORTS Mat estimateGlobalMotionRobust( ...@@ -73,10 +71,65 @@ CV_EXPORTS Mat estimateGlobalMotionRobust(
const RansacParams &params = RansacParams::default2dMotion(MM_AFFINE), const RansacParams &params = RansacParams::default2dMotion(MM_AFFINE),
float *rmse = 0, int *ninliers = 0); float *rmse = 0, int *ninliers = 0);
class CV_EXPORTS GlobalMotionEstimatorBase class CV_EXPORTS MotionEstimatorBase
{ {
public: public:
virtual ~GlobalMotionEstimatorBase() {} virtual ~MotionEstimatorBase() {}
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
virtual MotionModel motionModel() const { return motionModel_; }
virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) = 0;
protected:
MotionEstimatorBase(MotionModel model) { setMotionModel(model); }
private:
MotionModel motionModel_;
};
// Robust global 2D motion estimator working on point correspondences.
// Fits the configured motion model with RANSAC and reports failure via the
// estimate() 'ok' flag when the inlier ratio falls below a threshold.
class CV_EXPORTS MotionEstimatorRansacL2 : public MotionEstimatorBase
{
public:
MotionEstimatorRansacL2(MotionModel model = MM_AFFINE);
// RANSAC parameters used when searching for the motion hypothesis.
void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
RansacParams ransacParams() const { return ransacParams_; }
// Minimum inlier ratio below which the estimated motion is rejected.
void setMinInlierRatio(float val) { minInlierRatio_ = val; }
float minInlierRatio() const { return minInlierRatio_; }
// Estimates motion between two equally-sized 2D point sets; sets *ok
// (if non-null) to indicate whether the estimate is trustworthy.
virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0);
private:
RansacParams ransacParams_;
float minInlierRatio_;
};
// Global 2D motion estimator that minimizes the L1 fit error by solving a
// linear program (requires the Clp library at build time).
class CV_EXPORTS MotionEstimatorL1 : public MotionEstimatorBase
{
public:
    MotionEstimatorL1(MotionModel model = MM_AFFINE);

    // Estimates motion between two equally-sized 2D point sets.
    // Default argument added to match MotionEstimatorBase::estimate() and
    // the sibling MotionEstimatorRansacL2 — callers through a derived-typed
    // pointer may now omit 'ok' too.
    virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0);

private:
    // LP problem description: objective, column/row bounds and the sparse
    // constraint matrix in triplet (row, col, value) form.
    std::vector<double> obj_, collb_, colub_;
    std::vector<double> elems_, rowlb_, rowub_;
    std::vector<int> rows_, cols_;

    // Appends one coefficient to the sparse constraint matrix.
    void set(int row, int col, double coef)
    {
        rows_.push_back(row);
        cols_.push_back(col);
        elems_.push_back(coef);
    }
};
class CV_EXPORTS ImageMotionEstimatorBase
{
public:
virtual ~ImageMotionEstimatorBase() {}
virtual void setMotionModel(MotionModel val) { motionModel_ = val; } virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
virtual MotionModel motionModel() const { return motionModel_; } virtual MotionModel motionModel() const { return motionModel_; }
...@@ -84,12 +137,13 @@ public: ...@@ -84,12 +137,13 @@ public:
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0; virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0;
protected: protected:
GlobalMotionEstimatorBase(MotionModel model) { setMotionModel(model); } ImageMotionEstimatorBase(MotionModel model) { setMotionModel(model); }
private:
MotionModel motionModel_; MotionModel motionModel_;
}; };
class CV_EXPORTS FromFileMotionReader : public GlobalMotionEstimatorBase class CV_EXPORTS FromFileMotionReader : public ImageMotionEstimatorBase
{ {
public: public:
FromFileMotionReader(const std::string &path); FromFileMotionReader(const std::string &path);
...@@ -100,50 +154,45 @@ private: ...@@ -100,50 +154,45 @@ private:
std::ifstream file_; std::ifstream file_;
}; };
class CV_EXPORTS ToFileMotionWriter : public GlobalMotionEstimatorBase class CV_EXPORTS ToFileMotionWriter : public ImageMotionEstimatorBase
{ {
public: public:
ToFileMotionWriter(const std::string &path, Ptr<GlobalMotionEstimatorBase> estimator); ToFileMotionWriter(const std::string &path, Ptr<ImageMotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0); virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
private: private:
std::ofstream file_; std::ofstream file_;
Ptr<GlobalMotionEstimatorBase> estimator_; Ptr<ImageMotionEstimatorBase> motionEstimator_;
}; };
class CV_EXPORTS RansacMotionEstimator : public GlobalMotionEstimatorBase class CV_EXPORTS KeypointBasedMotionEstimator : public ImageMotionEstimatorBase
{ {
public: public:
RansacMotionEstimator(MotionModel model = MM_AFFINE); KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
void setDetector(Ptr<FeatureDetector> val) { detector_ = val; } void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
Ptr<FeatureDetector> detector() const { return detector_; } Ptr<FeatureDetector> detector() const { return detector_; }
void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; } void setOpticalFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; } Ptr<ISparseOptFlowEstimator> opticalFlowEstimator() const { return optFlowEstimator_; }
void setGridSize(Size val) { gridSize_ = val; }
Size gridSize() const { return gridSize_; }
void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
RansacParams ransacParams() const { return ransacParams_; }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; } void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; } Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
void setMinInlierRatio(float val) { minInlierRatio_ = val; }
float minInlierRatio() const { return minInlierRatio_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0); virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
private: private:
Ptr<MotionEstimatorBase> motionEstimator_;
Ptr<FeatureDetector> detector_; Ptr<FeatureDetector> detector_;
Ptr<ISparseOptFlowEstimator> optFlowEstimator_; Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
Size gridSize_;
RansacParams ransacParams_;
Ptr<IOutlierRejector> outlierRejector_; Ptr<IOutlierRejector> outlierRejector_;
float minInlierRatio_;
std::vector<uchar> status_; std::vector<uchar> status_;
std::vector<KeyPoint> keypointsPrev_; std::vector<KeyPoint> keypointsPrev_;
...@@ -152,29 +201,25 @@ private: ...@@ -152,29 +201,25 @@ private:
}; };
#if HAVE_OPENCV_GPU #if HAVE_OPENCV_GPU
class CV_EXPORTS RansacMotionEstimatorGpu : public GlobalMotionEstimatorBase class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase
{ {
public: public:
RansacMotionEstimatorGpu(MotionModel model = MM_AFFINE); KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator);
void setRansacParams(const RansacParams &val) { ransacParams_ = val; } virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
RansacParams ransacParams() const { return ransacParams_; } virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; } void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; } Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
void setMinInlierRatio(float val) { minInlierRatio_ = val; }
float minInlierRatio() const { return minInlierRatio_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0); virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
Mat estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok = 0); Mat estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok = 0);
private: private:
Ptr<MotionEstimatorBase> motionEstimator_;
gpu::GoodFeaturesToTrackDetector_GPU detector_; gpu::GoodFeaturesToTrackDetector_GPU detector_;
SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_; SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_;
RansacParams ransacParams_;
Ptr<IOutlierRejector> outlierRejector_; Ptr<IOutlierRejector> outlierRejector_;
float minInlierRatio_;
gpu::GpuMat frame0_, grayFrame0_, frame1_; gpu::GpuMat frame0_, grayFrame0_, frame1_;
gpu::GpuMat pointsPrev_, points_; gpu::GpuMat pointsPrev_, points_;
...@@ -186,44 +231,6 @@ private: ...@@ -186,44 +231,6 @@ private:
}; };
#endif #endif
class CV_EXPORTS LpBasedMotionEstimator : public GlobalMotionEstimatorBase
{
public:
LpBasedMotionEstimator(MotionModel model = MM_AFFINE);
void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
Ptr<FeatureDetector> detector() const { return detector_; }
void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok);
private:
Ptr<FeatureDetector> detector_;
Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
Ptr<IOutlierRejector> outlierRejector_;
std::vector<uchar> status_;
std::vector<KeyPoint> keypointsPrev_;
std::vector<Point2f> pointsPrev_, points_;
std::vector<Point2f> pointsPrevGood_, pointsGood_;
std::vector<double> obj_, collb_, colub_;
std::vector<int> rows_, cols_;
std::vector<double> elems_, rowlb_, rowub_;
void set(int row, int col, double coef)
{
rows_.push_back(row);
cols_.push_back(col);
elems_.push_back(coef);
}
};
CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions); CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);
} // namespace videostab } // namespace videostab
......
...@@ -74,8 +74,8 @@ public: ...@@ -74,8 +74,8 @@ public:
void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; } void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; }
Ptr<IFrameSource> frameSource() const { return frameSource_; } Ptr<IFrameSource> frameSource() const { return frameSource_; }
void setMotionEstimator(Ptr<GlobalMotionEstimatorBase> val) { motionEstimator_ = val; } void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }
Ptr<GlobalMotionEstimatorBase> motionEstimator() const { return motionEstimator_; } Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }
void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; } void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; }
Ptr<DeblurerBase> deblurrer() const { return deblurer_; } Ptr<DeblurerBase> deblurrer() const { return deblurer_; }
...@@ -107,7 +107,7 @@ protected: ...@@ -107,7 +107,7 @@ protected:
Ptr<ILog> log_; Ptr<ILog> log_;
Ptr<IFrameSource> frameSource_; Ptr<IFrameSource> frameSource_;
Ptr<GlobalMotionEstimatorBase> motionEstimator_; Ptr<ImageMotionEstimatorBase> motionEstimator_;
Ptr<DeblurerBase> deblurer_; Ptr<DeblurerBase> deblurer_;
Ptr<InpainterBase> inpainter_; Ptr<InpainterBase> inpainter_;
int radius_; int radius_;
......
...@@ -64,8 +64,8 @@ public: ...@@ -64,8 +64,8 @@ public:
virtual ~WobbleSuppressorBase() {} virtual ~WobbleSuppressorBase() {}
void setMotionEstimator(Ptr<GlobalMotionEstimatorBase> val) { motionEstimator_ = val; } void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }
Ptr<GlobalMotionEstimatorBase> motionEstimator() const { return motionEstimator_; } Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }
virtual void suppress(int idx, const Mat &frame, Mat &result) = 0; virtual void suppress(int idx, const Mat &frame, Mat &result) = 0;
...@@ -85,7 +85,7 @@ public: ...@@ -85,7 +85,7 @@ public:
virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; } virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }
protected: protected:
Ptr<GlobalMotionEstimatorBase> motionEstimator_; Ptr<ImageMotionEstimatorBase> motionEstimator_;
int frameCount_; int frameCount_;
const std::vector<Mat> *motions_; const std::vector<Mat> *motions_;
const std::vector<Mat> *motions2_; const std::vector<Mat> *motions2_;
......
...@@ -404,8 +404,183 @@ Mat estimateGlobalMotionRobust( ...@@ -404,8 +404,183 @@ Mat estimateGlobalMotionRobust(
} }
// Constructs the estimator with RANSAC parameters matching the given motion
// model and a default minimum inlier ratio of 0.1.
MotionEstimatorRansacL2::MotionEstimatorRansacL2(MotionModel model)
: MotionEstimatorBase(model)
{
setRansacParams(RansacParams::default2dMotion(model));
setMinInlierRatio(0.1f);
}
// Estimates global motion between two corresponding 2D point sets.
// Uses robust L2 estimation for all models except MM_HOMOGRAPHY, which is
// delegated to findHomography(). If the inlier ratio is below
// minInlierRatio_, returns identity and sets *ok to false.
// points0/points1: point sets of equal length and type.
// ok: optional output confidence flag (may be null).
// Returns a 3x3 CV_32F motion matrix.
Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bool *ok)
{
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

    // Guard against empty input: the inlier-ratio check below would
    // otherwise divide by zero.
    if (npoints == 0)
    {
        if (ok) *ok = false;
        return Mat::eye(3, 3, CV_32F);
    }

    // find motion
    int ninliers = 0;
    Mat_<float> M;

    if (motionModel() != MM_HOMOGRAPHY)
        M = estimateGlobalMotionRobust(
                points0, points1, motionModel(), ransacParams_, 0, &ninliers);
    else
    {
        vector<uchar> mask;
        M = findHomography(points0, points1, mask, CV_RANSAC, ransacParams_.thresh);
        for (int i = 0; i < npoints; ++i)
            if (mask[i]) ninliers++;
    }

    // check if we're confident enough in estimated motion
    if (ok) *ok = true;
    if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
    {
        M = Mat::eye(3, 3, CV_32F);
        if (ok) *ok = false;
    }

    return M;
}
// Constructs the L1 (LP-based) motion estimator for the given motion model.
// No extra configuration is needed; the LP is built per call in estimate().
MotionEstimatorL1::MotionEstimatorL1(MotionModel model)
: MotionEstimatorBase(model)
{
}
// TODO will estimation of all motions as one LP problem be faster?
// Estimates global motion between two corresponding 2D point sets by
// minimizing the L1 fit error as a linear program solved with the COIN-OR
// Clp simplex solver.
// points0/points1: point sets of equal length and type.
// ok: optional output flag; true on success, false when Clp is unavailable.
// Returns a 3x3 CV_32F motion matrix (identity without Clp support).
Mat MotionEstimatorL1::estimate(InputArray points0, InputArray points1, bool *ok)
{
CV_Assert(points0.type() == points1.type());
const int npoints = points0.getMat().checkVector(2);
CV_Assert(points1.getMat().checkVector(2) == npoints);
const Point2f *points0_ = points0.getMat().ptr<Point2f>();
const Point2f *points1_ = points1.getMat().ptr<Point2f>();
#ifndef HAVE_CLP
// Clp is a build-time option; without it L1 estimation cannot run.
CV_Error(CV_StsError, "The library is built without Clp support");
if (ok) *ok = false;
return Mat::eye(3, 3, CV_32F);
#else
// Only fully linear models can be expressed as an LP here.
CV_Assert(motionModel() <= MM_AFFINE && motionModel() != MM_RIGID);
// prepare LP problem
// Columns: 6 affine coefficients followed by 2 non-negative slack
// variables (x and y residual bounds) per point.
int ncols = 6 + 2*npoints;
// Rows: 4 residual inequalities per point + model-specific equality rows.
int nrows = 4*npoints;
if (motionModel() == MM_SIMILARITY)
nrows += 2;
else if (motionModel() == MM_TRANSLATION_AND_SCALE)
nrows += 3;
else if (motionModel() == MM_TRANSLATION)
nrows += 4;
rows_.clear();
cols_.clear();
elems_.clear();
// Objective: minimize the sum of the per-point slack variables.
obj_.assign(ncols, 0);
collb_.assign(ncols, -INF);
colub_.assign(ncols, INF);
int c = 6;
for (int i = 0; i < npoints; ++i, c += 2)
{
obj_[c] = 1;
collb_[c] = 0;
obj_[c+1] = 1;
collb_[c+1] = 0;
}
elems_.clear();
rowlb_.assign(nrows, -INF);
rowub_.assign(nrows, INF);
// Residual constraints: -slack <= A*p0 - p1 <= slack for each point,
// split into two upper-bound and two lower-bound rows.
int r = 0;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i, r += 4)
{
p0 = points0_[i];
p1 = points1_[i];
set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
rowub_[r] = p1.x;
set(r+1, 3, p0.x); set(r+1, 4, p0.y); set(r+1, 5, 1); set(r+1, 6+2*i+1, -1);
rowub_[r+1] = p1.y;
set(r+2, 0, p0.x); set(r+2, 1, p0.y); set(r+2, 2, 1); set(r+2, 6+2*i, 1);
rowlb_[r+2] = p1.x;
set(r+3, 3, p0.x); set(r+3, 4, p0.y); set(r+3, 5, 1); set(r+3, 6+2*i+1, 1);
rowlb_[r+3] = p1.y;
}
// Equality rows restricting the affine coefficients to the chosen model.
if (motionModel() == MM_SIMILARITY)
{
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
}
else if (motionModel() == MM_TRANSLATION_AND_SCALE)
{
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
}
else if (motionModel() == MM_TRANSLATION)
{
set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
set(r+3, 4, 1); rowlb_[r+3] = rowub_[r+3] = 1;
}
// solve
CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
A.setDimensions(nrows, ncols);
ClpSimplex model(false);
model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);
ClpDualRowSteepest dualSteep(1);
model.setDualRowPivotAlgorithm(dualSteep);
model.scaling(1);
model.dual();
// extract motion
const double *sol = model.getColSolution();
Mat_<float> M = Mat::eye(3, 3, CV_32F);
M(0,0) = sol[0];
M(0,1) = sol[1];
M(0,2) = sol[2];
M(1,0) = sol[3];
M(1,1) = sol[4];
M(1,2) = sol[5];
if (ok) *ok = true;
return M;
#endif
}
FromFileMotionReader::FromFileMotionReader(const string &path) FromFileMotionReader::FromFileMotionReader(const string &path)
: GlobalMotionEstimatorBase(MM_UNKNOWN) : ImageMotionEstimatorBase(MM_UNKNOWN)
{ {
file_.open(path.c_str()); file_.open(path.c_str());
CV_Assert(file_.is_open()); CV_Assert(file_.is_open());
...@@ -424,19 +599,18 @@ Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/, ...@@ -424,19 +599,18 @@ Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/,
} }
ToFileMotionWriter::ToFileMotionWriter(const string &path, Ptr<GlobalMotionEstimatorBase> estimator) ToFileMotionWriter::ToFileMotionWriter(const string &path, Ptr<ImageMotionEstimatorBase> estimator)
: GlobalMotionEstimatorBase(estimator->motionModel()) : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{ {
file_.open(path.c_str()); file_.open(path.c_str());
CV_Assert(file_.is_open()); CV_Assert(file_.is_open());
estimator_ = estimator;
} }
Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok) Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{ {
bool ok_; bool ok_;
Mat_<float> M = estimator_->estimate(frame0, frame1, &ok_); Mat_<float> M = motionEstimator_->estimate(frame0, frame1, &ok_);
file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " " file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
<< M(1,0) << " " << M(1,1) << " " << M(1,2) << " " << M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
<< M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << endl; << M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << endl;
...@@ -445,43 +619,26 @@ Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok) ...@@ -445,43 +619,26 @@ Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
} }
RansacMotionEstimator::RansacMotionEstimator(MotionModel model) KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
: GlobalMotionEstimatorBase(model) : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{ {
setDetector(new GoodFeaturesToTrackDetector()); setDetector(new GoodFeaturesToTrackDetector());
setOptFlowEstimator(new SparsePyrLkOptFlowEstimator()); setOpticalFlowEstimator(new SparsePyrLkOptFlowEstimator());
setGridSize(Size(0,0));
setRansacParams(RansacParams::default2dMotion(model));
setOutlierRejector(new NullOutlierRejector()); setOutlierRejector(new NullOutlierRejector());
setMinInlierRatio(0.1f);
} }
Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok) Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{ {
// find keypoints // find keypoints
detector_->detect(frame0, keypointsPrev_); detector_->detect(frame0, keypointsPrev_);
// add extra keypoints
if (gridSize_.width > 0 && gridSize_.height > 0)
{
float dx = static_cast<float>(frame0.cols) / (gridSize_.width + 1);
float dy = static_cast<float>(frame0.rows) / (gridSize_.height + 1);
for (int x = 0; x < gridSize_.width; ++x)
for (int y = 0; y < gridSize_.height; ++y)
keypointsPrev_.push_back(KeyPoint((x+1)*dx, (y+1)*dy, 0.f));
}
// extract points from keypoints // extract points from keypoints
pointsPrev_.resize(keypointsPrev_.size()); pointsPrev_.resize(keypointsPrev_.size());
for (size_t i = 0; i < keypointsPrev_.size(); ++i) for (size_t i = 0; i < keypointsPrev_.size(); ++i)
pointsPrev_[i] = keypointsPrev_[i].pt; pointsPrev_[i] = keypointsPrev_[i].pt;
// find correspondences // find correspondences
optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray()); optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
// leave good correspondences only // leave good correspondences only
...@@ -498,7 +655,7 @@ Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool * ...@@ -498,7 +655,7 @@ Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *
} }
} }
// perfrom outlier rejection // perform outlier rejection
IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_); IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
if (!dynamic_cast<NullOutlierRejector*>(outlierRejector)) if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
...@@ -508,8 +665,11 @@ Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool * ...@@ -508,8 +665,11 @@ Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *
outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_); outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size()); pointsPrevGood_.clear();
pointsGood_.clear(); pointsGood_.reserve(points_.size()); pointsPrevGood_.reserve(points_.size());
pointsGood_.clear();
pointsGood_.reserve(points_.size());
for (size_t i = 0; i < points_.size(); ++i) for (size_t i = 0; i < points_.size(); ++i)
{ {
...@@ -521,49 +681,21 @@ Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool * ...@@ -521,49 +681,21 @@ Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *
} }
} }
size_t npoints = pointsGood_.size(); // estimate motion
return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok);
// find motion
int ninliers = 0;
Mat_<float> M;
if (motionModel_ != MM_HOMOGRAPHY)
M = estimateGlobalMotionRobust(
pointsPrevGood_, pointsGood_, motionModel_, ransacParams_, 0, &ninliers);
else
{
vector<uchar> mask;
M = findHomography(pointsPrevGood_, pointsGood_, mask, CV_RANSAC, ransacParams_.thresh);
for (size_t i = 0; i < npoints; ++i)
if (mask[i]) ninliers++;
}
// check if we're confident enough in estimated motion
if (ok) *ok = true;
if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
{
M = Mat::eye(3, 3, CV_32F);
if (ok) *ok = false;
}
return M;
} }
#if HAVE_OPENCV_GPU #if HAVE_OPENCV_GPU
RansacMotionEstimatorGpu::RansacMotionEstimatorGpu(MotionModel model) KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator)
: GlobalMotionEstimatorBase(model) : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{ {
CV_Assert(gpu::getCudaEnabledDeviceCount() > 0); CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
setRansacParams(RansacParams::default2dMotion(model));
setOutlierRejector(new NullOutlierRejector()); setOutlierRejector(new NullOutlierRejector());
setMinInlierRatio(0.1f);
} }
Mat RansacMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok) Mat KeypointBasedMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{ {
frame0_.upload(frame0); frame0_.upload(frame0);
frame1_.upload(frame1); frame1_.upload(frame1);
...@@ -571,7 +703,7 @@ Mat RansacMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, boo ...@@ -571,7 +703,7 @@ Mat RansacMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, boo
} }
Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok) Mat KeypointBasedMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok)
{ {
// convert frame to gray if it's color // convert frame to gray if it's color
...@@ -585,29 +717,29 @@ Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::Gpu ...@@ -585,29 +717,29 @@ Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::Gpu
} }
// find keypoints // find keypoints
detector_(grayFrame0, pointsPrev_); detector_(grayFrame0, pointsPrev_);
// find correspondences // find correspondences
optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_); optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);
// leave good correspondences only // leave good correspondences only
gpu::compactPoints(pointsPrev_, points_, status_); gpu::compactPoints(pointsPrev_, points_, status_);
pointsPrev_.download(hostPointsPrev_); pointsPrev_.download(hostPointsPrev_);
points_.download(hostPoints_); points_.download(hostPoints_);
// perfrom outlier rejection // perform outlier rejection
IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_); IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
if (!dynamic_cast<NullOutlierRejector*>(outlierRejector)) if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
{ {
outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_); outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);
hostPointsPrevTmp_.clear(); hostPointsPrevTmp_.reserve(hostPoints_.cols); hostPointsPrevTmp_.clear();
hostPointsTmp_.clear(); hostPointsTmp_.reserve(hostPoints_.cols); hostPointsPrevTmp_.reserve(hostPoints_.cols);
hostPointsTmp_.clear();
hostPointsTmp_.reserve(hostPoints_.cols);
for (int i = 0; i < hostPoints_.cols; ++i) for (int i = 0; i < hostPoints_.cols; ++i)
{ {
...@@ -622,216 +754,10 @@ Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::Gpu ...@@ -622,216 +754,10 @@ Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::Gpu
hostPoints_ = Mat(1, hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]); hostPoints_ = Mat(1, hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]);
} }
// find motion // estimate motion
return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok);
int npoints = hostPoints_.cols;
int ninliers = 0;
Mat_<float> M;
if (motionModel_ != MM_HOMOGRAPHY)
M = estimateGlobalMotionRobust(
hostPointsPrev_, hostPoints_, motionModel_, ransacParams_, 0, &ninliers);
else
{
vector<uchar> mask;
M = findHomography(hostPointsPrev_, hostPoints_, mask, CV_RANSAC, ransacParams_.thresh);
for (int i = 0; i < npoints; ++i)
if (mask[i]) ninliers++;
}
// check if we're confident enough in estimated motion
if (ok) *ok = true;
if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
{
M = Mat::eye(3, 3, CV_32F);
if (ok) *ok = false;
}
return M;
}
#endif // #if HAVE_OPENCV_GPU
// Configures the LP-based estimator with default keypoint detection,
// sparse pyramidal LK optical flow, and no outlier rejection (null object).
LpBasedMotionEstimator::LpBasedMotionEstimator(MotionModel model)
: GlobalMotionEstimatorBase(model)
{
setDetector(new GoodFeaturesToTrackDetector());
setOptFlowEstimator(new SparsePyrLkOptFlowEstimator());
setOutlierRejector(new NullOutlierRejector());
}
// TODO will estimation of all motions as one LP problem be faster?
// Estimates global motion between two frames: detects keypoints in frame0,
// tracks them into frame1 with sparse optical flow, optionally rejects
// outliers, then fits the motion model by minimizing the L1 error as a
// linear program solved with the COIN-OR Clp simplex solver.
// ok: optional output flag; true on success, false when Clp is unavailable.
// Returns a 3x3 CV_32F motion matrix (identity without Clp support).
Mat LpBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
// find keypoints
detector_->detect(frame0, keypointsPrev_);
// extract points from keypoints
pointsPrev_.resize(keypointsPrev_.size());
for (size_t i = 0; i < keypointsPrev_.size(); ++i)
pointsPrev_[i] = keypointsPrev_[i].pt;
// find correspondences
optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
// leave good correspondences only (those successfully tracked by the flow)
pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
pointsGood_.clear(); pointsGood_.reserve(points_.size());
for (size_t i = 0; i < points_.size(); ++i)
{
if (status_[i])
{
pointsPrevGood_.push_back(pointsPrev_[i]);
pointsGood_.push_back(points_[i]);
}
}
// perform outlier rejection (skipped when the rejector is the null object)
IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
{
pointsPrev_.swap(pointsPrevGood_);
points_.swap(pointsGood_);
outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
pointsGood_.clear(); pointsGood_.reserve(points_.size());
for (size_t i = 0; i < points_.size(); ++i)
{
if (status_[i])
{
pointsPrevGood_.push_back(pointsPrev_[i]);
pointsGood_.push_back(points_[i]);
}
}
}
// prepare LP problem
#ifndef HAVE_CLP
// Clp is a build-time option; without it L1 estimation cannot run.
CV_Error(CV_StsError, "The library is built without Clp support");
if (ok) *ok = false;
return Mat::eye(3, 3, CV_32F);
#else
// Only fully linear models can be expressed as an LP here.
CV_Assert(motionModel_ <= MM_AFFINE && motionModel_ != MM_RIGID);
int npoints = static_cast<int>(pointsGood_.size());
// Columns: 6 affine coefficients + 2 non-negative slack variables per point.
int ncols = 6 + 2*npoints;
// Rows: 4 residual inequalities per point + model-specific equality rows.
int nrows = 4*npoints;
if (motionModel_ == MM_SIMILARITY)
nrows += 2;
else if (motionModel_ == MM_TRANSLATION_AND_SCALE)
nrows += 3;
else if (motionModel_ == MM_TRANSLATION)
nrows += 4;
rows_.clear();
cols_.clear();
elems_.clear();
// Objective: minimize the sum of the per-point slack variables.
obj_.assign(ncols, 0);
collb_.assign(ncols, -INF);
colub_.assign(ncols, INF);
int c = 6;
for (int i = 0; i < npoints; ++i, c += 2)
{
obj_[c] = 1;
collb_[c] = 0;
obj_[c+1] = 1;
collb_[c+1] = 0;
}
elems_.clear();
rowlb_.assign(nrows, -INF);
rowub_.assign(nrows, INF);
// Residual constraints: -slack <= A*p0 - p1 <= slack for each point.
int r = 0;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i, r += 4)
{
p0 = pointsPrevGood_[i];
p1 = pointsGood_[i];
set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
rowub_[r] = p1.x;
set(r+1, 3, p0.x); set(r+1, 4, p0.y); set(r+1, 5, 1); set(r+1, 6+2*i+1, -1);
rowub_[r+1] = p1.y;
set(r+2, 0, p0.x); set(r+2, 1, p0.y); set(r+2, 2, 1); set(r+2, 6+2*i, 1);
rowlb_[r+2] = p1.x;
set(r+3, 3, p0.x); set(r+3, 4, p0.y); set(r+3, 5, 1); set(r+3, 6+2*i+1, 1);
rowlb_[r+3] = p1.y;
}
// Equality rows restricting the affine coefficients to the chosen model.
if (motionModel_ == MM_SIMILARITY)
{
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
}
else if (motionModel_ == MM_TRANSLATION_AND_SCALE)
{
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
}
else if (motionModel_ == MM_TRANSLATION)
{
set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
set(r+3, 4, 1); rowlb_[r+3] = rowub_[r+3] = 1;
}
// solve
CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
A.setDimensions(nrows, ncols);
ClpSimplex model(false);
model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);
ClpDualRowSteepest dualSteep(1);
model.setDualRowPivotAlgorithm(dualSteep);
model.scaling(1);
model.dual();
// extract motion
const double *sol = model.getColSolution();
Mat_<float> M = Mat::eye(3, 3, CV_32F);
M(0,0) = sol[0];
M(0,1) = sol[1];
M(0,2) = sol[2];
M(1,0) = sol[3];
M(1,1) = sol[4];
M(1,2) = sol[5];
if (ok) *ok = true;
return M;
#endif
}
#endif // HAVE_OPENCV_GPU
Mat getMotion(int from, int to, const vector<Mat> &motions) Mat getMotion(int from, int to, const vector<Mat> &motions)
......
...@@ -58,7 +58,7 @@ StabilizerBase::StabilizerBase() ...@@ -58,7 +58,7 @@ StabilizerBase::StabilizerBase()
{ {
setLog(new LogToStdout()); setLog(new LogToStdout());
setFrameSource(new NullFrameSource()); setFrameSource(new NullFrameSource());
setMotionEstimator(new RansacMotionEstimator()); setMotionEstimator(new KeypointBasedMotionEstimator(new MotionEstimatorRansacL2()));
setDeblurer(new NullDeblurer()); setDeblurer(new NullDeblurer());
setInpainter(new NullInpainter()); setInpainter(new NullInpainter());
setRadius(15); setRadius(15);
......
...@@ -53,9 +53,7 @@ namespace videostab ...@@ -53,9 +53,7 @@ namespace videostab
WobbleSuppressorBase::WobbleSuppressorBase() : motions_(0), stabilizationMotions_(0) WobbleSuppressorBase::WobbleSuppressorBase() : motions_(0), stabilizationMotions_(0)
{ {
RansacMotionEstimator *est = new RansacMotionEstimator(); setMotionEstimator(new KeypointBasedMotionEstimator(new MotionEstimatorRansacL2(MM_HOMOGRAPHY)));
est->setMotionModel(MM_HOMOGRAPHY);
est->setRansacParams(RansacParams::default2dMotion(MM_HOMOGRAPHY));
} }
......
...@@ -85,8 +85,6 @@ void printHelp() ...@@ -85,8 +85,6 @@ void printHelp()
" Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n" " Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
" --nkps=<int_number>\n" " --nkps=<int_number>\n"
" Number of keypoints to find in each frame. The default is 1000.\n" " Number of keypoints to find in each frame. The default is 1000.\n"
" --extra-kps=<int_number>\n"
" Extra keypoint grid size for motion estimation. The default is 0.\n"
" --local-outlier-rejection=(yes|no)\n" " --local-outlier-rejection=(yes|no)\n"
" Perform local outlier rejection. The default is no.\n\n" " Perform local outlier rejection. The default is no.\n\n"
" -sm, --save-motions=(<file_path>|no)\n" " -sm, --save-motions=(<file_path>|no)\n"
...@@ -154,8 +152,6 @@ void printHelp() ...@@ -154,8 +152,6 @@ void printHelp()
" Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n" " Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
" --ws-nkps=<int_number>\n" " --ws-nkps=<int_number>\n"
" Number of keypoints to find in each frame. The default is 1000.\n" " Number of keypoints to find in each frame. The default is 1000.\n"
" --ws-extra-kps=<int_number>\n"
" Extra keypoint grid size for motion estimation. The default is 0.\n"
" --ws-local-outlier-rejection=(yes|no)\n" " --ws-local-outlier-rejection=(yes|no)\n"
" Perform local outlier rejection. The default is no.\n\n" " Perform local outlier rejection. The default is no.\n\n"
" -sm2, --save-motions2=(<file_path>|no)\n" " -sm2, --save-motions2=(<file_path>|no)\n"
...@@ -181,24 +177,22 @@ class IMotionEstimatorBuilder ...@@ -181,24 +177,22 @@ class IMotionEstimatorBuilder
{ {
public: public:
virtual ~IMotionEstimatorBuilder() {} virtual ~IMotionEstimatorBuilder() {}
virtual Ptr<GlobalMotionEstimatorBase> build() = 0; virtual Ptr<ImageMotionEstimatorBase> build() = 0;
protected: protected:
IMotionEstimatorBuilder(CommandLineParser &cmd) : cmd(cmd) {} IMotionEstimatorBuilder(CommandLineParser &cmd) : cmd(cmd) {}
CommandLineParser cmd; CommandLineParser cmd;
}; };
class RansacMotionEstimatorBuilder : public IMotionEstimatorBuilder class MotionEstimatorRansacL2Builder : public IMotionEstimatorBuilder
{ {
public: public:
RansacMotionEstimatorBuilder(CommandLineParser &cmd, const string &prefix = "") MotionEstimatorRansacL2Builder(CommandLineParser &cmd, bool gpu, const string &prefix = "")
: IMotionEstimatorBuilder(cmd), prefix(prefix) {} : IMotionEstimatorBuilder(cmd), gpu(gpu), prefix(prefix) {}
virtual Ptr<GlobalMotionEstimatorBase> build() virtual Ptr<ImageMotionEstimatorBase> build()
{ {
RansacMotionEstimator *est = new RansacMotionEstimator(motionModel(arg(prefix + "model"))); MotionEstimatorRansacL2 *est = new MotionEstimatorRansacL2(motionModel(arg(prefix + "model")));
est->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
RansacParams ransac = est->ransacParams(); RansacParams ransac = est->ransacParams();
if (arg(prefix + "subset") != "auto") if (arg(prefix + "subset") != "auto")
...@@ -208,97 +202,76 @@ public: ...@@ -208,97 +202,76 @@ public:
ransac.eps = argf(prefix + "outlier-ratio"); ransac.eps = argf(prefix + "outlier-ratio");
est->setRansacParams(ransac); est->setRansacParams(ransac);
est->setGridSize(Size(argi(prefix + "extra-kps"), argi(prefix + "extra-kps"))); est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector(); Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
if (arg(prefix + "local-outlier-rejection") == "yes") if (arg(prefix + "local-outlier-rejection") == "yes")
{ {
TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector(); TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
RansacParams ransacParams = tor->ransacParams(); RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto") if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh"); ransacParams.thresh = argf(prefix + "thresh");
tor->setRansacParams(ransacParams); tblor->setRansacParams(ransacParams);
outlierRejector = tor; outlierRejector = tblor;
} }
est->setOutlierRejector(outlierRejector);
est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
return est;
}
private:
string prefix;
};
#if HAVE_OPENCV_GPU #if HAVE_OPENCV_GPU
class RansacMotionEstimatorBuilderGpu : public IMotionEstimatorBuilder if (gpu)
{
public:
RansacMotionEstimatorBuilderGpu(CommandLineParser &cmd, const string &prefix = "")
: IMotionEstimatorBuilder(cmd), prefix(prefix) {}
virtual Ptr<GlobalMotionEstimatorBase> build()
{
RansacMotionEstimatorGpu *est = new RansacMotionEstimatorGpu(motionModel(arg(prefix + "model")));
RansacParams ransac = est->ransacParams();
if (arg(prefix + "subset") != "auto")
ransac.size = argi(prefix + "subset");
if (arg(prefix + "thresh") != "auto")
ransac.thresh = argi(prefix + "thresh");
ransac.eps = argf(prefix + "outlier-ratio");
est->setRansacParams(ransac);
Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
if (arg(prefix + "local-outlier-rejection") == "yes")
{ {
TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector(); KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
RansacParams ransacParams = tor->ransacParams(); kbest->setOutlierRejector(outlierRejector);
if (arg(prefix + "thresh") != "auto") return kbest;
ransacParams.thresh = argf(prefix + "thresh");
tor->setRansacParams(ransacParams);
outlierRejector = tor;
} }
est->setOutlierRejector(outlierRejector); #endif
est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
return est; KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
} }
private: private:
bool gpu;
string prefix; string prefix;
}; };
#endif
class LpBasedMotionEstimatorBuilder : public IMotionEstimatorBuilder class MotionEstimatorL1Builder : public IMotionEstimatorBuilder
{ {
public: public:
LpBasedMotionEstimatorBuilder(CommandLineParser &cmd, const string &prefix = "") MotionEstimatorL1Builder(CommandLineParser &cmd, bool gpu, const string &prefix = "")
: IMotionEstimatorBuilder(cmd), prefix(prefix) {} : IMotionEstimatorBuilder(cmd), gpu(gpu), prefix(prefix) {}
virtual Ptr<GlobalMotionEstimatorBase> build() virtual Ptr<ImageMotionEstimatorBase> build()
{ {
LpBasedMotionEstimator *est = new LpBasedMotionEstimator(motionModel(arg(prefix + "model"))); MotionEstimatorL1 *est = new MotionEstimatorL1(motionModel(arg(prefix + "model")));
est->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector(); Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
if (arg(prefix + "local-outlier-rejection") == "yes") if (arg(prefix + "local-outlier-rejection") == "yes")
{ {
TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector(); TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
RansacParams ransacParams = tor->ransacParams(); RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto") if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh"); ransacParams.thresh = argf(prefix + "thresh");
tor->setRansacParams(ransacParams); tblor->setRansacParams(ransacParams);
outlierRejector = tor; outlierRejector = tblor;
}
#if HAVE_OPENCV_GPU
if (gpu)
{
KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
kbest->setOutlierRejector(outlierRejector);
return kbest;
} }
est->setOutlierRejector(outlierRejector); #endif
return est; KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
} }
private: private:
bool gpu;
string prefix; string prefix;
}; };
...@@ -399,40 +372,16 @@ int main(int argc, const char **argv) ...@@ -399,40 +372,16 @@ int main(int argc, const char **argv)
// prepare motion estimation builders // prepare motion estimation builders
Ptr<IMotionEstimatorBuilder> motionEstBuilder; Ptr<IMotionEstimatorBuilder> motionEstBuilder;
#if HAVE_OPENCV_GPU if (arg("lin-prog-motion-est") == "yes")
if (arg("gpu") == "yes") motionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes");
{
if (arg("lin-prog-motion-est") == "yes")
motionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd);
else
motionEstBuilder = new RansacMotionEstimatorBuilderGpu(cmd);
}
else else
#endif motionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes");
{
if (arg("lin-prog-motion-est") == "yes")
motionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd);
else
motionEstBuilder = new RansacMotionEstimatorBuilder(cmd);
}
Ptr<IMotionEstimatorBuilder> wsMotionEstBuilder; Ptr<IMotionEstimatorBuilder> wsMotionEstBuilder;
#if HAVE_OPENCV_GPU if (arg("ws-lp") == "yes")
if (arg("gpu") == "yes") wsMotionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-");
{
if (arg("ws-lp") == "yes")
wsMotionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd, "ws-");
else
wsMotionEstBuilder = new RansacMotionEstimatorBuilderGpu(cmd, "ws-");
}
else else
#endif wsMotionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-");
{
if (arg("ws-lp") == "yes")
wsMotionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd, "ws-");
else
wsMotionEstBuilder = new RansacMotionEstimatorBuilder(cmd, "ws-");
}
// determine whether we must use one pass or two pass stabilizer // determine whether we must use one pass or two pass stabilizer
bool isTwoPass = bool isTwoPass =
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册