Commit c1e5c16f authored by Maksim Shabunin

Backport C-API cleanup (imgproc) from 5.x

Parent c63d79c5
......@@ -204,9 +204,9 @@ receives three arguments:
We can choose any of three shapes for our kernel:
- Rectangular box: CV_SHAPE_RECT
- Cross: CV_SHAPE_CROSS
- Ellipse: CV_SHAPE_ELLIPSE
- Rectangular box: Imgproc.SHAPE_RECT
- Cross: Imgproc.SHAPE_CROSS
- Ellipse: Imgproc.SHAPE_ELLIPSE
Together with the shape we specify the size of our kernel and the *anchor point*. If the anchor point is not
specified, it is assumed to be in the center.
......
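As a reminder of how these shape constants are consumed, here is a minimal hedged C++ sketch (the C++ counterparts of the Java names above are cv::MORPH_RECT, cv::MORPH_CROSS and cv::MORPH_ELLIPSE); the 5x5 size and the center anchor are illustrative choices, not taken from the tutorial:

```cpp
#include <opencv2/imgproc.hpp>

// Erode 'src' with a 5x5 cross-shaped kernel anchored at its center.
cv::Mat erodeWithCross(const cv::Mat& src)
{
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(5, 5),
                                               cv::Point(-1, -1)); // (-1,-1) = center anchor
    cv::Mat dst;
    cv::erode(src, dst, kernel);
    return dst;
}
```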
......@@ -27,19 +27,19 @@ Theory
(\f$d(H_{1}, H_{2})\f$) to express how well both histograms match.
- OpenCV implements the function @ref cv::compareHist to perform a comparison. It also offers 4
different metrics to compute the matching:
-# **Correlation ( CV_COMP_CORREL )**
-# **Correlation ( cv::HISTCMP_CORREL )**
\f[d(H_1,H_2) = \frac{\sum_I (H_1(I) - \bar{H_1}) (H_2(I) - \bar{H_2})}{\sqrt{\sum_I(H_1(I) - \bar{H_1})^2 \sum_I(H_2(I) - \bar{H_2})^2}}\f]
where
\f[\bar{H_k} = \frac{1}{N} \sum _J H_k(J)\f]
and \f$N\f$ is the total number of histogram bins.
-# **Chi-Square ( CV_COMP_CHISQR )**
-# **Chi-Square ( cv::HISTCMP_CHISQR )**
\f[d(H_1,H_2) = \sum _I \frac{\left(H_1(I)-H_2(I)\right)^2}{H_1(I)}\f]
-# **Intersection ( method=CV_COMP_INTERSECT )**
-# **Intersection ( method=cv::HISTCMP_INTERSECT )**
\f[d(H_1,H_2) = \sum _I \min (H_1(I), H_2(I))\f]
-# **Bhattacharyya distance ( CV_COMP_BHATTACHARYYA )**
-# **Bhattacharyya distance ( cv::HISTCMP_BHATTACHARYYA )**
\f[d(H_1,H_2) = \sqrt{1 - \frac{1}{\sqrt{\bar{H_1} \bar{H_2} N^2}} \sum_I \sqrt{H_1(I) \cdot H_2(I)}}\f]
Code
......
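For illustration, a hedged sketch comparing two precomputed single-channel histograms with each of the renamed metrics; h1 and h2 are assumed to be CV_32F histograms produced elsewhere (e.g. by cv::calcHist), and this is not taken from the tutorial's own code section:

```cpp
#include <opencv2/imgproc.hpp>
#include <cstdio>

// Compare two precomputed histograms with the four metrics discussed above.
void compareAllMetrics(const cv::Mat& h1, const cv::Mat& h2)
{
    const int methods[] = { cv::HISTCMP_CORREL, cv::HISTCMP_CHISQR,
                            cv::HISTCMP_INTERSECT, cv::HISTCMP_BHATTACHARYYA };
    for (int m : methods)
        std::printf("method %d: d = %f\n", m, cv::compareHist(h1, h2, m));
}
```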
......@@ -101,8 +101,6 @@ namespace cv {
#define MAX_CONTOUR_APPROX 7
#define USE_CV_FINDCONTOURS // switch between cv::findContours() and legacy C API
#ifdef USE_CV_FINDCONTOURS
struct QuadCountour {
Point pt[4];
int parent_contour;
......@@ -113,18 +111,6 @@ struct QuadCountour {
pt[0] = pt_[0]; pt[1] = pt_[1]; pt[2] = pt_[2]; pt[3] = pt_[3];
}
};
#else
} // namespace
#include "opencv2/imgproc/imgproc_c.h"
namespace cv {
struct CvContourEx
{
CV_CONTOUR_FIELDS()
int counter;
};
#endif
/** This structure stores information about the chessboard corner.*/
......@@ -552,13 +538,7 @@ bool findChessboardCorners(InputArray image_, Size pattern_size,
rectangle( thresh_img_new, Point(0,0), Point(thresh_img_new.cols-1, thresh_img_new.rows-1), Scalar(255,255,255), 3, LINE_8);
detector.reset();
#ifdef USE_CV_FINDCONTOURS
Mat binarized_img = thresh_img_new;
#else
Mat binarized_img = thresh_img_new.clone(); // make clone because cvFindContours modifies the source image
#endif
detector.generateQuads(binarized_img, flags);
detector.generateQuads(thresh_img_new, flags);
DPRINTF("Quad count: %d/%d", detector.all_quads_count, (pattern_size.width/2+1)*(pattern_size.height/2+1));
SHOW_QUADS("New quads", thresh_img_new, &detector.all_quads[0], detector.all_quads_count);
if (detector.processQuads(out_corners, prev_sqr_size))
......@@ -623,13 +603,7 @@ bool findChessboardCorners(InputArray image_, Size pattern_size,
rectangle( thresh_img, Point(0,0), Point(thresh_img.cols-1, thresh_img.rows-1), Scalar(255,255,255), 3, LINE_8);
detector.reset();
#ifdef USE_CV_FINDCONTOURS
Mat binarized_img = thresh_img;
#else
Mat binarized_img = (useAdaptive) ? thresh_img : thresh_img.clone(); // make clone because cvFindContours modifies the source image
#endif
detector.generateQuads(binarized_img, flags);
detector.generateQuads(thresh_img, flags);
DPRINTF("Quad count: %d/%d", detector.all_quads_count, (pattern_size.width/2+1)*(pattern_size.height/2+1));
SHOW_QUADS("Old quads", thresh_img, &detector.all_quads[0], detector.all_quads_count);
if (detector.processQuads(out_corners, prev_sqr_size))
......@@ -1376,7 +1350,6 @@ int ChessBoardDetector::checkQuadGroup(std::vector<ChessBoardQuad*>& quad_group,
for (int j = 0; j < 4; ++j)
{
//cvLine( debug_img, cvPointFrom32f(q->corners[j]->pt), cvPointFrom32f(q->corners[(j+1)&3]->pt), color, 1, CV_AA, 0 );
if (q->neighbors[j])
{
int next_j = (j + 1) & 3;
......@@ -1465,7 +1438,6 @@ int ChessBoardDetector::checkQuadGroup(std::vector<ChessBoardQuad*>& quad_group,
goto finalize;
cur->row = 0;
//cvCircle( debug_img, cvPointFrom32f(cur->pt), 3, cvScalar(0,255,0), -1, 8, 0 );
first = below; // remember the first corner in the next row
......@@ -1474,7 +1446,6 @@ int ChessBoardDetector::checkQuadGroup(std::vector<ChessBoardQuad*>& quad_group,
{
right->row = 0;
out_corners.push_back(right);
//cvCircle( debug_img, cvPointFrom32f(right->pt), 3, cvScalar(0,255-j*10,0), -1, 8, 0 );
if( right->count == 2 )
break;
if( right->count != 3 || (int)out_corners.size() >= std::max(pattern_size.width,pattern_size.height) )
......@@ -1519,7 +1490,6 @@ int ChessBoardDetector::checkQuadGroup(std::vector<ChessBoardQuad*>& quad_group,
{
cur->row = i;
out_corners.push_back(cur);
//cvCircle( debug_img, cvPointFrom32f(cur->pt), 3, cvScalar(0,0,255-j*10), -1, 8, 0 );
if (cur->count == 2 + (i < height-1) && j > 0)
break;
......@@ -1764,7 +1734,6 @@ void ChessBoardDetector::generateQuads(const cv::Mat& image_, int flags)
int min_size = 25; //cvRound( image->cols * image->rows * .03 * 0.01 * 0.92 );
bool filterQuads = (flags & CALIB_CB_FILTER_QUADS) != 0;
#ifdef USE_CV_FINDCONTOURS // use cv::findContours
std::vector<std::vector<Point> > contours;
std::vector<Vec4i> hierarchy;
......@@ -1879,122 +1848,6 @@ void ChessBoardDetector::generateQuads(const cv::Mat& image_, int flags)
}
}
#else // use legacy API: cvStartFindContours / cvFindNextContour / cvEndFindContours
CvMat image_old = cvMat(image_), *image = &image_old;
CvContourEx* board = 0;
// create temporary storage for contours and the sequence of pointers to found quadrangles
cv::Ptr<CvMemStorage> temp_storage(cvCreateMemStorage(0));
CvSeq *root = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq*), temp_storage);
// initialize contour retrieving routine
CvContourScanner scanner = cvStartFindContours(image, temp_storage, sizeof(CvContourEx),
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
// get all the contours one by one
CvSeq* src_contour = NULL;
while ((src_contour = cvFindNextContour(scanner)) != NULL)
{
CvSeq *dst_contour = 0;
CvRect rect = ((CvContour*)src_contour)->rect;
// reject contours with too small perimeter
if( CV_IS_SEQ_HOLE(src_contour) && rect.width*rect.height >= min_size )
{
const int min_approx_level = 1, max_approx_level = MAX_CONTOUR_APPROX;
for (int approx_level = min_approx_level; approx_level <= max_approx_level; approx_level++ )
{
dst_contour = cvApproxPoly( src_contour, sizeof(CvContour), temp_storage,
CV_POLY_APPROX_DP, (float)approx_level );
if( dst_contour->total == 4 )
break;
// we call this again on its own output, because sometimes
// cvApproxPoly() does not simplify as much as it should.
dst_contour = cvApproxPoly( dst_contour, sizeof(CvContour), temp_storage,
CV_POLY_APPROX_DP, (float)approx_level );
if( dst_contour->total == 4 )
break;
}
// reject non-quadrangles
if( dst_contour->total == 4 && cvCheckContourConvexity(dst_contour) )
{
cv::Point2i pt[4];
double p = cvContourPerimeter(dst_contour);
double area = fabs(cvContourArea(dst_contour, CV_WHOLE_SEQ));
for (int i = 0; i < 4; ++i)
pt[i] = *(CvPoint*)cvGetSeqElem(dst_contour, i);
CV_LOG_VERBOSE(NULL, 9, "... contours(" << root->total << " added):" << pt[0] << " " << pt[1] << " " << pt[2] << " " << pt[3]);
double d1 = sqrt(normL2Sqr<double>(pt[0] - pt[2]));
double d2 = sqrt(normL2Sqr<double>(pt[1] - pt[3]));
// philipg. Only accept those quadrangles which are more square
// than rectangular and which are big enough
double d3 = sqrt(normL2Sqr<double>(pt[0] - pt[1]));
double d4 = sqrt(normL2Sqr<double>(pt[1] - pt[2]));
if (!filterQuads ||
(d3*4 > d4 && d4*4 > d3 && d3*d4 < area*1.5 && area > min_size &&
d1 >= 0.15 * p && d2 >= 0.15 * p))
{
CvContourEx* parent = (CvContourEx*)(src_contour->v_prev);
parent->counter++;
if( !board || board->counter < parent->counter )
board = parent;
dst_contour->v_prev = (CvSeq*)parent;
//for( i = 0; i < 4; i++ ) cvLine( debug_img, pt[i], pt[(i+1)&3], cvScalar(200,255,255), 1, CV_AA, 0 );
cvSeqPush( root, &dst_contour );
}
}
}
}
// finish contour retrieving
cvEndFindContours( &scanner );
// allocate quad & corner buffers
int total = root->total;
size_t max_quad_buf_size = std::max((size_t)2, (size_t)total * 3);
all_quads.allocate(max_quad_buf_size);
all_corners.allocate(max_quad_buf_size * 4);
// Create array of quads structures
for (int idx = 0; idx < total; ++idx)
{
/* CvSeq* */src_contour = *(CvSeq**)cvGetSeqElem(root, idx);
if (filterQuads && src_contour->v_prev != (CvSeq*)board)
continue;
int quad_idx = quad_count++;
ChessBoardQuad& q = all_quads[quad_idx];
// reset group ID
q = ChessBoardQuad();
CV_Assert(src_contour->total == 4);
for (int i = 0; i < 4; i++)
{
Point* onePoint = (Point*)cvGetSeqElem(src_contour, i);
CV_Assert(onePoint != NULL);
Point2f pt(*onePoint);
ChessBoardCorner& corner = all_corners[quad_idx*4 + i];
corner = ChessBoardCorner(pt);
q.corners[i] = &corner;
}
q.edge_len = FLT_MAX;
for (int i = 0; i < 4; ++i)
{
float d = normL2Sqr<float>(q.corners[i]->pt - q.corners[(i+1)&3]->pt);
q.edge_len = std::min(q.edge_len, d);
}
}
#endif
all_quads_count = quad_count;
CV_LOG_VERBOSE(NULL, 3, "Total quad contours: " << total);
......
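The retained code path (guarded by USE_CV_FINDCONTOURS above) uses the C++ contour API in place of the removed cvStartFindContours/cvFindNextContour loop. A hedged, self-contained sketch of that general pattern, not copied from the detector itself:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Find convex quadrangle candidates in a binarized 8-bit image,
// mirroring the cv::findContours-based code path kept by this commit.
std::vector<std::vector<cv::Point>> findQuadCandidates(const cv::Mat& binarized)
{
    std::vector<std::vector<cv::Point>> contours, quads;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(binarized, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);

    for (const auto& c : contours)
    {
        std::vector<cv::Point> approx;
        cv::approxPolyDP(c, approx, /*epsilon=*/1.0, /*closed=*/true);
        if (approx.size() == 4 && cv::isContourConvex(approx))
            quads.push_back(approx);
    }
    return quads;
}
```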
......@@ -998,7 +998,7 @@ TEST(DISABLED_Calib3d_InitInverseRectificationMap, accuracy) { CV_InitInverseRec
////////////////////////////// undistort /////////////////////////////////
static void test_remap( const Mat& src, Mat& dst, const Mat& mapx, const Mat& mapy,
Mat* mask=0, int interpolation=CV_INTER_LINEAR )
Mat* mask=0, int interpolation=cv::INTER_LINEAR )
{
int x, y, k;
int drows = dst.rows, dcols = dst.cols;
......@@ -1009,7 +1009,7 @@ static void test_remap( const Mat& src, Mat& dst, const Mat& mapx, const Mat& ma
int step = (int)(src.step / CV_ELEM_SIZE(depth));
int delta;
if( interpolation != CV_INTER_CUBIC )
if( interpolation != cv::INTER_CUBIC )
{
delta = 0;
scols -= 1; srows -= 1;
......@@ -1318,7 +1318,7 @@ void CV_UndistortTest::get_test_array_types_and_sizes( int test_case_idx, vector
sizes[INPUT][2] = cvtest::randInt(rng)%2 ? cvSize(4,1) : cvSize(1,4);
types[INPUT][3] = types[INPUT][1];
sizes[INPUT][3] = sizes[INPUT][1];
interpolation = CV_INTER_LINEAR;
interpolation = cv::INTER_LINEAR;
}
......
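For reference, a brief hedged sketch of the renamed interpolation constants in an ordinary remap call; the precomputed floating-point maps are assumed inputs, as in the test helper above:

```cpp
#include <opencv2/imgproc.hpp>

// Apply precomputed floating-point maps with bilinear interpolation.
void remapLinear(const cv::Mat& src, cv::Mat& dst, const cv::Mat& mapx, const cv::Mat& mapy)
{
    cv::remap(src, dst, mapx, mapy, cv::INTER_LINEAR, cv::BORDER_CONSTANT);
}
```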
......@@ -609,8 +609,8 @@ Below is an example that utilizes BufferPool with StackAllocator:
GpuMat d_src2 = pool2.getBuffer(1024, 1024, CV_8UC1); // 1MB
GpuMat d_dst2 = pool2.getBuffer(1024, 1024, CV_8UC3); // 3MB
cvtColor(d_src1, d_dst1, CV_GRAY2BGR, 0, stream1);
cvtColor(d_src2, d_dst2, CV_GRAY2BGR, 0, stream2);
cvtColor(d_src1, d_dst1, cv::COLOR_GRAY2BGR, 0, stream1);
cvtColor(d_src2, d_dst2, cv::COLOR_GRAY2BGR, 0, stream2);
}
@endcode
......@@ -675,8 +675,8 @@ and the corresponding memory is automatically returned to the pool for later usa
d_src1.setTo(Scalar(i), stream1);
d_src2.setTo(Scalar(i), stream2);
cvtColor(d_src1, d_dst1, CV_GRAY2BGR, 0, stream1);
cvtColor(d_src2, d_dst2, CV_GRAY2BGR, 0, stream2);
cvtColor(d_src1, d_dst1, cv::COLOR_GRAY2BGR, 0, stream1);
cvtColor(d_src2, d_dst2, cv::COLOR_GRAY2BGR, 0, stream2);
// The order of destruction of the local variables is:
// d_dst2 => d_src2 => d_dst1 => d_src1
// LIFO rule is satisfied, this code runs without error
......
......@@ -1418,7 +1418,7 @@ types.
*/
GAPI_EXPORTS_W GMat threshold(const GMat& src, const GScalar& thresh, const GScalar& maxval, int type);
/** @overload
This function is applicable to all threshold types except CV_THRESH_OTSU and CV_THRESH_TRIANGLE
This function is applicable to all threshold types except cv::THRESH_OTSU and cv::THRESH_TRIANGLE
@note Function textual ID is "org.opencv.core.matrixop.thresholdOT"
*/
GAPI_EXPORTS_W std::tuple<GMat, GScalar> threshold(const GMat& src, const GScalar& maxval, int type);
......
......@@ -2351,7 +2351,7 @@ v/yr//0n63/XHTrw/wG8XoGIn8Sc7QAAAABJRU5ErkJggg==
x="1408.5438"
y="1445.6154"
id="tspan10807"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:1.25;font-family:serif;-inkscape-font-specification:'serif Italic'">CV_WARP_FILL_OUTLIERS</tspan></text>
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:1.25;font-family:serif;-inkscape-font-specification:'serif Italic'">cv::WARP_FILL_OUTLIERS</tspan></text>
<path
style="display:inline;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5176)"
d="m 1493.2407,1448.7527 c 199.5036,0.8614 218.4554,-34.5864 259.1291,-179.15"
......@@ -3130,7 +3130,7 @@ v/yr//0n63/XHTrw/wG8XoGIn8Sc7QAAAABJRU5ErkJggg==
x="1577.472"
y="602.83765"
id="tspan10807-6"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:1.25;font-family:serif;-inkscape-font-specification:'serif Italic'">CV_WARP_FILL_OUTLIERS</tspan></text>
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:1.25;font-family:serif;-inkscape-font-specification:'serif Italic'">cv::WARP_FILL_OUTLIERS</tspan></text>
<path
style="display:inline;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5176-89)"
d="m 1473.2037,606.11056 c -21.2287,0.8614 17.5333,-133.30846 -27.5745,-179.15"
......@@ -3236,7 +3236,7 @@ v/yr//0n63/XHTrw/wG8XoGIn8Sc7QAAAABJRU5ErkJggg==
x="2178.0503"
y="602.83765"
id="tspan10807-6-4"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:1.25;font-family:serif;-inkscape-font-specification:'serif Italic'">CV_WARP_FILL_OUTLIERS</tspan></text>
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:1.25;font-family:serif;-inkscape-font-specification:'serif Italic'">cv::WARP_FILL_OUTLIERS</tspan></text>
<path
style="display:inline;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5176-89-9)"
d="m 2073.7821,606.11056 c -21.2287,0.8614 17.5333,-133.30846 -27.5745,-179.15"
......
......@@ -357,7 +357,7 @@ public class ImgprocTest extends OpenCVTestCase {
H1.put(0, 0, 1, 2, 3);
H2.put(0, 0, 4, 5, 6);
double distance = Imgproc.compareHist(H1, H2, Imgproc.CV_COMP_CORREL);
double distance = Imgproc.compareHist(H1, H2, Imgproc.HISTCMP_CORREL);
assertEquals(1., distance, EPS);
}
......@@ -636,7 +636,7 @@ public class ImgprocTest extends OpenCVTestCase {
Mat dstLables = getMat(CvType.CV_32SC1, 0);
Mat labels = new Mat();
Imgproc.distanceTransformWithLabels(gray128, dst, labels, Imgproc.CV_DIST_L2, 3);
Imgproc.distanceTransformWithLabels(gray128, dst, labels, Imgproc.DIST_L2, 3);
assertMatEqual(dstLables, labels);
assertMatEqual(getMat(CvType.CV_32FC1, 8192), dst, EPS);
......@@ -813,7 +813,7 @@ public class ImgprocTest extends OpenCVTestCase {
Mat linePoints = new Mat(4, 1, CvType.CV_32FC1);
linePoints.put(0, 0, 0.53198653, 0.84675282, 2.5, 3.75);
Imgproc.fitLine(points, dst, Imgproc.CV_DIST_L12, 0, 0.01, 0.01);
Imgproc.fitLine(points, dst, Imgproc.DIST_L12, 0, 0.01, 0.01);
assertMatEqual(linePoints, dst, EPS);
}
......@@ -1040,7 +1040,7 @@ public class ImgprocTest extends OpenCVTestCase {
Mat img = new Mat(sz, sz, CvType.CV_8U, new Scalar(128));
Mat circles = new Mat();
Imgproc.HoughCircles(img, circles, Imgproc.CV_HOUGH_GRADIENT, 2, img.rows() / 4);
Imgproc.HoughCircles(img, circles, Imgproc.HOUGH_GRADIENT, 2, img.rows() / 4);
assertEquals(0, circles.cols());
}
......@@ -1054,7 +1054,7 @@ public class ImgprocTest extends OpenCVTestCase {
int radius = Math.min(img.cols() / 4, img.rows() / 4);
Imgproc.circle(img, center, radius, colorBlack, 3);
Imgproc.HoughCircles(img, circles, Imgproc.CV_HOUGH_GRADIENT, 2, img.rows() / 4);
Imgproc.HoughCircles(img, circles, Imgproc.HOUGH_GRADIENT, 2, img.rows() / 4);
assertEquals(1, circles.cols());
}
......@@ -1308,7 +1308,7 @@ public class ImgprocTest extends OpenCVTestCase {
contour1.put(0, 0, 1, 1, 5, 1, 4, 3, 6, 2);
contour2.put(0, 0, 1, 1, 6, 1, 4, 1, 2, 5);
double distance = Imgproc.matchShapes(contour1, contour2, Imgproc.CV_CONTOURS_MATCH_I1, 1);
double distance = Imgproc.matchShapes(contour1, contour2, Imgproc.CONTOURS_MATCH_I1, 1);
assertEquals(2.81109697365334, distance, EPS);
}
......
......@@ -60,7 +60,7 @@ OCL_PERF_TEST_P(CV_TM_CCORRFixture, matchTemplate,
declare.in(src, templ, WARMUP_RNG).out(dst);
OCL_TEST_CYCLE() cv::matchTemplate(src, templ, dst, CV_TM_CCORR);
OCL_TEST_CYCLE() cv::matchTemplate(src, templ, dst, cv::TM_CCORR);
SANITY_CHECK(dst, 1e-4);
}
......@@ -78,7 +78,7 @@ OCL_PERF_TEST_P(CV_TM_CCORR_NORMEDFixture, matchTemplate,
declare.in(src, templ, WARMUP_RNG).out(dst);
OCL_TEST_CYCLE() cv::matchTemplate(src, templ, dst, CV_TM_CCORR_NORMED);
OCL_TEST_CYCLE() cv::matchTemplate(src, templ, dst, cv::TM_CCORR_NORMED);
SANITY_CHECK(dst, 3e-2);
}
......
......@@ -26,7 +26,7 @@ PERF_TEST(PerfHoughCircles, Basic)
TEST_CYCLE()
{
HoughCircles(img, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(img, circles, cv::HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
}
SANITY_CHECK_NOTHING();
......@@ -50,7 +50,7 @@ PERF_TEST(PerfHoughCircles2, ManySmallCircles)
TEST_CYCLE()
{
HoughCircles(img, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(img, circles, cv::HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
}
SANITY_CHECK_NOTHING();
......@@ -76,7 +76,7 @@ PERF_TEST(PerfHoughCircles4f, Basic)
TEST_CYCLE()
{
HoughCircles(img, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(img, circles, cv::HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
}
SANITY_CHECK_NOTHING();
......
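A hedged usage sketch of the renamed Hough constant; the dp, minDist and threshold values are illustrative assumptions, not the perf-test parameters:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Detect circles on an 8-bit grayscale image with the gradient-based method.
std::vector<cv::Vec3f> detectCircles(const cv::Mat& gray)
{
    std::vector<cv::Vec3f> circles;
    cv::HoughCircles(gray, circles, cv::HOUGH_GRADIENT,
                     /*dp=*/2, /*minDist=*/gray.rows / 4.0,
                     /*param1=*/200, /*param2=*/100);
    return circles;
}
```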
......@@ -42,16 +42,6 @@
#ifndef _CV_GEOM_H_
#define _CV_GEOM_H_
/* Finds distance between two points */
CV_INLINE float icvDistanceL2_32f( CvPoint2D32f pt1, CvPoint2D32f pt2 )
{
float dx = pt2.x - pt1.x;
float dy = pt2.y - pt1.y;
return std::sqrt( dx*dx + dy*dy );
}
/* curvature: 0 - 1-curvature, 1 - k-cosine curvature. */
CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size, CvMemStorage* storage, int method );
......
......@@ -835,9 +835,10 @@ void Canny( InputArray _src, OutputArray _dst,
_dst.create(size, CV_8U);
// backward compatibility
const int CV_CANNY_L2_GRADIENT = (1 << 31);
if (!L2gradient && (aperture_size & CV_CANNY_L2_GRADIENT) == CV_CANNY_L2_GRADIENT)
{
// backward compatibility
aperture_size &= ~CV_CANNY_L2_GRADIENT;
L2gradient = true;
}
......
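The removed CV_CANNY_L2_GRADIENT flag used to be OR-ed into aperture_size; the kept compatibility block strips it and sets the boolean instead. A hedged sketch of the equivalent modern call (threshold values are illustrative):

```cpp
#include <opencv2/imgproc.hpp>

// Edge detection with a 3x3 Sobel aperture and the more accurate L2 gradient norm,
// replacing the old 'aperture_size | CV_CANNY_L2_GRADIENT' trick.
void cannyL2(const cv::Mat& gray, cv::Mat& edges)
{
    cv::Canny(gray, edges, /*threshold1=*/50, /*threshold2=*/150,
              /*apertureSize=*/3, /*L2gradient=*/true);
}
```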
......@@ -1111,7 +1111,7 @@ FillConvexPoly( Mat& img, const Point2l* v, int npts, const void* color, int lin
Point2l p0;
int delta1, delta2;
if( line_type < CV_AA )
if( line_type < cv::LINE_AA )
delta1 = delta2 = XY_ONE >> 1;
else
delta1 = XY_ONE - 1, delta2 = 0;
......@@ -1181,7 +1181,7 @@ FillConvexPoly( Mat& img, const Point2l* v, int npts, const void* color, int lin
do
{
if( line_type < CV_AA || y < (int)ymax || y == (int)ymin )
if( line_type < cv::LINE_AA || y < (int)ymax || y == (int)ymin )
{
for( i = 0; i < 2; i++ )
{
......@@ -1277,7 +1277,7 @@ CollectPolyEdges( Mat& img, const Point2l* v, int count, std::vector<PolyEdge>&
pt1.x = (pt1.x + offset.x) << (XY_SHIFT - shift);
pt1.y = (pt1.y + delta) >> shift;
if( line_type < CV_AA )
if( line_type < cv::LINE_AA )
{
t0.y = pt0.y; t1.y = pt1.y;
t0.x = (pt0.x + (XY_ONE >> 1)) >> XY_SHIFT;
......@@ -1632,7 +1632,7 @@ ThickLine( Mat& img, Point2l p0, Point2l p1, const void* color,
if( thickness <= 1 )
{
if( line_type < CV_AA )
if( line_type < cv::LINE_AA )
{
if( line_type == 1 || line_type == 4 || shift == 0 )
{
......@@ -1678,7 +1678,7 @@ ThickLine( Mat& img, Point2l p0, Point2l p1, const void* color,
{
if( flags & (i+1) )
{
if( line_type < CV_AA )
if( line_type < cv::LINE_AA )
{
Point center;
center.x = (int)((p0.x + (XY_ONE>>1)) >> XY_SHIFT);
......@@ -1796,7 +1796,7 @@ void line( InputOutputArray _img, Point pt1, Point pt2, const Scalar& color,
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
if( line_type == cv::LINE_AA && img.depth() != CV_8U )
line_type = 8;
CV_Assert( 0 < thickness && thickness <= MAX_THICKNESS );
......@@ -1835,7 +1835,7 @@ void rectangle( InputOutputArray _img, Point pt1, Point pt2,
Mat img = _img.getMat();
if( lineType == CV_AA && img.depth() != CV_8U )
if( lineType == cv::LINE_AA && img.depth() != CV_8U )
lineType = 8;
CV_Assert( thickness <= MAX_THICKNESS );
......@@ -1885,7 +1885,7 @@ void circle( InputOutputArray _img, Point center, int radius,
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
if( line_type == cv::LINE_AA && img.depth() != CV_8U )
line_type = 8;
CV_Assert( radius >= 0 && thickness <= MAX_THICKNESS &&
......@@ -1917,7 +1917,7 @@ void ellipse( InputOutputArray _img, Point center, Size axes,
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
if( line_type == cv::LINE_AA && img.depth() != CV_8U )
line_type = 8;
CV_Assert( axes.width >= 0 && axes.height >= 0 &&
......@@ -1947,7 +1947,7 @@ void ellipse(InputOutputArray _img, const RotatedRect& box, const Scalar& color,
Mat img = _img.getMat();
if( lineType == CV_AA && img.depth() != CV_8U )
if( lineType == cv::LINE_AA && img.depth() != CV_8U )
lineType = 8;
CV_Assert( box.size.width >= 0 && box.size.height >= 0 &&
......@@ -1978,7 +1978,7 @@ void fillConvexPoly( InputOutputArray _img, const Point* pts, int npts,
if( !pts || npts <= 0 )
return;
if( line_type == CV_AA && img.depth() != CV_8U )
if( line_type == cv::LINE_AA && img.depth() != CV_8U )
line_type = 8;
double buf[4];
......@@ -1996,7 +1996,7 @@ void fillPoly( InputOutputArray _img, const Point** pts, const int* npts, int nc
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
if( line_type == cv::LINE_AA && img.depth() != CV_8U )
line_type = 8;
CV_Assert( pts && npts && ncontours >= 0 && 0 <= shift && shift <= XY_SHIFT );
......@@ -2027,7 +2027,7 @@ void polylines( InputOutputArray _img, const Point* const* pts, const int* npts,
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
if( line_type == cv::LINE_AA && img.depth() != CV_8U )
line_type = 8;
CV_Assert( pts && npts && ncontours >= 0 &&
......@@ -2572,7 +2572,7 @@ cvDrawContours( void* _img, CvSeq* contour,
cv::Point offset = _offset;
double ext_buf[4], hole_buf[4];
if( line_type == CV_AA && img.depth() != CV_8U )
if( line_type == cv::LINE_AA && img.depth() != CV_8U )
line_type = 8;
if( !contour )
......
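For illustration, a small hedged sketch of the drawing API with the namespaced anti-aliasing constant (coordinates and colors are arbitrary):

```cpp
#include <opencv2/imgproc.hpp>

// Draw an anti-aliased line and a filled circle on a BGR canvas.
void drawAA(cv::Mat& canvas)
{
    cv::line(canvas, cv::Point(10, 10), cv::Point(200, 150),
             cv::Scalar(0, 255, 0), 2, cv::LINE_AA);
    cv::circle(canvas, cv::Point(100, 100), 40,
               cv::Scalar(255, 0, 0), cv::FILLED, cv::LINE_AA);
}
```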
......@@ -331,26 +331,26 @@ static void fitLine2D( const Point2f * points, int count, int dist,
switch (dist)
{
case CV_DIST_L2:
case cv::DIST_L2:
return fitLine2D_wods( points, count, 0, line );
case CV_DIST_L1:
case cv::DIST_L1:
calc_weights = weightL1;
break;
case CV_DIST_L12:
case cv::DIST_L12:
calc_weights = weightL12;
break;
case CV_DIST_FAIR:
case cv::DIST_FAIR:
calc_weights_param = weightFair;
break;
case CV_DIST_WELSCH:
case cv::DIST_WELSCH:
calc_weights_param = weightWelsch;
break;
case CV_DIST_HUBER:
case cv::DIST_HUBER:
calc_weights_param = weightHuber;
break;
......@@ -475,26 +475,26 @@ static void fitLine3D( Point3f * points, int count, int dist,
switch (dist)
{
case CV_DIST_L2:
case cv::DIST_L2:
return fitLine3D_wods( points, count, 0, line );
case CV_DIST_L1:
case cv::DIST_L1:
calc_weights = weightL1;
break;
case CV_DIST_L12:
case cv::DIST_L12:
calc_weights = weightL12;
break;
case CV_DIST_FAIR:
case cv::DIST_FAIR:
calc_weights_param = weightFair;
break;
case CV_DIST_WELSCH:
case cv::DIST_WELSCH:
calc_weights_param = weightWelsch;
break;
case CV_DIST_HUBER:
case cv::DIST_HUBER:
calc_weights_param = weightHuber;
break;
......
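A hedged sketch of the public cv::fitLine entry point with one of the renamed distance types; the 0.01 tolerances follow the common documentation defaults and the point set is an assumed input:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Fit a 2D line (vx, vy, x0, y0) to a point set using the L2 distance.
cv::Vec4f fitLineL2(const std::vector<cv::Point2f>& pts)
{
    cv::Vec4f line;
    cv::fitLine(pts, line, cv::DIST_L2, /*param=*/0, /*reps=*/0.01, /*aeps=*/0.01);
    return line;
}
```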
......@@ -394,7 +394,6 @@ void cv::pyrMeanShiftFiltering( InputArray _src, OutputArray _dst,
dst_pyramid[level].create( src_pyramid[level].rows,
src_pyramid[level].cols, src_pyramid[level].type() );
cv::pyrDown( src_pyramid[level-1], src_pyramid[level], src_pyramid[level].size() );
//CV_CALL( cvResize( src_pyramid[level-1], src_pyramid[level], CV_INTER_AREA ));
}
mask0.create(src0.rows, src0.cols, CV_8UC1);
......@@ -419,7 +418,6 @@ void cv::pyrMeanShiftFiltering( InputArray _src, OutputArray _dst,
m = cv::Mat(size.height, size.width, CV_8UC1, mask0.ptr());
dstep = (int)dst_pyramid[level+1].step;
dptr = dst_pyramid[level+1].ptr() + dstep + cn;
//cvResize( dst_pyramid[level+1], dst_pyramid[level], CV_INTER_CUBIC );
cv::pyrUp( dst_pyramid[level+1], dst_pyramid[level], dst_pyramid[level].size() );
m.setTo(cv::Scalar::all(0));
......
......@@ -315,7 +315,7 @@ static bool matchTemplate_CCORR(InputArray _image, InputArray _templ, OutputArra
static bool matchTemplate_CCORR_NORMED(InputArray _image, InputArray _templ, OutputArray _result)
{
matchTemplate(_image, _templ, _result, CV_TM_CCORR);
matchTemplate(_image, _templ, _result, cv::TM_CCORR);
int type = _image.type(), cn = CV_MAT_CN(type);
......@@ -373,7 +373,7 @@ static bool matchTemplate_SQDIFF(InputArray _image, InputArray _templ, OutputArr
return( matchTemplateNaive_SQDIFF(_image, _templ, _result));
else
{
matchTemplate(_image, _templ, _result, CV_TM_CCORR);
matchTemplate(_image, _templ, _result, cv::TM_CCORR);
int type = _image.type(), cn = CV_MAT_CN(type);
......@@ -404,7 +404,7 @@ static bool matchTemplate_SQDIFF(InputArray _image, InputArray _templ, OutputArr
static bool matchTemplate_SQDIFF_NORMED(InputArray _image, InputArray _templ, OutputArray _result)
{
matchTemplate(_image, _templ, _result, CV_TM_CCORR);
matchTemplate(_image, _templ, _result, cv::TM_CCORR);
int type = _image.type(), cn = CV_MAT_CN(type);
......@@ -436,7 +436,7 @@ static bool matchTemplate_SQDIFF_NORMED(InputArray _image, InputArray _templ, Ou
static bool matchTemplate_CCOEFF(InputArray _image, InputArray _templ, OutputArray _result)
{
matchTemplate(_image, _templ, _result, CV_TM_CCORR);
matchTemplate(_image, _templ, _result, cv::TM_CCORR);
UMat image_sums, temp;
integral(_image, image_sums, CV_32F);
......@@ -471,7 +471,7 @@ static bool matchTemplate_CCOEFF(InputArray _image, InputArray _templ, OutputArr
static bool matchTemplate_CCOEFF_NORMED(InputArray _image, InputArray _templ, OutputArray _result)
{
matchTemplate(_image, _templ, _result, CV_TM_CCORR);
matchTemplate(_image, _templ, _result, cv::TM_CCORR);
UMat temp, image_sums, image_sqsums;
integral(_image, image_sums, image_sqsums, CV_32F, CV_32F);
......@@ -796,7 +796,7 @@ static void matchTemplateMask( InputArray _img, InputArray _templ, OutputArray _
merge(maskChannels.data(), templ.channels(), mask);
}
if (method == CV_TM_SQDIFF || method == CV_TM_SQDIFF_NORMED)
if (method == cv::TM_SQDIFF || method == cv::TM_SQDIFF_NORMED)
{
Mat temp_result(corrSize, CV_32F);
Mat img2 = img.mul(img);
......@@ -810,19 +810,19 @@ static void matchTemplateMask( InputArray _img, InputArray _templ, OutputArray _
// for normalization.
result = -2 * result + temp_result + templ2_mask2_sum;
if (method == CV_TM_SQDIFF_NORMED)
if (method == cv::TM_SQDIFF_NORMED)
{
sqrt(templ2_mask2_sum * temp_result, temp_result);
result /= temp_result;
}
}
else if (method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED)
else if (method == cv::TM_CCORR || method == cv::TM_CCORR_NORMED)
{
// If the mul() is ever unnested, declare MatExpr, *not* Mat, to be more efficient.
Mat templ_mask2 = templ.mul(mask.mul(mask));
crossCorr(img, templ_mask2, result, Point(0,0), 0, 0);
if (method == CV_TM_CCORR_NORMED)
if (method == cv::TM_CCORR_NORMED)
{
Mat temp_result(corrSize, CV_32F);
Mat img2 = img.mul(img);
......@@ -834,7 +834,7 @@ static void matchTemplateMask( InputArray _img, InputArray _templ, OutputArray _
result /= temp_result;
}
}
else if (method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED)
else if (method == cv::TM_CCOEFF || method == cv::TM_CCOEFF_NORMED)
{
// Do mul() inline or declare MatExpr where possible, *not* Mat, to be more efficient.
......@@ -864,7 +864,7 @@ static void matchTemplateMask( InputArray _img, InputArray _templ, OutputArray _
// transform back, but now with only one channel
result -= temp_res.reshape(1, result.rows);
}
if (method == CV_TM_CCOEFF_NORMED)
if (method == cv::TM_CCOEFF_NORMED)
{
// norm(T')
double norm_templx = norm(mask.mul(templ - sum(mask.mul(templ)).div(mask_sum)),
......@@ -905,14 +905,14 @@ static void matchTemplateMask( InputArray _img, InputArray _templ, OutputArray _
static void common_matchTemplate( Mat& img, Mat& templ, Mat& result, int method, int cn )
{
if( method == CV_TM_CCORR )
if( method == cv::TM_CCORR )
return;
int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
bool isNormed = method == CV_TM_CCORR_NORMED ||
method == CV_TM_SQDIFF_NORMED ||
method == CV_TM_CCOEFF_NORMED;
int numType = method == cv::TM_CCORR || method == cv::TM_CCORR_NORMED ? 0 :
method == cv::TM_CCOEFF || method == cv::TM_CCOEFF_NORMED ? 1 : 2;
bool isNormed = method == cv::TM_CCORR_NORMED ||
method == cv::TM_SQDIFF_NORMED ||
method == cv::TM_CCOEFF_NORMED;
double invArea = 1./((double)templ.rows * templ.cols);
......@@ -921,7 +921,7 @@ static void common_matchTemplate( Mat& img, Mat& templ, Mat& result, int method,
double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0;
double templNorm = 0, templSum2 = 0;
if( method == CV_TM_CCOEFF )
if( method == cv::TM_CCOEFF )
{
integral(img, sum, CV_64F);
templMean = mean(templ);
......@@ -933,7 +933,7 @@ static void common_matchTemplate( Mat& img, Mat& templ, Mat& result, int method,
templNorm = templSdv[0]*templSdv[0] + templSdv[1]*templSdv[1] + templSdv[2]*templSdv[2] + templSdv[3]*templSdv[3];
if( templNorm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
if( templNorm < DBL_EPSILON && method == cv::TM_CCOEFF_NORMED )
{
result = Scalar::all(1);
return;
......@@ -1020,7 +1020,7 @@ static void common_matchTemplate( Mat& img, Mat& templ, Mat& result, int method,
else if( fabs(num) < t*1.125 )
num = num > 0 ? 1 : -1;
else
num = method != CV_TM_SQDIFF_NORMED ? 0 : 1;
num = method != cv::TM_SQDIFF_NORMED ? 0 : 1;
}
rrow[j] = (float)num;
......@@ -1116,30 +1116,30 @@ static bool ipp_matchTemplate( Mat& img, Mat& templ, Mat& result, int method)
if(templ.size().area()*4 > img.size().area())
return false;
if(method == CV_TM_SQDIFF)
if(method == cv::TM_SQDIFF)
{
if(ipp_sqrDistance(img, templ, result))
return true;
}
else if(method == CV_TM_SQDIFF_NORMED)
else if(method == cv::TM_SQDIFF_NORMED)
{
if(ipp_crossCorr(img, templ, result, false))
{
common_matchTemplate(img, templ, result, CV_TM_SQDIFF_NORMED, 1);
common_matchTemplate(img, templ, result, cv::TM_SQDIFF_NORMED, 1);
return true;
}
}
else if(method == CV_TM_CCORR)
else if(method == cv::TM_CCORR)
{
if(ipp_crossCorr(img, templ, result, false))
return true;
}
else if(method == CV_TM_CCORR_NORMED)
else if(method == cv::TM_CCORR_NORMED)
{
if(ipp_crossCorr(img, templ, result, true))
return true;
}
else if(method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED)
else if(method == cv::TM_CCOEFF || method == cv::TM_CCOEFF_NORMED)
{
if(ipp_crossCorr(img, templ, result, false))
{
......@@ -1160,7 +1160,7 @@ void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result,
CV_INSTRUMENT_REGION();
int type = _img.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );
CV_Assert( cv::TM_SQDIFF <= method && method <= cv::TM_CCOEFF_NORMED );
CV_Assert( (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 );
if (!_mask.empty())
......
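For context, a hedged sketch of the public matchTemplate call with a renamed method constant, followed by the usual minMaxLoc lookup; the variable names here are assumptions, not taken from this file:

```cpp
#include <opencv2/imgproc.hpp>

// Locate the best match of 'templ' inside 'img' with normalized cross-correlation.
cv::Point bestMatch(const cv::Mat& img, const cv::Mat& templ)
{
    cv::Mat result;
    cv::matchTemplate(img, templ, result, cv::TM_CCOEFF_NORMED);
    cv::Point maxLoc;
    cv::minMaxLoc(result, nullptr, nullptr, nullptr, &maxLoc);
    return maxLoc; // top-left corner of the best matching window
}
```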
......@@ -51,7 +51,7 @@ namespace ocl {
///////////////////////////////////////////// matchTemplate //////////////////////////////////////////////////////////
CV_ENUM(MatchTemplType, CV_TM_CCORR, CV_TM_CCORR_NORMED, CV_TM_SQDIFF, CV_TM_SQDIFF_NORMED, CV_TM_CCOEFF, CV_TM_CCOEFF_NORMED)
CV_ENUM(MatchTemplType, cv::TM_CCORR, cv::TM_CCORR_NORMED, cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)
PARAM_TEST_CASE(MatchTemplate, MatDepth, Channels, MatchTemplType, bool)
{
......
......@@ -160,7 +160,7 @@ void CV_CannyTest::run_func()
{
cv::Mat _out = cv::cvarrToMat(test_array[OUTPUT][0]);
cv::Canny(cv::cvarrToMat(test_array[INPUT][0]), _out, threshold1, threshold2,
aperture_size + (use_true_gradient ? CV_CANNY_L2_GRADIENT : 0));
aperture_size, use_true_gradient);
}
}
......
......@@ -128,7 +128,7 @@ void CV_DrawingTest_CPP::draw( Mat& img )
p2 = Point(3,imgSize.height+1000);
if( clipLine(Rect(0,0,imgSize.width,imgSize.height), p1, p2) && clipLine(imgSize, p1, p2) )
circle( img, Point(500,300), 50, cvColorToScalar(255,CV_8UC3), 5, 8, 1 ); // draw
circle( img, Point(500,300), 50, Scalar(255, 0, 0), 5, 8, 1 ); // draw
p1 = Point(imgSize.width,1), p2 = Point(imgSize.width,3);
if( clipLine(Rect(0,0,imgSize.width,imgSize.height), p1, p2) && clipLine(imgSize, p1, p2) )
......@@ -144,7 +144,7 @@ void CV_DrawingTest_CPP::draw( Mat& img )
ellipse2Poly( Point(430,180), Size(100,150), 30, 0, 150, 20, polyline );
pts = &polyline[0];
n = (int)polyline.size();
polylines( img, &pts, &n, 1, false, Scalar(0,0,150), 4, CV_AA );
polylines( img, &pts, &n, 1, false, Scalar(0,0,150), 4, cv::LINE_AA );
n = 0;
for( vector<Point>::const_iterator it = polyline.begin(); n < (int)polyline.size()-1; ++it, n++ )
{
......@@ -192,43 +192,43 @@ void CV_DrawingTest_CPP::draw( Mat& img )
textSize = getTextSize( text2, FONT_HERSHEY_SIMPLEX, fontScale, thickness, &baseline);
textOrg = Point(5,5)+Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, cv::LINE_AA);
fontScale = 1;
textSize = getTextSize( text2, FONT_HERSHEY_PLAIN, fontScale, thickness, &baseline);
textOrg += Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_PLAIN, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_PLAIN, fontScale, color, thickness, cv::LINE_AA);
fontScale = 0.5;
textSize = getTextSize( text2, FONT_HERSHEY_DUPLEX, fontScale, thickness, &baseline);
textOrg += Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_DUPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_DUPLEX, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize( text2, FONT_HERSHEY_COMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize( text2, FONT_HERSHEY_TRIPLEX, fontScale, thickness, &baseline);
textOrg += Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_TRIPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_TRIPLEX, fontScale, color, thickness, cv::LINE_AA);
fontScale = 1;
textSize = getTextSize( text2, FONT_HERSHEY_COMPLEX_SMALL, fontScale, thickness, &baseline);
textOrg += Point(0,180) + Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX_SMALL, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX_SMALL, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize( text2, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize( text2, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, color, thickness, cv::LINE_AA);
dist = 15, fontScale = 0.5;
textSize = getTextSize( text2, FONT_ITALIC, fontScale, thickness, &baseline);
textOrg += Point(0,textSize.height+dist);
putText(img, text2, textOrg, FONT_ITALIC, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_ITALIC, fontScale, color, thickness, cv::LINE_AA);
}
int CV_DrawingTest_CPP::checkLineIterator( Mat& img )
......@@ -321,7 +321,7 @@ void CV_DrawingTest_Far::draw(Mat& img)
p2 = Point(32768 + 3, imgSize.height + 1000);
if (clipLine(Rect(32768 + 0, 0, imgSize.width, imgSize.height), p1, p2) && clipLine(imgSize, p1, p2))
circle(img, Point(65536 + 500, 300), 50, cvColorToScalar(255, CV_8UC3), 5, 8, 1); // draw
circle(img, Point(65536 + 500, 300), 50, Scalar(255, 0, 0), 5, 8, 1); // draw
p1 = Point(imgSize.width, 1), p2 = Point(imgSize.width, 3);
if (clipLine(Rect(32768 + 0, 0, imgSize.width, imgSize.height), p1, p2) && clipLine(imgSize, p1, p2))
......@@ -337,7 +337,7 @@ void CV_DrawingTest_Far::draw(Mat& img)
ellipse2Poly(Point(32768 + 430, 180), Size(100, 150), 30, 0, 150, 20, polyline);
pts = &polyline[0];
n = (int)polyline.size();
polylines(img, &pts, &n, 1, false, Scalar(0, 0, 150), 4, CV_AA);
polylines(img, &pts, &n, 1, false, Scalar(0, 0, 150), 4, cv::LINE_AA);
n = 0;
for (vector<Point>::const_iterator it = polyline.begin(); n < (int)polyline.size() - 1; ++it, n++)
{
......@@ -385,43 +385,43 @@ void CV_DrawingTest_Far::draw(Mat& img)
textSize = getTextSize(text2, FONT_HERSHEY_SIMPLEX, fontScale, thickness, &baseline);
textOrg = Point(32768 + 5, 5) + Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, cv::LINE_AA);
fontScale = 1;
textSize = getTextSize(text2, FONT_HERSHEY_PLAIN, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_PLAIN, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_PLAIN, fontScale, color, thickness, cv::LINE_AA);
fontScale = 0.5;
textSize = getTextSize(text2, FONT_HERSHEY_DUPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_DUPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_DUPLEX, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize(text2, FONT_HERSHEY_COMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize(text2, FONT_HERSHEY_TRIPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_TRIPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_TRIPLEX, fontScale, color, thickness, cv::LINE_AA);
fontScale = 1;
textSize = getTextSize(text2, FONT_HERSHEY_COMPLEX_SMALL, fontScale, thickness, &baseline);
textOrg += Point(0, 180) + Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX_SMALL, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX_SMALL, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize(text2, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, color, thickness, cv::LINE_AA);
textSize = getTextSize(text2, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, color, thickness, cv::LINE_AA);
dist = 15, fontScale = 0.5;
textSize = getTextSize(text2, FONT_ITALIC, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_ITALIC, fontScale, color, thickness, CV_AA);
putText(img, text2, textOrg, FONT_ITALIC, fontScale, color, thickness, cv::LINE_AA);
img = img(Rect(32768, 0, 600, 400)).clone();
}
......@@ -517,7 +517,7 @@ protected:
Size textSize = getTextSize(*line, *font | italic, fontScale, thickness, &baseline);
Point textOrg(0, textSize.height + 2);
Mat img(textSize + Size(0, baseline), CV_8UC3, Scalar(255, 255, 255));
putText(img, *line, textOrg, *font | italic, fontScale, color, thickness, CV_AA);
putText(img, *line, textOrg, *font | italic, fontScale, color, thickness, cv::LINE_AA);
results.push_back(img);
bigSize.width = max(bigSize.width, img.size().width);
......
......@@ -123,7 +123,7 @@ public:
vector<CircleType> circles;
const double dp = 1.0;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(src, circles, cv::HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/";
#if DEBUG_IMAGES
......
......@@ -163,7 +163,7 @@ void CV_ImageWarpBaseTest::generate_test_data()
for (y = 0; y < ssize.height; y += cell_size)
for (x = 0; x < ssize.width; x += cell_size)
rectangle(src, Point(x, y), Point(x + std::min<int>(cell_size, ssize.width - x), y +
std::min<int>(cell_size, ssize.height - y)), Scalar::all((x + y) % 2 ? 255: 0), CV_FILLED);
std::min<int>(cell_size, ssize.height - y)), Scalar::all((x + y) % 2 ? 255: 0), cv::FILLED);
}
else
{
......@@ -175,7 +175,7 @@ void CV_ImageWarpBaseTest::generate_test_data()
}
// generating an interpolation type
interpolation = rng.uniform(0, CV_INTER_LANCZOS4 + 1);
interpolation = rng.uniform(0, cv::INTER_LANCZOS4 + 1);
// generating the dst matrix structure
double scale_x, scale_y;
......@@ -288,10 +288,10 @@ void CV_ImageWarpBaseTest::validate_results() const
#ifdef SHOW_IMAGE
const std::string w1("OpenCV impl (run func)"), w2("Reference func"), w3("Src image"), w4("Diff");
namedWindow(w1, CV_WINDOW_KEEPRATIO);
namedWindow(w2, CV_WINDOW_KEEPRATIO);
namedWindow(w3, CV_WINDOW_KEEPRATIO);
namedWindow(w4, CV_WINDOW_KEEPRATIO);
namedWindow(w1, cv::WINDOW_KEEPRATIO);
namedWindow(w2, cv::WINDOW_KEEPRATIO);
namedWindow(w3, cv::WINDOW_KEEPRATIO);
namedWindow(w4, cv::WINDOW_KEEPRATIO);
Mat diff;
absdiff(reference_dst, _dst, diff);
......@@ -442,7 +442,7 @@ void CV_Resize_Test::generate_test_data()
for (y = 0; y < ssize.height; y += cell_size)
for (x = 0; x < ssize.width; x += cell_size)
rectangle(src, Point(x, y), Point(x + std::min<int>(cell_size, ssize.width - x), y +
std::min<int>(cell_size, ssize.height - y)), Scalar::all((x + y) % 2 ? 255: 0), CV_FILLED);
std::min<int>(cell_size, ssize.height - y)), Scalar::all((x + y) % 2 ? 255: 0), cv::FILLED);
}
else
{
......@@ -1082,7 +1082,7 @@ void CV_WarpAffine_Test::generate_test_data()
// warp_matrix is inverse
if (rng.uniform(0., 1.) > 0)
interpolation |= CV_WARP_INVERSE_MAP;
interpolation |= cv::WARP_INVERSE_MAP;
}
void CV_WarpAffine_Test::run_func()
......@@ -1123,7 +1123,7 @@ void CV_WarpAffine_Test::warpAffine(const Mat& _src, Mat& _dst)
else
mapy = Mat();
if (!(interpolation & CV_WARP_INVERSE_MAP))
if (!(interpolation & cv::WARP_INVERSE_MAP))
invertAffineTransform(tM.clone(), tM);
const int AB_BITS = MAX(10, (int)INTER_BITS);
......@@ -1239,7 +1239,7 @@ void CV_WarpPerspective_Test::warpPerspective(const Mat& _src, Mat& _dst)
M = tmp;
}
if (!(interpolation & CV_WARP_INVERSE_MAP))
if (!(interpolation & cv::WARP_INVERSE_MAP))
{
Mat tmp;
invert(M, tmp);
......
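A hedged sketch showing how the renamed flag is OR-ed into the interpolation argument of a public warp call; the 2x3 matrix M is assumed to be a precomputed affine transform:

```cpp
#include <opencv2/imgproc.hpp>

// Warp with M interpreted as the inverse (dst -> src) transform.
void warpInverse(const cv::Mat& src, cv::Mat& dst, const cv::Mat& M)
{
    cv::warpAffine(src, dst, M, src.size(),
                   cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);
}
```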
......@@ -66,8 +66,8 @@ void CV_PhaseCorrelatorTest::run( int )
double expectedShiftY = -20.0;
// draw 10x10 rectangles @ (100, 100) and (90, 80) should see ~(-10, -20) shift here...
cv::rectangle(r1, Point(100, 100), Point(110, 110), Scalar(0, 0, 0), CV_FILLED);
cv::rectangle(r2, Point(90, 80), Point(100, 90), Scalar(0, 0, 0), CV_FILLED);
cv::rectangle(r1, Point(100, 100), Point(110, 110), Scalar(0, 0, 0), cv::FILLED);
cv::rectangle(r2, Point(90, 80), Point(100, 90), Scalar(0, 0, 0), cv::FILLED);
Mat hann;
createHanningWindow(hann, r1.size(), CV_64F);
......
......@@ -125,7 +125,7 @@ void CV_TemplMatchTest::get_test_array_types_and_sizes( int test_case_idx,
double CV_TemplMatchTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ )
{
if( test_mat[INPUT][1].depth() == CV_8U ||
(method >= CV_TM_CCOEFF && test_mat[INPUT][1].cols*test_mat[INPUT][1].rows <= 2) )
(method >= cv::TM_CCOEFF && test_mat[INPUT][1].cols*test_mat[INPUT][1].rows <= 2) )
return 1e-2;
else
return 1e-3;
......@@ -162,7 +162,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
if( b_sdv.val[0]*b_sdv.val[0] + b_sdv.val[1]*b_sdv.val[1] +
b_sdv.val[2]*b_sdv.val[2] + b_sdv.val[3]*b_sdv.val[3] < DBL_EPSILON &&
method == CV_TM_CCOEFF_NORMED )
method == cv::TM_CCOEFF_NORMED )
{
cvSet( result, cvScalarAll(1.) );
return;
......@@ -171,7 +171,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
if( method & 1 )
{
b_denom = 0;
if( method != CV_TM_CCOEFF_NORMED )
if( method != cv::TM_CCOEFF_NORMED )
{
b_denom = b_sum2;
}
......@@ -185,7 +185,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
b_denom = 1.;
}
CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );
CV_Assert( cv::TM_SQDIFF <= method && method <= cv::TM_CCOEFF_NORMED );
for( i = 0; i < result->rows; i++ )
{
......@@ -200,7 +200,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
const uchar* a = img->data.ptr + i*img->step + j*cn;
const uchar* b = templ->data.ptr;
if( cn == 1 || method < CV_TM_CCOEFF )
if( cn == 1 || method < cv::TM_CCOEFF )
{
for( k = 0; k < height; k++, a += a_step, b += b_step )
for( l = 0; l < width_n; l++ )
......@@ -232,7 +232,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
const float* a = (const float*)(img->data.ptr + i*img->step) + j*cn;
const float* b = (const float*)templ->data.ptr;
if( cn == 1 || method < CV_TM_CCOEFF )
if( cn == 1 || method < cv::TM_CCOEFF )
{
for( k = 0; k < height; k++, a += a_step, b += b_step )
for( l = 0; l < width_n; l++ )
......@@ -262,12 +262,12 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
switch( method )
{
case CV_TM_CCORR:
case CV_TM_CCORR_NORMED:
case cv::TM_CCORR:
case cv::TM_CCORR_NORMED:
value = ccorr.val[0];
break;
case CV_TM_SQDIFF:
case CV_TM_SQDIFF_NORMED:
case cv::TM_SQDIFF:
case cv::TM_SQDIFF_NORMED:
value = (a_sum2.val[0] + b_sum2 - 2*ccorr.val[0]);
break;
default:
......@@ -281,7 +281,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
double denom;
// calc denominator
if( method != CV_TM_CCOEFF_NORMED )
if( method != cv::TM_CCOEFF_NORMED )
{
denom = a_sum2.val[0] + a_sum2.val[1] + a_sum2.val[2];
}
......@@ -297,7 +297,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
else if( fabs(value) < denom*1.125 )
value = value > 0 ? 1 : -1;
else
value = method != CV_TM_SQDIFF_NORMED ? 0 : 1;
value = method != cv::TM_SQDIFF_NORMED ? 0 : 1;
}
((float*)(result->data.ptr + result->step*i))[j] = (float)value;
......@@ -323,7 +323,7 @@ void CV_TemplMatchTest::prepare_to_validation( int /*test_case_idx*/ )
cvReleaseFileStorage( &fs );
}*/
if( method >= CV_TM_CCOEFF )
if( method >= cv::TM_CCOEFF )
{
// avoid numerical stability problems in singular cases (when the results are near to 0)
const double delta = 10.;
......@@ -420,7 +420,7 @@ TEST(Imgproc_MatchTemplate, bug_9597) {
cv::Mat cvimg(cv::Size(61, 82), CV_8UC1, (void*)img, cv::Mat::AUTO_STEP);
cv::Mat cvtmpl(cv::Size(17, 17), CV_8UC1, (void*)tmpl, cv::Mat::AUTO_STEP);
cv::Mat result;
cv::matchTemplate(cvimg, cvtmpl, result, CV_TM_SQDIFF);
cv::matchTemplate(cvimg, cvtmpl, result, cv::TM_SQDIFF);
double minValue;
cv::minMaxLoc(result, &minValue, NULL, NULL, NULL);
ASSERT_GE(minValue, 0);
......
......@@ -6,9 +6,9 @@
namespace opencv_test { namespace {
CV_ENUM(MatchTemplType, CV_TM_CCORR, CV_TM_CCORR_NORMED,
CV_TM_SQDIFF, CV_TM_SQDIFF_NORMED,
CV_TM_CCOEFF, CV_TM_CCOEFF_NORMED)
CV_ENUM(MatchTemplType, cv::TM_CCORR, cv::TM_CCORR_NORMED,
cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED,
cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)
class Imgproc_MatchTemplateWithMask : public TestWithParam<std::tuple<MatType,MatType>>
{
......@@ -76,7 +76,7 @@ void Imgproc_MatchTemplateWithMask::SetUp()
TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplSQDIFF)
{
matchTemplate(img_testtype_, templ_testtype_, result_, CV_TM_SQDIFF, mask_testtype_);
matchTemplate(img_testtype_, templ_testtype_, result_, cv::TM_SQDIFF, mask_testtype_);
// Naive implementation for one point
Mat temp = img_roi_masked_ - templ_masked_;
Scalar temp_s = sum(temp.mul(temp));
......@@ -87,7 +87,7 @@ TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplSQDIFF)
TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplSQDIFF_NORMED)
{
matchTemplate(img_testtype_, templ_testtype_, result_, CV_TM_SQDIFF_NORMED, mask_testtype_);
matchTemplate(img_testtype_, templ_testtype_, result_, cv::TM_SQDIFF_NORMED, mask_testtype_);
// Naive implementation for one point
Mat temp = img_roi_masked_ - templ_masked_;
Scalar temp_s = sum(temp.mul(temp));
......@@ -106,7 +106,7 @@ TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplSQDIFF_NORMED)
TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplCCORR)
{
matchTemplate(img_testtype_, templ_testtype_, result_, CV_TM_CCORR, mask_testtype_);
matchTemplate(img_testtype_, templ_testtype_, result_, cv::TM_CCORR, mask_testtype_);
// Naive implementation for one point
Scalar temp_s = sum(templ_masked_.mul(img_roi_masked_));
double val = temp_s[0] + temp_s[1] + temp_s[2] + temp_s[3];
......@@ -116,7 +116,7 @@ TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplCCORR)
TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplCCORR_NORMED)
{
matchTemplate(img_testtype_, templ_testtype_, result_, CV_TM_CCORR_NORMED, mask_testtype_);
matchTemplate(img_testtype_, templ_testtype_, result_, cv::TM_CCORR_NORMED, mask_testtype_);
// Naive implementation for one point
Scalar temp_s = sum(templ_masked_.mul(img_roi_masked_));
double val = temp_s[0] + temp_s[1] + temp_s[2] + temp_s[3];
......@@ -134,7 +134,7 @@ TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplCCORR_NORMED)
TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplCCOEFF)
{
matchTemplate(img_testtype_, templ_testtype_, result_, CV_TM_CCOEFF, mask_testtype_);
matchTemplate(img_testtype_, templ_testtype_, result_, cv::TM_CCOEFF, mask_testtype_);
// Naive implementation for one point
Scalar temp_s = sum(mask_);
for (int i = 0; i < 4; i++)
......@@ -157,7 +157,7 @@ TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplCCOEFF)
TEST_P(Imgproc_MatchTemplateWithMask, CompareNaiveImplCCOEFF_NORMED)
{
matchTemplate(img_testtype_, templ_testtype_, result_, CV_TM_CCOEFF_NORMED, mask_testtype_);
matchTemplate(img_testtype_, templ_testtype_, result_, cv::TM_CCOEFF_NORMED, mask_testtype_);
// Naive implementation for one point
Scalar temp_s = sum(mask_);
for (int i = 0; i < 4; i++)
......@@ -265,14 +265,14 @@ INSTANTIATE_TEST_CASE_P(SingleChannelMask, Imgproc_MatchTemplateWithMask2,
Combine(
Values(CV_32FC1, CV_32FC3, CV_8UC1, CV_8UC3),
Values(CV_32FC1, CV_8UC1),
Values(CV_TM_SQDIFF, CV_TM_SQDIFF_NORMED, CV_TM_CCORR, CV_TM_CCORR_NORMED,
CV_TM_CCOEFF, CV_TM_CCOEFF_NORMED)));
Values(cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED,
cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)));
INSTANTIATE_TEST_CASE_P(MultiChannelMask, Imgproc_MatchTemplateWithMask2,
Combine(
Values(CV_32FC3, CV_8UC3),
Values(CV_32FC3, CV_8UC3),
Values(CV_TM_SQDIFF, CV_TM_SQDIFF_NORMED, CV_TM_CCORR, CV_TM_CCORR_NORMED,
CV_TM_CCOEFF, CV_TM_CCOEFF_NORMED)));
Values(cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED,
cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)));
}} // namespace
......@@ -735,17 +735,13 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
}\
}
namespace cv {
template<> struct DefaultDeleter<IplConvKernel>{ void operator ()(IplConvKernel* obj) const { cvReleaseStructuringElement(&obj); } };
}
static void
icvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_img,
double inpaintRange, int flags )
{
cv::Ptr<CvMat> mask, band, f, t, out;
cv::Ptr<CvPriorityQueueFloat> Heap, Out;
cv::Ptr<IplConvKernel> el_cross, el_range;
cv::Mat el_range, el_cross; // structuring elements for dilate
CvMat input_hdr, mask_hdr, output_hdr;
CvMat* input_img, *inpaint_mask, *output_img;
......@@ -780,7 +776,7 @@ icvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_
t.reset(cvCreateMat(erows, ecols, CV_32FC1));
band.reset(cvCreateMat(erows, ecols, CV_8UC1));
mask.reset(cvCreateMat(erows, ecols, CV_8UC1));
el_cross.reset(cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_CROSS,NULL));
el_cross = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3), cv::Point(1, 1));
cvCopy( input_img, output_img );
cvSet(mask,cvScalar(KNOWN,0,0,0));
......@@ -788,7 +784,7 @@ icvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_
SET_BORDER1_C1(mask,uchar,0);
cvSet(f,cvScalar(KNOWN,0,0,0));
cvSet(t,cvScalar(1.0e6f,0,0,0));
cvDilate(mask,band,el_cross,1); // image with narrow band
cv::dilate(cv::cvarrToMat(mask), cv::cvarrToMat(band), el_cross, cv::Point(1, 1));
Heap=cv::makePtr<CvPriorityQueueFloat>();
if (!Heap->Init(band))
return;
......@@ -803,9 +799,8 @@ icvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_
if( flags == cv::INPAINT_TELEA )
{
out.reset(cvCreateMat(erows, ecols, CV_8UC1));
el_range.reset(cvCreateStructuringElementEx(2*range+1,2*range+1,
range,range,CV_SHAPE_RECT,NULL));
cvDilate(mask,out,el_range,1);
el_range = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2 * range + 1, 2 * range + 1));
cv::dilate(cv::cvarrToMat(mask), cv::cvarrToMat(out), el_range);
cvSub(out,mask,out,NULL);
Out=cv::makePtr<CvPriorityQueueFloat>();
if (!Out->Init(out))
......
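The rewritten internals above build structuring elements with cv::getStructuringElement and dilate with cv::dilate; from user code the same functionality is reached through the public wrapper. A hedged sketch (the radius value is illustrative):

```cpp
#include <opencv2/photo.hpp>

// Fill the regions marked by a non-zero 8-bit mask using the Telea algorithm.
void inpaintTelea(const cv::Mat& src, const cv::Mat& mask, cv::Mat& dst)
{
    cv::inpaint(src, mask, dst, /*inpaintRadius=*/3, cv::INPAINT_TELEA);
}
```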
......@@ -81,7 +81,7 @@ double TestUtils::checkNorm2(InputArray m1, InputArray m2, InputArray mask)
double TestUtils::checkSimilarity(InputArray m1, InputArray m2)
{
Mat diff;
matchTemplate(m1.getMat(), m2.getMat(), diff, CV_TM_CCORR_NORMED);
matchTemplate(m1.getMat(), m2.getMat(), diff, cv::TM_CCORR_NORMED);
return std::abs(diff.at<float>(0, 0) - 1.f);
}
......
......@@ -3061,7 +3061,7 @@ void threshold( const Mat& _src, Mat& _dst,
switch( thresh_type )
{
case CV_THRESH_BINARY:
case cv::THRESH_BINARY:
for( i = 0; i < height; i++ )
{
if( depth == CV_8U )
......@@ -3087,7 +3087,7 @@ void threshold( const Mat& _src, Mat& _dst,
}
}
break;
case CV_THRESH_BINARY_INV:
case cv::THRESH_BINARY_INV:
for( i = 0; i < height; i++ )
{
if( depth == CV_8U )
......@@ -3113,7 +3113,7 @@ void threshold( const Mat& _src, Mat& _dst,
}
}
break;
case CV_THRESH_TRUNC:
case cv::THRESH_TRUNC:
for( i = 0; i < height; i++ )
{
if( depth == CV_8U )
......@@ -3148,7 +3148,7 @@ void threshold( const Mat& _src, Mat& _dst,
}
}
break;
case CV_THRESH_TOZERO:
case cv::THRESH_TOZERO:
for( i = 0; i < height; i++ )
{
if( depth == CV_8U )
......@@ -3183,7 +3183,7 @@ void threshold( const Mat& _src, Mat& _dst,
}
}
break;
case CV_THRESH_TOZERO_INV:
case cv::THRESH_TOZERO_INV:
for( i = 0; i < height; i++ )
{
if( depth == CV_8U )
......
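A hedged sketch of the public threshold call with the renamed type constants; the 0/255 values and the Otsu combination are just a common usage pattern, not part of the test above:

```cpp
#include <opencv2/imgproc.hpp>

// Binarize an 8-bit grayscale image; with THRESH_OTSU the threshold is chosen automatically.
double binarizeOtsu(const cv::Mat& gray, cv::Mat& bin)
{
    return cv::threshold(gray, bin, /*thresh=*/0, /*maxval=*/255,
                         cv::THRESH_BINARY | cv::THRESH_OTSU);
}
```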
......@@ -274,7 +274,7 @@ int CV_CamShiftTest::validate_test_results( int /*test_case_idx*/ )
cvIsNaN(box.center.y) || cvIsInf(box.center.y) ||
cvIsNaN(box.angle) || cvIsInf(box.angle) || box.angle < -180 || box.angle > 180 )
{
ts->printf( cvtest::TS::LOG, "Invalid CvBox2D or CvConnectedComp was returned by cvCamShift\n" );
ts->printf( cvtest::TS::LOG, "Invalid RotatedRect was returned by CamShift\n" );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
goto _exit_;
}
......@@ -317,19 +317,6 @@ _exit_:
if( code < 0 )
{
#if 0 //defined _DEBUG && defined _WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT );
cvCvtColor( img, dst, CV_GRAY2BGR );
cvRectangle( dst, cvPoint(init_rect.x, init_rect.y),
cvPoint(init_rect.x + init_rect.width, init_rect.y + init_rect.height),
CV_RGB(255,0,0), 3, 8, 0 );
cvEllipseBox( dst, box, CV_RGB(0,255,0), 3, 8, 0 );
cvShowImage( "test", dst );
cvReleaseImage( &dst );
cvWaitKey();
#endif
ts->set_failed_test_info( code );
}
return code;
......@@ -428,7 +415,7 @@ int CV_MeanShiftTest::validate_test_results( int /*test_case_idx*/ )
rect.y + rect.height > box0.center.y + delta )
{
ts->printf( cvtest::TS::LOG,
"Incorrect CvConnectedComp ((%d,%d,%d,%d) is not within (%.1f,%.1f,%.1f,%.1f))\n",
"Incorrect ConnectedComp ((%d,%d,%d,%d) is not within (%.1f,%.1f,%.1f,%.1f))\n",
rect.x, rect.y, rect.x + rect.width, rect.y + rect.height,
box0.center.x - delta, box0.center.y - delta, box0.center.x + delta, box0.center.y + delta );
code = cvtest::TS::FAIL_BAD_ACCURACY;
......@@ -438,21 +425,6 @@ _exit_:
if( code < 0 )
{
#if 0// defined _DEBUG && defined _WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT );
cvCvtColor( img, dst, CV_GRAY2BGR );
cvRectangle( dst, cvPoint(init_rect.x, init_rect.y),
cvPoint(init_rect.x + init_rect.width, init_rect.y + init_rect.height),
CV_RGB(255,0,0), 3, 8, 0 );
cvRectangle( dst, cvPoint(comp.rect.x, comp.rect.y),
cvPoint(comp.rect.x + comp.rect.width, comp.rect.y + comp.rect.height),
CV_RGB(0,255,0), 3, 8, 0 );
cvShowImage( "test", dst );
cvReleaseImage( &dst );
cvWaitKey();
#endif
ts->set_failed_test_info( code );
}
return code;
......
......@@ -751,7 +751,7 @@ fromConnection:(AVCaptureConnection *)connection{
bgr_image->imageData = bgr_imagedata;
bgr_image->imageSize = (int)currSize;
cvCvtColor(image, bgr_image, CV_BGRA2BGR);
cv::cvtColor(cv::cvarrToMat(image), cv::cvarrToMat(bgr_image), cv::COLOR_BGRA2BGR);
	// The image taken from the buffer is incorrectly rotated; cvTranspose + cvFlip are used to compensate.
	// There should be an option in the iOS API to rotate the buffer output orientation.
......@@ -1001,11 +1001,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
deviceChannels = 4;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_BGRA2BGR;
cvtCode = cv::COLOR_BGRA2BGR;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = CV_BGRA2RGB;
cvtCode = cv::COLOR_BGRA2RGB;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_BGRA2GRAY;
cvtCode = cv::COLOR_BGRA2GRAY;
} else {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
......@@ -1017,11 +1017,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
deviceChannels = 3;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_RGB2BGR;
cvtCode = cv::COLOR_RGB2BGR;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = 0;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_RGB2GRAY;
cvtCode = cv::COLOR_RGB2GRAY;
} else {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
......@@ -1033,11 +1033,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
deviceChannels = 2;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_YUV2BGR_UYVY;
cvtCode = cv::COLOR_YUV2BGR_UYVY;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = CV_YUV2RGB_UYVY;
cvtCode = cv::COLOR_YUV2RGB_UYVY;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_YUV2GRAY_UYVY;
cvtCode = cv::COLOR_YUV2GRAY_UYVY;
} else if (mMode == CV_CAP_MODE_YUYV) {
cvtCode = -1; // Copy
} else {
......@@ -1053,11 +1053,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
deviceChannels = 1;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_YUV2BGR_YV12;
cvtCode = cv::COLOR_YUV2BGR_YV12;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = CV_YUV2RGB_YV12;
cvtCode = cv::COLOR_YUV2RGB_YV12;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_YUV2GRAY_420;
cvtCode = cv::COLOR_YUV2GRAY_420;
} else {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
......@@ -1089,7 +1089,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
if (cvtCode == -1) {
cv::cvarrToMat(mDeviceImage).copyTo(cv::cvarrToMat(mOutImage));
} else {
cvCvtColor(mDeviceImage, mOutImage, cvtCode);
cv::cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cvtCode);
}
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
......@@ -1383,10 +1383,10 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
if (movieColor) {
//assert(iplimage->nChannels == 3);
cvCvtColor(iplimage, argbimage, CV_BGR2BGRA);
cv::cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_BGR2BGRA);
}else{
//assert(iplimage->nChannels == 1);
cvCvtColor(iplimage, argbimage, CV_GRAY2BGRA);
cv::cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_GRAY2BGRA);
}
//IplImage -> CGImage conversion
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
......
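The capture and writer hunks in this backend all follow the same wrapper pattern: the IplImage buffers stay, but they are viewed as cv::Mat headers through cv::cvarrToMat and converted with cv::cvtColor. A sketch under the assumption that source and destination buffers are already allocated with matching sizes, as they are in these backends; the names below are placeholders:

#include <opencv2/core/core_c.h>   // IplImage / cv::cvarrToMat interop
#include <opencv2/imgproc.hpp>

static void convertFrame(const IplImage* deviceImage, IplImage* outImage, int cvtCode)
{
    // cvarrToMat creates Mat headers over the existing IplImage data (no copy),
    // so writing into dstMat fills the caller's buffer in place as long as the
    // cvtColor output size/type matches the destination header.
    cv::Mat srcMat = cv::cvarrToMat(deviceImage);
    cv::Mat dstMat = cv::cvarrToMat(outImage);
    if (cvtCode == -1)
        srcMat.copyTo(dstMat);                  // same layout: plain copy
    else
        cv::cvtColor(srcMat, dstMat, cvtCode);  // e.g. cv::COLOR_BGRA2BGR
}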
......@@ -697,7 +697,7 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
mDeviceImage->imageData = reinterpret_cast<char *>(baseaddress);
mDeviceImage->imageSize = int(rowBytes*height);
cvCvtColor(mDeviceImage, mOutImage, CV_BGRA2BGR);
cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cv::COLOR_BGRA2BGR);
} else if ( pixelFormat == kCVPixelFormatType_422YpCbCr8 ) {
if ( currSize != width*3*height ) {
currSize = width*3*height;
......@@ -716,7 +716,7 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
mDeviceImage->imageData = reinterpret_cast<char *>(baseaddress);
mDeviceImage->imageSize = int(rowBytes*height);
cvCvtColor(mDeviceImage, mOutImage, CV_YUV2BGR_UYVY);
cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cv::COLOR_YUV2BGR_UYVY);
} else {
fprintf(stderr, "OpenCV: unknown pixel format 0x%08X\n", pixelFormat);
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
......@@ -827,7 +827,7 @@ bool CvCaptureFile::setupReadingAt(CMTime position) {
// Capture in a pixel format that can be converted efficiently to the output mode.
OSType pixelFormat;
if (mMode == CV_CAP_MODE_BGR || mMode == CV_CAP_MODE_RGB) {
// For CV_CAP_MODE_BGR, read frames as BGRA (AV Foundation's YUV->RGB conversion is slightly faster than OpenCV's CV_YUV2BGR_YV12)
// For CV_CAP_MODE_BGR, read frames as BGRA (AV Foundation's YUV->RGB conversion is slightly faster than OpenCV's cv::COLOR_YUV2BGR_YV12)
// kCVPixelFormatType_32ABGR is reportedly faster on OS X, but OpenCV doesn't have a CV_ABGR2BGR conversion.
// kCVPixelFormatType_24RGB is significantly slower than kCVPixelFormatType_32BGRA.
pixelFormat = kCVPixelFormatType_32BGRA;
......@@ -972,11 +972,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
deviceChannels = 4;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_BGRA2BGR;
cvtCode = cv::COLOR_BGRA2BGR;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = CV_BGRA2RGB;
cvtCode = cv::COLOR_BGRA2RGB;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_BGRA2GRAY;
cvtCode = cv::COLOR_BGRA2GRAY;
} else {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
......@@ -988,11 +988,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
deviceChannels = 3;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_RGB2BGR;
cvtCode = cv::COLOR_RGB2BGR;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = 0;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_RGB2GRAY;
cvtCode = cv::COLOR_RGB2GRAY;
} else {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
......@@ -1004,11 +1004,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
deviceChannels = 2;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_YUV2BGR_UYVY;
cvtCode = cv::COLOR_YUV2BGR_UYVY;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = CV_YUV2RGB_UYVY;
cvtCode = cv::COLOR_YUV2RGB_UYVY;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_YUV2GRAY_UYVY;
cvtCode = cv::COLOR_YUV2GRAY_UYVY;
} else if (mMode == CV_CAP_MODE_YUYV) {
cvtCode = -1; // Copy
} else {
......@@ -1020,17 +1020,17 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
}
} else if ( pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange || // 420v
pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ) { // 420f
// cvCvtColor(CV_YUV2GRAY_420) is expecting a single buffer with both the Y plane and the CrCb planes.
// So, lie about the height of the buffer. cvCvtColor(CV_YUV2GRAY_420) will only read the first 2/3 of it.
// cvtColor(cv::COLOR_YUV2GRAY_420) is expecting a single buffer with both the Y plane and the CrCb planes.
// So, lie about the height of the buffer. cvtColor(cv::COLOR_YUV2GRAY_420) will only read the first 2/3 of it.
height = height * 3 / 2;
deviceChannels = 1;
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = CV_YUV2BGR_YV12;
cvtCode = cv::COLOR_YUV2BGR_YV12;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = CV_YUV2RGB_YV12;
cvtCode = cv::COLOR_YUV2RGB_YV12;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = CV_YUV2GRAY_420;
cvtCode = cv::COLOR_YUV2GRAY_420;
} else {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
......@@ -1063,7 +1063,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
// Copy.
cv::cvarrToMat(mDeviceImage).copyTo(cv::cvarrToMat(mOutImage));
} else {
cvCvtColor(mDeviceImage, mOutImage, cvtCode);
cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cvtCode);
}
......@@ -1352,10 +1352,10 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
if (movieColor) {
//assert(iplimage->nChannels == 3);
cvCvtColor(iplimage, argbimage, CV_BGR2BGRA);
cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_BGR2BGRA);
}else{
//assert(iplimage->nChannels == 1);
cvCvtColor(iplimage, argbimage, CV_GRAY2BGRA);
cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_GRAY2BGRA);
}
//IplImage -> CGImage conversion
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
......
......@@ -1052,7 +1052,7 @@ IplImage* CvCapture_OpenNI2::retrieveGrayImage()
cv::Mat rgbImage;
getBGRImageFromMetaData(streamFrames[CV_COLOR_STREAM], rgbImage);
cv::cvtColor( rgbImage, outputMaps[CV_CAP_OPENNI_GRAY_IMAGE].mat, CV_BGR2GRAY );
cv::cvtColor( rgbImage, outputMaps[CV_CAP_OPENNI_GRAY_IMAGE].mat, cv::COLOR_BGR2GRAY );
return outputMaps[CV_CAP_OPENNI_GRAY_IMAGE].getIplImagePtr();
}
......
......@@ -64,7 +64,7 @@ public class Puzzle15Processor {
}
for (int i = 0; i < GRID_AREA; i++) {
Size s = Imgproc.getTextSize(Integer.toString(i + 1), 3/* CV_FONT_HERSHEY_COMPLEX */, 1, 2, null);
Size s = Imgproc.getTextSize(Integer.toString(i + 1), Imgproc.FONT_HERSHEY_COMPLEX, 1, 2, null);
mTextHeights[i] = (int) s.height;
mTextWidths[i] = (int) s.width;
}
......@@ -100,7 +100,7 @@ public class Puzzle15Processor {
cells[idx].copyTo(mCells15[i]);
if (mShowTileNumbers) {
Imgproc.putText(mCells15[i], Integer.toString(1 + idx), new Point((cols / GRID_SIZE - mTextWidths[idx]) / 2,
(rows / GRID_SIZE + mTextHeights[idx]) / 2), 3/* CV_FONT_HERSHEY_COMPLEX */, 1, new Scalar(255, 0, 0, 255), 2);
(rows / GRID_SIZE + mTextHeights[idx]) / 2), Imgproc.FONT_HERSHEY_COMPLEX, 1, new Scalar(255, 0, 0, 255), 2);
}
}
}
......
......@@ -52,7 +52,7 @@ int main( int argc, const char** argv )
return -1;
}
int method = parser.get<int>("cm"); // default 3 (CV_TM_CCORR_NORMED)
int method = parser.get<int>("cm"); // default 3 (cv::TM_CCORR_NORMED)
matchTemplate(img, tmpl, res, method, mask);
double minVal, maxVal;
......
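The sample above only renames the constant; a minimal standalone use of cv::matchTemplate with the scoped name (img and tmpl are assumed to be already-loaded images of compatible type):

#include <opencv2/imgproc.hpp>

static cv::Point bestMatch(const cv::Mat& img, const cv::Mat& tmpl)
{
    cv::Mat res;
    cv::matchTemplate(img, tmpl, res, cv::TM_CCORR_NORMED);  // was CV_TM_CCORR_NORMED (value 3)
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(res, &minVal, &maxVal, &minLoc, &maxLoc);
    return maxLoc;   // for normalized correlation the best match is at the maximum
}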
......@@ -28,7 +28,7 @@ public class MorphologyDemo1 {
private static final int MAX_KERNEL_SIZE = 21;
private Mat matImgSrc;
private Mat matImgDst = new Mat();
private int elementType = Imgproc.CV_SHAPE_RECT;
private int elementType = Imgproc.MORPH_RECT;
private int kernelSize = 0;
private boolean doErosion = true;
private JFrame frame;
......@@ -74,11 +74,11 @@ public class MorphologyDemo1 {
@SuppressWarnings("unchecked")
JComboBox<String> cb = (JComboBox<String>)e.getSource();
if (cb.getSelectedIndex() == 0) {
elementType = Imgproc.CV_SHAPE_RECT;
elementType = Imgproc.MORPH_RECT;
} else if (cb.getSelectedIndex() == 1) {
elementType = Imgproc.CV_SHAPE_CROSS;
elementType = Imgproc.MORPH_CROSS;
} else if (cb.getSelectedIndex() == 2) {
elementType = Imgproc.CV_SHAPE_ELLIPSE;
elementType = Imgproc.MORPH_ELLIPSE;
}
update();
}
......