Commit 2dea07f9 authored by Vadim Pisarevsky

repaired Python bindings for functions using InputOutputArray

Parent f97e5f73
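The practical effect of the change is that parameters declared as InputOutputArray are again both accepted as arguments and handed back in the Python return tuple. Below is a minimal usage sketch, assuming a cv2 build that includes this fix; the image size and alpha value are illustrative only.

    import numpy as np
    import cv2

    frame = np.random.randint(0, 256, (480, 640), dtype=np.uint8)  # current frame
    acc = np.zeros((480, 640), dtype=np.float32)                   # running-average accumulator

    # accumulateWeighted's dst is an InputOutputArray: it is read, updated in place,
    # and also returned, so the accumulator can be rebound from the return value.
    acc = cv2.accumulateWeighted(frame, acc, 0.05)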
@@ -561,8 +561,8 @@ enum
 CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
                                      InputArrayOfArrays imagePoints,
                                      Size imageSize,
-                                     CV_IN_OUT InputOutputArray cameraMatrix,
-                                     CV_IN_OUT InputOutputArray distCoeffs,
+                                     CV_OUT InputOutputArray cameraMatrix,
+                                     CV_OUT InputOutputArray distCoeffs,
                                      OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
                                      int flags=0 );
@@ -581,10 +581,10 @@ CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
 CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
                                      InputArrayOfArrays imagePoints1,
                                      InputArrayOfArrays imagePoints2,
-                                     CV_IN_OUT InputOutputArray cameraMatrix1,
-                                     CV_IN_OUT InputOutputArray distCoeffs1,
-                                     CV_IN_OUT InputOutputArray cameraMatrix2,
-                                     CV_IN_OUT InputOutputArray distCoeffs2,
+                                     CV_OUT InputOutputArray cameraMatrix1,
+                                     CV_OUT InputOutputArray distCoeffs1,
+                                     CV_OUT InputOutputArray cameraMatrix2,
+                                     CV_OUT InputOutputArray distCoeffs2,
                                      Size imageSize, OutputArray R,
                                      OutputArray T, OutputArray E, OutputArray F,
                                      TermCriteria criteria = TermCriteria(TermCriteria::COUNT+
@@ -2392,7 +2392,7 @@ CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts,
 CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2);
 //! clips the line segment by the rectangle imgRect
-CV_EXPORTS_W bool clipLine(Rect imgRect, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2);
+CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2);

 /*!
  Line iterator class
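With CV_OUT added to the Point& parameters, the wrapped clipLine hands the clipped endpoints back along with the boolean result. A minimal sketch of the expected calling pattern, assuming a cv2 build with this change; the coordinates are illustrative only.

    import cv2

    rect = (0, 0, 640, 480)            # imgRect as (x, y, width, height)
    pt1, pt2 = (-50, 100), (700, 300)  # segment crossing the rectangle border

    # inside tells whether any part of the segment lies within the rectangle;
    # the clipped endpoints are returned as well.
    inside, pt1, pt2 = cv2.clipLine(rect, pt1, pt2)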
@@ -574,16 +574,16 @@ CV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum,
                                         int sdepth=-1 );
 //! adds image to the accumulator (dst += src). Unlike cv::add, dst and src can have different types.
-CV_EXPORTS_W void accumulate( InputArray src, CV_IN_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulate( InputArray src, CV_OUT InputOutputArray dst,
                               InputArray mask=noArray() );
 //! adds squared src image to the accumulator (dst += src*src).
-CV_EXPORTS_W void accumulateSquare( InputArray src, CV_IN_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulateSquare( InputArray src, CV_OUT InputOutputArray dst,
                                     InputArray mask=noArray() );
 //! adds product of the 2 images to the accumulator (dst += src1*src2).
 CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
-                                     CV_IN_OUT InputOutputArray dst, InputArray mask=noArray() );
+                                     CV_OUT InputOutputArray dst, InputArray mask=noArray() );
 //! updates the running average (dst = dst*(1-alpha) + src*alpha)
-CV_EXPORTS_W void accumulateWeighted( InputArray src, CV_IN_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulateWeighted( InputArray src, CV_OUT InputOutputArray dst,
                                       double alpha, InputArray mask=noArray() );
 //! type of the threshold operation
@@ -417,7 +417,7 @@ void cv::accumulateProduct( InputArray _src1, InputArray _src2,
 }

-void cv::accumulateWeighted( InputArray _src, CV_IN_OUT InputOutputArray _dst,
+void cv::accumulateWeighted( InputArray _src, InputOutputArray _dst,
                              double alpha, InputArray _mask )
 {
     Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat();
@@ -278,10 +278,12 @@ class ArgInfo(object):
         self.arraycvt = None
         self.inputarg = True
         self.outputarg = False
+        self.returnarg = False
         for m in arg_tuple[3]:
             if m == "/O":
                 self.inputarg = False
                 self.outputarg = True
+                self.returnarg = True
             elif m == "/IO":
                 self.inputarg = True
                 self.outputarg = True
@@ -353,7 +355,7 @@ class FuncVariant(object):
             argno += 1
             if a.name in self.array_counters:
                 continue
-            if a.outputarg:
+            if a.returnarg:
                 outlist.append((a.name, argno))
             if not a.inputarg:
                 if a.isbig():
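For reference, a standalone sketch of the flag logic visible in the two generator hunks above (illustrative code, not the generator itself): /O marks a parameter as output-only and as part of the Python return tuple, while /IO keeps it as an input as well; setting returnarg in the /IO branch is an assumption, since that part of the hunk is cut off here.

    def parse_modifiers(modifiers):
        # Defaults mirror ArgInfo: a plain parameter is an input only.
        inputarg, outputarg, returnarg = True, False, False
        for m in modifiers:
            if m == "/O":            # output-only parameter, placed in the return tuple
                inputarg, outputarg, returnarg = False, True, True
            elif m == "/IO":         # in/out parameter: still required as an argument...
                inputarg, outputarg = True, True
                returnarg = True     # ...and (assumed) also part of the return tuple
        return inputarg, outputarg, returnarg

    print(parse_modifiers(["/IO"]))  # (True, True, True)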
@@ -214,82 +214,6 @@ CV_WRAP static inline void findCirclesGridDefault( InputArray image, Size patternSize,
     findCirclesGrid(image, patternSize, centers, flags);
 }

-/*
-//! initializes camera matrix from a few 3D points and the corresponding projections.
-CV_WRAP static inline Mat initCameraMatrix2D( const vector<Mat>& objectPoints,
-                                              const vector<Mat>& imagePoints,
-                                              Size imageSize, double aspectRatio=1. )
-{
-    vector<vector<Point3f> > _objectPoints;
-    vector<vector<Point2f> > _imagePoints;
-    mv2vv(objectPoints, _objectPoints);
-    mv2vv(imagePoints, _imagePoints);
-    return initCameraMatrix2D(_objectPoints, _imagePoints, imageSize, aspectRatio);
-}
-
-CV_WRAP static inline double calibrateCamera( const vector<Mat>& objectPoints,
-                                              const vector<Mat>& imagePoints,
-                                              Size imageSize,
-                                              CV_IN_OUT Mat& cameraMatrix,
-                                              CV_IN_OUT Mat& distCoeffs,
-                                              vector<Mat>& rvecs, vector<Mat>& tvecs,
-                                              int flags=0 )
-{
-    vector<vector<Point3f> > _objectPoints;
-    vector<vector<Point2f> > _imagePoints;
-    mv2vv(objectPoints, _objectPoints);
-    mv2vv(imagePoints, _imagePoints);
-    return calibrateCamera(_objectPoints, _imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags);
-}
-
-CV_WRAP static inline double stereoCalibrate( const vector<Mat>& objectPoints,
-                                              const vector<Mat>& imagePoints1,
-                                              const vector<Mat>& imagePoints2,
-                                              CV_IN_OUT Mat& cameraMatrix1, CV_IN_OUT Mat& distCoeffs1,
-                                              CV_IN_OUT Mat& cameraMatrix2, CV_IN_OUT Mat& distCoeffs2,
-                                              Size imageSize, CV_OUT Mat& R, CV_OUT Mat& T,
-                                              CV_OUT Mat& E, CV_OUT Mat& F,
-                                              TermCriteria criteria = TermCriteria(TermCriteria::COUNT+
-                                                  TermCriteria::EPS, 30, 1e-6),
-                                              int flags=CALIB_FIX_INTRINSIC )
-{
-    vector<vector<Point3f> > _objectPoints;
-    vector<vector<Point2f> > _imagePoints1;
-    vector<vector<Point2f> > _imagePoints2;
-    mv2vv(objectPoints, _objectPoints);
-    mv2vv(imagePoints1, _imagePoints1);
-    mv2vv(imagePoints2, _imagePoints2);
-    return stereoCalibrate(_objectPoints, _imagePoints1, _imagePoints2, cameraMatrix1, distCoeffs1,
-                           cameraMatrix2, distCoeffs2, imageSize, R, T, E, F, criteria, flags);
-}
-
-CV_WRAP static inline float rectify3Collinear( const Mat& cameraMatrix1, const Mat& distCoeffs1,
-                                               const Mat& cameraMatrix2, const Mat& distCoeffs2,
-                                               const Mat& cameraMatrix3, const Mat& distCoeffs3,
-                                               const vector<Mat>& imgpt1, const vector<Mat>& imgpt3,
-                                               Size imageSize, const Mat& R12, const Mat& T12,
-                                               const Mat& R13, const Mat& T13,
-                                               CV_OUT Mat& R1, CV_OUT Mat& R2, CV_OUT Mat& R3,
-                                               CV_OUT Mat& P1, CV_OUT Mat& P2, CV_OUT Mat& P3, CV_OUT Mat& Q,
-                                               double alpha, Size newImgSize,
-                                               CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags )
-{
-    vector<vector<Point2f> > _imagePoints1;
-    vector<vector<Point2f> > _imagePoints3;
-    mv2vv(imgpt1, _imagePoints1);
-    mv2vv(imgpt3, _imagePoints3);
-    return rectify3Collinear(cameraMatrix1, distCoeffs1,
-                             cameraMatrix2, distCoeffs2,
-                             cameraMatrix3, distCoeffs3,
-                             _imagePoints1, _imagePoints3, imageSize,
-                             R12, T12, R13, T13, R1, R2, R3, P1, P2, P3,
-                             Q, alpha, newImgSize, roi1, roi2, flags);
-}
-*/

 }

 #endif
@@ -263,11 +263,11 @@ CV_EXPORTS_W void segmentMotion(InputArray mhi, OutputArray segmask,
                                 double timestamp, double segThresh);

 //! updates the object tracking window using CAMSHIFT algorithm
-CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,
+CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window,
                                    TermCriteria criteria );

 //! updates the object tracking window using meanshift algorithm
-CV_EXPORTS_W int meanShift( InputArray probImage, CV_IN_OUT Rect& window,
+CV_EXPORTS_W int meanShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window,
                             TermCriteria criteria );

 /*!
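The tracking functions follow the same in/out pattern: the search window seeds the iteration and the refined window comes back in the return tuple. A minimal sketch, assuming a cv2 build with this change; the back-projection image and window values are illustrative only.

    import numpy as np
    import cv2

    prob = np.random.randint(0, 256, (480, 640), dtype=np.uint8)  # stand-in for a histogram back projection
    window = (200, 150, 80, 60)                                    # x, y, width, height
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1.0)

    # Both calls read the window, refine it, and hand the updated rectangle back.
    niter, window = cv2.meanShift(prob, window, criteria)
    rot_rect, window = cv2.CamShift(prob, window, criteria)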