diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
index b82a3b2c319209e702552bd375b4c1e941c95093..b02a560dd72fdb2931e09849019af777fb8a52ed 100644
--- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
+++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
@@ -113,7 +113,7 @@ Finds the camera intrinsic and extrinsic parameters from several views of a cali
 
 .. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0 )
 
-.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
+.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize[, cameraMatrix[, distCoeffs[, rvecs[, tvecs[, flags]]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
 
 .. ocv:cfunction:: double cvCalibrateCamera2( const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* pointCounts, CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs, CvMat* rvecs=NULL, CvMat* tvecs=NULL, int flags=0 )
 
@@ -1204,7 +1204,7 @@ Calibrates the stereo camera.
 
 .. ocv:function:: double stereoCalibrate( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F, TermCriteria term_crit = TermCriteria(TermCriteria::COUNT+ TermCriteria::EPS, 30, 1e-6), int flags=CALIB_FIX_INTRINSIC )
 
-.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize[, R[, T[, E[, F[, criteria[, flags]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
+.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize[, cameraMatrix1[, distCoeffs1[, cameraMatrix2[, distCoeffs2[, R[, T[, E[, F[, criteria[, flags]]]]]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
 
 .. ocv:cfunction:: double cvStereoCalibrate( const CvMat* objectPoints, const CvMat* imagePoints1, const CvMat* imagePoints2, const CvMat* pointCounts, CvMat* cameraMatrix1, CvMat* distCoeffs1, CvMat* cameraMatrix2, CvMat* distCoeffs2, CvSize imageSize, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria termCrit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), int flags=CV_CALIB_FIX_INTRINSIC )
 .. ocv:pyoldfunction:: cv.StereoCalibrate( objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, termCrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
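With cameraMatrix and distCoeffs moved into the optional bracket group, a Python caller no longer has to preallocate the intrinsic buffers; the wrapper allocates and returns them (stereoCalibrate follows the same pattern). A minimal sketch of the new calling convention, using synthetic data invented for this example rather than real chessboard detections:

    import numpy as np
    import cv2

    # A planar 9x6 pattern projected through a made-up camera, so the call
    # can actually run; real code would use detected chessboard corners.
    pattern = np.zeros((9 * 6, 3), np.float32)
    pattern[:, :2] = np.indices((9, 6)).T.reshape(-1, 2)
    K = np.float64([[500, 0, 320], [0, 500, 240], [0, 0, 1]])
    dist = np.zeros(5)
    obj_points, img_points = [], []
    for rx in (0.0, 0.15, -0.15):
        rvec = np.float64([rx, 0.2, 0.0])
        tvec = np.float64([-4.0, -3.0, 15.0])
        corners, _ = cv2.projectPoints(pattern, rvec, tvec, K, dist)
        obj_points.append(pattern)
        img_points.append(corners.reshape(-1, 2).astype(np.float32))

    # New form: cameraMatrix and distCoeffs need not be preallocated.
    rms, camera_matrix, dist_coefs, rvecs, tvecs = \
        cv2.calibrateCamera(obj_points, img_points, (640, 480))
    print "RMS:", rms
    print "camera matrix:\n", camera_matrix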
diff --git a/modules/imgproc/doc/motion_analysis_and_object_tracking.rst b/modules/imgproc/doc/motion_analysis_and_object_tracking.rst
index 330328da5610e4a0962cad17e12741e6106015ab..b61315dfd0fa93778854bf108d869776363951b0 100644
--- a/modules/imgproc/doc/motion_analysis_and_object_tracking.rst
+++ b/modules/imgproc/doc/motion_analysis_and_object_tracking.rst
@@ -9,7 +9,7 @@ Adds an image to the accumulator.
 
 .. ocv:function:: void accumulate( InputArray src, InputOutputArray dst, InputArray mask=noArray() )
 
-.. ocv:pyfunction:: cv2.accumulate(src, dst[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulate(src, dst[, mask]) -> None
 
 .. ocv:cfunction:: void cvAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
 .. ocv:pyoldfunction:: cv.Acc(src, dst, mask=None)-> None
@@ -44,7 +44,7 @@ Adds the square of a source image to the accumulator.
 
 .. ocv:function:: void accumulateSquare( InputArray src, InputOutputArray dst, InputArray mask=noArray() )
 
-.. ocv:pyfunction:: cv2.accumulateSquare(src, dst[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulateSquare(src, dst[, mask]) -> None
 
 .. ocv:cfunction:: void cvSquareAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
 .. ocv:pyoldfunction:: cv.SquareAcc(src, dst, mask=None)-> None
@@ -77,7 +77,7 @@ Adds the per-element product of two input images to the accumulator.
 
 .. ocv:function:: void accumulateProduct( InputArray src1, InputArray src2, InputOutputArray dst, InputArray mask=noArray() )
 
-.. ocv:pyfunction:: cv2.accumulateProduct(src1, src2, dst[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulateProduct(src1, src2, dst[, mask]) -> None
 
 .. ocv:cfunction:: void cvMultiplyAcc( const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL )
 .. ocv:pyoldfunction:: cv.MultiplyAcc(src1, src2, dst, mask=None)-> None
@@ -112,7 +112,7 @@ Updates a running average.
 
 .. ocv:function:: void accumulateWeighted( InputArray src, InputOutputArray dst, double alpha, InputArray mask=noArray() )
 
-.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> None
 
 .. ocv:cfunction:: void cvRunningAvg( const CvArr* src, CvArr* dst, double alpha, const CvArr* mask=NULL )
 .. ocv:pyoldfunction:: cv.RunningAvg(src, dst, alpha, mask=None)-> None
diff --git a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp
index add24fde5ead76a7f9c3e9d5ae3873082da523bf..b95f7a952de226a4a6f43b103d99c9ae4ea0a96e 100644
--- a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp
+++ b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp
@@ -574,16 +574,16 @@ CV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum,
                                 int sdepth=-1 );
 
 //! adds image to the accumulator (dst += src). Unlike cv::add, dst and src can have different types.
-CV_EXPORTS_W void accumulate( InputArray src, CV_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulate( InputArray src, InputOutputArray dst,
                               InputArray mask=noArray() );
 //! adds squared src image to the accumulator (dst += src*src).
-CV_EXPORTS_W void accumulateSquare( InputArray src, CV_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulateSquare( InputArray src, InputOutputArray dst,
                                     InputArray mask=noArray() );
 //! adds product of the 2 images to the accumulator (dst += src1*src2).
 CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
-                                     CV_OUT InputOutputArray dst, InputArray mask=noArray() );
+                                     InputOutputArray dst, InputArray mask=noArray() );
 //! updates the running average (dst = dst*(1-alpha) + src*alpha)
-CV_EXPORTS_W void accumulateWeighted( InputArray src, CV_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
                                       double alpha, InputArray mask=noArray() );
 
 //! type of the threshold operation
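Dropping CV_OUT means the Python wrappers treat the accumulator purely as an in/out buffer that the caller owns: it must be preallocated with a float depth, it is updated in place, and the call now returns None. A small sketch (the frame here is random data standing in for a video frame):

    import numpy as np
    import cv2

    frame = np.random.randint(0, 256, (240, 320, 3)).astype(np.uint8)
    acc = np.zeros(frame.shape, np.float32)     # caller-owned accumulator

    # Updates acc in place; the return value is now None, not a new array.
    cv2.accumulateWeighted(frame, acc, 0.05)

    background = cv2.convertScaleAbs(acc)       # back to 8-bit for display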
diff --git a/modules/python/src2/gen2.py b/modules/python/src2/gen2.py
index f50af2636daa50aaa46553e2c2c58b190857fc91..42574603baaf0284cb06e16989248923d17b7a4e 100644
--- a/modules/python/src2/gen2.py
+++ b/modules/python/src2/gen2.py
@@ -357,7 +357,7 @@ class FuncVariant(object):
                 continue
             if a.returnarg:
                 outlist.append((a.name, argno))
-            if not a.inputarg:
+            if not a.inputarg or a.returnarg:
                 if a.isbig():
                     outarr_list.append((a.name, argno))
                     continue
diff --git a/modules/video/doc/motion_analysis_and_object_tracking.rst b/modules/video/doc/motion_analysis_and_object_tracking.rst
index ba3e44c2e22f7234a9bfbcec1a63162e74acceb5..2fa8e74e51b1aadb5f854be49dfa668372a34210 100644
--- a/modules/video/doc/motion_analysis_and_object_tracking.rst
+++ b/modules/video/doc/motion_analysis_and_object_tracking.rst
@@ -34,7 +34,7 @@ Calculates an optical flow for a sparse feature set using the iterative Lucas-Ka
 
     :param criteria: Parameter specifying the termination criteria of the iterative search algorithm (after the specified maximum number of iterations ``criteria.maxCount`` or when the search window moves by less than ``criteria.epsilon`` .
 
-    :param derivLambda: Relative weight of the spatial image derivatives impact to the optical flow estimation. If ``derivLambda=0`` , only the image intensity is used. If ``derivLambda=1`` , only derivatives are used. Any other values between 0 and 1 mean that both derivatives and the image intensity are used (in the corresponding proportions).
+    :param derivLambda: Not used.
 
     :param flags: Operation flags:
diff --git a/samples/python2/calibrate.py b/samples/python2/calibrate.py
index 2c20f228d884b1e99c50b41c9acf0c59d0530f02..877364a8fd241bd13a0a2a32308b8763268f8421 100644
--- a/samples/python2/calibrate.py
+++ b/samples/python2/calibrate.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 import os
 
 from common import splitfn
@@ -50,10 +51,7 @@ if __name__ == '__main__':
 
         print 'ok'
 
-    camera_matrix = np.zeros((3, 3))
-    dist_coefs = np.zeros(4)
-    img_n = len(img_points)
-    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), camera_matrix, dist_coefs)
+    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h))
     print "RMS:", rms
     print "camera matrix:\n", camera_matrix
     print "distortion coefficients: ", dist_coefs.ravel()
diff --git a/samples/python2/coherence.py b/samples/python2/coherence.py
index f8dfab91b75333f983c5b1b337d724d3e78a1cd2..f6f2827fac30bec50975c3f84fb22371006027b8 100644
--- a/samples/python2/coherence.py
+++ b/samples/python2/coherence.py
@@ -7,7 +7,8 @@
 '''
 
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 
 def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
diff --git a/samples/python2/color_histogram.py b/samples/python2/color_histogram.py
index cdb7d430e30bc6479fea9e6ca9f856ec8e18ab94..b092c7d80fd4da7d87bc3f8b87cc5510bbf8211c 100644
--- a/samples/python2/color_histogram.py
+++ b/samples/python2/color_histogram.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 from time import clock
 import sys
diff --git a/samples/python2/common.py b/samples/python2/common.py
index 636ef6986cc8cfdb2cb56d00adcfb89fc07c5b9a..59de42f25c0abd4759dbbf95bf116c3eede65135 100644
--- a/samples/python2/common.py
+++ b/samples/python2/common.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 import os
 
 image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
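The one-line gen2.py change above is what enables the new calibrateCamera call in calibrate.py: an argument that is both an input and a return value (an InputOutputArray) is now also collected into outarr_list, so the generated wrapper makes it optional and allocates it when the caller omits it. Below is a simplified, self-contained illustration of that classification rule; it is not the actual generator code, and the Arg class and its fields are stand-ins for the parsed-header information:

    class Arg(object):
        def __init__(self, name, inputarg, returnarg, big):
            self.name = name
            self.inputarg = inputarg    # appears as an input parameter
            self.returnarg = returnarg  # appears in the returned tuple
            self.big = big              # Mat-like array ("big" object)
        def isbig(self):
            return self.big

    def classify(args):
        outlist, outarr_list = [], []
        for argno, a in enumerate(args):
            if a.returnarg:
                outlist.append((a.name, argno))
            # old rule: only pure outputs (not a.inputarg) became optional arrays
            # new rule: in/out arrays (inputarg and returnarg) qualify as well
            if not a.inputarg or a.returnarg:
                if a.isbig():
                    outarr_list.append((a.name, argno))
        return outlist, outarr_list

    # cameraMatrix in calibrateCamera is an InputOutputArray: input and return.
    print classify([Arg('cameraMatrix', inputarg=True, returnarg=True, big=True)])
    # ([('cameraMatrix', 0)], [('cameraMatrix', 0)])  -- now also an optional array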
diff --git a/samples/python2/distrans.py b/samples/python2/distrans.py
index dc3fd9474e6b60819da7fc7288d0e1bebbb561bc..c200e7d2905669ad40bc1d6f5e5e6910a28c131c 100644
--- a/samples/python2/distrans.py
+++ b/samples/python2/distrans.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 from common import make_cmap
 
 help_message = '''USAGE: distrans.py []
diff --git a/samples/python2/edge.py b/samples/python2/edge.py
index 2eea855b73ff8e9fb39fd06b974731fa662a71ec..371e7293460c241862461ad548968ca9a02252f4 100644
--- a/samples/python2/edge.py
+++ b/samples/python2/edge.py
@@ -1,4 +1,5 @@
-import cv2, cv
+import cv2
+import cv2.cv as cv
 import video
 import sys
diff --git a/samples/python2/facedetect.py b/samples/python2/facedetect.py
index 1dd0528327c980db81ca2ce95d7147a9b401c240..030cbdf2ce514c12e7413010286d40c2956c5f5c 100644
--- a/samples/python2/facedetect.py
+++ b/samples/python2/facedetect.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 from video import create_capture
 from common import clock, draw_str
diff --git a/samples/python2/find_obj.py b/samples/python2/find_obj.py
index 2a36ddbd4f80b676dca90c209831b72fa3f72e11..5aaf293b6c1f5e3dccd9ae3d8e0b44bbc1390021 100644
--- a/samples/python2/find_obj.py
+++ b/samples/python2/find_obj.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 from common import anorm
 
 help_message = '''SURF image match
diff --git a/samples/python2/floodfill.py b/samples/python2/floodfill.py
index c9d31286ea532749ae5120e46947ceeeb8ce81c2..59987caa9702eb7c887f9f8d4f0d5b6f69348899 100644
--- a/samples/python2/floodfill.py
+++ b/samples/python2/floodfill.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 
 help_message = '''USAGE: floodfill.py []
diff --git a/samples/python2/lk_track.py b/samples/python2/lk_track.py
index 2e852d86e9df25838da9ad4adf9cd3d570972ad4..5a63aabac2bb9d3dbb3baf1835567bdd0011fd94 100644
--- a/samples/python2/lk_track.py
+++ b/samples/python2/lk_track.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 import video
 from common import anorm2, draw_str
 from time import clock
@@ -15,7 +16,7 @@ Keys:
 
 
-lk_params = dict( winSize = (3, 3),
+lk_params = dict( winSize = (21, 21),
                   maxLevel = 2,
                   criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
                   derivLambda = 0.0 )
diff --git a/samples/python2/motempl.py b/samples/python2/motempl.py
index 9229861837dad6c97f7129359b42658356c7d1aa..a7319625a45d945f6909091c89e2b9d9330196db 100644
--- a/samples/python2/motempl.py
+++ b/samples/python2/motempl.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 import video
 from common import nothing, clock, draw_str
diff --git a/samples/python2/obj_detect.py b/samples/python2/obj_detect.py
index a1ddbed1c8ccb892c7648a186b817aab52326403..1e871cf3224806577f161651b09a43e342423a59 100644
--- a/samples/python2/obj_detect.py
+++ b/samples/python2/obj_detect.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 
 def detect(img, cascade):
     rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
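lk_track.py now asks for a 21x21 search window and keeps derivLambda only as a harmless leftover (the parameter is documented above as unused). A minimal sketch of a call with these parameters, run on two synthetic frames invented for the example:

    import numpy as np
    import cv2

    lk_params = dict( winSize = (21, 21),
                      maxLevel = 2,
                      criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
                      derivLambda = 0.0 )   # accepted for compatibility, ignored

    img0 = np.zeros((240, 320), np.uint8)
    cv2.rectangle(img0, (100, 80), (180, 160), 255, -1)   # a bright block to track
    img1 = np.roll(img0, 3, axis=1)                       # the same block, shifted right

    p0 = cv2.goodFeaturesToTrack(img0, maxCorners=20, qualityLevel=0.3, minDistance=7)
    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    print p1[st == 1] - p0[st == 1]        # per-point displacement, roughly (3, 0)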
diff --git a/samples/python2/opt_flow.py b/samples/python2/opt_flow.py
index 8ab1aa29fbeebaca485ca082c277a9661c0115bf..2c0914d2e20e2b02f9cdb06cb2a078d6a0fdeb72 100644
--- a/samples/python2/opt_flow.py
+++ b/samples/python2/opt_flow.py
@@ -1,6 +1,7 @@
 import numpy as np
 import math
-import cv2, cv
+import cv2
+import cv2.cv as cv
 import video
 
 help_message = '''
diff --git a/samples/python2/stereo_match.py b/samples/python2/stereo_match.py
index 842298e35ef25ef6138dc001f86f7bf3a2bd4125..e7321ee1ade52a7488db9cfe911f2fd5c2d1e193 100644
--- a/samples/python2/stereo_match.py
+++ b/samples/python2/stereo_match.py
@@ -5,7 +5,8 @@ Resulting .ply file cam be easily viewed using MeshLab (http://meshlab.sourcefor
 '''
 
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 
 ply_header = '''ply
 format ascii 1.0
diff --git a/samples/python2/turing.py b/samples/python2/turing.py
index d610afe31152da86ba8adb8d7acd39567458fb0f..f04c5a1ef7b29879f902ecc185203fee9121fcea 100644
--- a/samples/python2/turing.py
+++ b/samples/python2/turing.py
@@ -4,7 +4,8 @@ Inspired by http://www.jonathanmccabe.com/Cyclic_Symmetric_Multi-Scale_Turing_Pa
 '''
 
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 from common import draw_str
 import getopt, sys
 from itertools import count
diff --git a/samples/python2/watershed.py b/samples/python2/watershed.py
index ffddbbc4e77727fc02c77a5bd9c5853a2c0369ee..864d011cf44b1c183150b731f75331f67eb1b0a4 100644
--- a/samples/python2/watershed.py
+++ b/samples/python2/watershed.py
@@ -1,5 +1,6 @@
 import numpy as np
-import cv2, cv
+import cv2
+import cv2.cv as cv
 from common import Sketcher
 
 help_message = '''
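Every sample follows the same import change: the old "import cv2, cv" (which relied on a separate top-level cv module) is replaced with an explicit alias for the legacy API. The resulting idiom, with a trivial use of both namespaces, is simply:

    import numpy as np
    import cv2            # new-style NumPy-based API
    import cv2.cv as cv   # legacy API, imported explicitly as a submodule of cv2

    img = np.zeros((10, 10), np.uint8)
    legacy = cv.fromarray(img)    # hand a NumPy image to a legacy-API function
    print cv2.__version__, type(legacy)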