Commit 18cbfa4a authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

......@@ -54,7 +54,7 @@ bool CvCascadeImageReader::NegReader::nextImg()
size_t count = imgFilenames.size();
for( size_t i = 0; i < count; i++ )
{
src = imread( imgFilenames[last++], 0 );
src = imread( imgFilenames[last++], IMREAD_GRAYSCALE );
if( src.empty() ){
last %= count;
continue;
......
......@@ -1370,3 +1370,10 @@
journal = {IEEE transactions on pattern analysis and machine intelligence},
doi = {10.1109/TPAMI.2006.153}
}
@article{Buades2005DenoisingIS,
title={Denoising image sequences does not require motion estimation},
author={Antoni Buades and Bartomeu Coll and Jean-Michel Morel},
journal={IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.},
year={2005},
pages={70-74}
}
......@@ -41,8 +41,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
imgL = cv.imread('tsukuba_l.png',0)
imgR = cv.imread('tsukuba_r.png',0)
imgL = cv.imread('tsukuba_l.png', cv.IMREAD_GRAYSCALE)
imgR = cv.imread('tsukuba_r.png', cv.IMREAD_GRAYSCALE)
stereo = cv.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL,imgR)
......
......@@ -76,8 +76,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img1 = cv.imread('myleft.jpg',0) #queryimage # left image
img2 = cv.imread('myright.jpg',0) #trainimage # right image
img1 = cv.imread('myleft.jpg', cv.IMREAD_GRAYSCALE) #queryimage # left image
img2 = cv.imread('myright.jpg', cv.IMREAD_GRAYSCALE) #trainimage # right image
sift = cv.SIFT_create()
......
......@@ -25,6 +25,7 @@ Let's load a color image first:
>>> import cv2 as cv
>>> img = cv.imread('messi5.jpg')
>>> assert img is not None, "file could not be read, check with os.path.exists()"
@endcode
You can access a pixel value by its row and column coordinates. For a BGR image, it returns an array
of Blue, Green, and Red values. For a grayscale image, just the corresponding intensity is returned.
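For instance, a minimal illustrative sketch of such pixel access (editor's example, reusing the image loaded above; the coordinates are arbitrary):
@code{.py}
import cv2 as cv
img = cv.imread('messi5.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
px = img[100, 100]        # array of [Blue, Green, Red] values at row 100, column 100
blue = img[100, 100, 0]   # blue channel only
print(px, blue)
@endcode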
......@@ -173,6 +174,7 @@ from matplotlib import pyplot as plt
BLUE = [255,0,0]
img1 = cv.imread('opencv-logo.png')
assert img1 is not None, "file could not be read, check with os.path.exists()"
replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE)
reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT)
......
......@@ -50,6 +50,8 @@ Here \f$\gamma\f$ is taken as zero.
@code{.py}
img1 = cv.imread('ml.png')
img2 = cv.imread('opencv-logo.png')
assert img1 is not None, "file could not be read, check with os.path.exists()"
assert img2 is not None, "file could not be read, check with os.path.exists()"
dst = cv.addWeighted(img1,0.7,img2,0.3,0)
......@@ -76,6 +78,8 @@ bitwise operations as shown below:
# Load two images
img1 = cv.imread('messi5.jpg')
img2 = cv.imread('opencv-logo-white.png')
assert img1 is not None, "file could not be read, check with os.path.exists()"
assert img2 is not None, "file could not be read, check with os.path.exists()"
# I want to put logo on top-left corner, So I create a ROI
rows,cols,channels = img2.shape
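# --- illustrative completion (editor's sketch, not part of the diff): use the logo's own mask
#     to black out the ROI and add only the logo pixels, i.e. the bitwise operations named above
roi = img1[0:rows, 0:cols]
img2gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)
ret, mask = cv.threshold(img2gray, 10, 255, cv.THRESH_BINARY)
mask_inv = cv.bitwise_not(mask)
img1_bg = cv.bitwise_and(roi, roi, mask=mask_inv)   # black-out the logo area in the ROI
img2_fg = cv.bitwise_and(img2, img2, mask=mask)     # take only the logo region from the logo image
dst = cv.add(img1_bg, img2_fg)                      # combine and write back into the main image
img1[0:rows, 0:cols] = dst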
......
......@@ -37,6 +37,7 @@ of odd sizes ranging from 5 to 49. (Don't worry about what the result will look
goal):
@code{.py}
img1 = cv.imread('messi5.jpg')
assert img1 is not None, "file could not be read, check with os.path.exists()"
e1 = cv.getTickCount()
for i in range(5,49,2):
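    # --- illustrative completion (editor's sketch, not part of the diff):
    #     blur with each odd kernel size, then report the elapsed time in seconds
    img1 = cv.medianBlur(img1, i)
e2 = cv.getTickCount()
t = (e2 - e1) / cv.getTickFrequency()
print(t)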
......
......@@ -63,7 +63,7 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('simple.jpg',0)
img = cv.imread('simple.jpg', cv.IMREAD_GRAYSCALE)
# Initiate FAST detector
star = cv.xfeatures2d.StarDetector_create()
......
......@@ -98,7 +98,7 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('blox.jpg',0) # `<opencv_root>/samples/data/blox.jpg`
img = cv.imread('blox.jpg', cv.IMREAD_GRAYSCALE) # `<opencv_root>/samples/data/blox.jpg`
# Initiate FAST object with default values
fast = cv.FastFeatureDetector_create()
......
......@@ -40,8 +40,8 @@ from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 10
img1 = cv.imread('box.png',0) # queryImage
img2 = cv.imread('box_in_scene.png',0) # trainImage
img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE) # queryImage
img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE) # trainImage
# Initiate SIFT detector
sift = cv.SIFT_create()
......
......@@ -67,7 +67,7 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('simple.jpg',0)
img = cv.imread('simple.jpg', cv.IMREAD_GRAYSCALE)
# Initiate ORB detector
orb = cv.ORB_create()
......
......@@ -76,7 +76,7 @@ and descriptors.
First we will see a simple demo of how to find SURF keypoints and descriptors and draw them. All
examples are shown in a Python terminal, since the usage is the same as for SIFT.
@code{.py}
>>> img = cv.imread('fly.png',0)
>>> img = cv.imread('fly.png', cv.IMREAD_GRAYSCALE)
# Create SURF object. You can specify params here or later.
# Here I set Hessian Threshold to 400
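# --- illustrative continuation (editor's sketch, not part of the diff);
#     SURF is provided by the opencv_contrib xfeatures2d module
>>> surf = cv.xfeatures2d.SURF_create(400)
# Find keypoints and descriptors directly
>>> kp, des = surf.detectAndCompute(img, None)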
......
......@@ -83,7 +83,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
edges = cv.Canny(img,100,200)
plt.subplot(121),plt.imshow(img,cmap = 'gray')
......
......@@ -24,7 +24,8 @@ The function **cv.moments()** gives a dictionary of all moment values calculated
import numpy as np
import cv2 as cv
img = cv.imread('star.jpg',0)
img = cv.imread('star.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
ret,thresh = cv.threshold(img,127,255,0)
contours,hierarchy = cv.findContours(thresh, 1, 2)
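# --- illustrative continuation (editor's sketch, not part of the diff):
#     the dictionary of moments for the first contour
cnt = contours[0]
M = cv.moments(cnt)
print( M )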
......
......@@ -29,6 +29,7 @@ import numpy as np
import cv2 as cv
im = cv.imread('test.jpg')
assert im is not None, "file could not be read, check with os.path.exists()"
imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(imgray, 127, 255, 0)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
......
......@@ -41,6 +41,7 @@ import cv2 as cv
import numpy as np
img = cv.imread('star.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret,thresh = cv.threshold(img_gray, 127, 255,0)
contours,hierarchy = cv.findContours(thresh,2,1)
......@@ -92,8 +93,10 @@ docs.
import cv2 as cv
import numpy as np
img1 = cv.imread('star.jpg',0)
img2 = cv.imread('star2.jpg',0)
img1 = cv.imread('star.jpg', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('star2.jpg', cv.IMREAD_GRAYSCALE)
assert img1 is not None, "file could not be read, check with os.path.exists()"
assert img2 is not None, "file could not be read, check with os.path.exists()"
ret, thresh = cv.threshold(img1, 127, 255,0)
ret, thresh2 = cv.threshold(img2, 127, 255,0)
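# --- illustrative continuation (editor's sketch, not part of the diff):
#     extract one contour from each image and compare their shapes
contours, hierarchy = cv.findContours(thresh, 2, 1)
cnt1 = contours[0]
contours, hierarchy = cv.findContours(thresh2, 2, 1)
cnt2 = contours[0]
ret = cv.matchShapes(cnt1, cnt2, 1, 0.0)
print( ret )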
......
......@@ -29,6 +29,7 @@ import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('opencv_logo.png')
assert img is not None, "file could not be read, check with os.path.exists()"
kernel = np.ones((5,5),np.float32)/25
dst = cv.filter2D(img,-1,kernel)
......@@ -70,6 +71,7 @@ import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('opencv-logo-white.png')
assert img is not None, "file could not be read, check with os.path.exists()"
blur = cv.blur(img,(5,5))
......
......@@ -28,6 +28,7 @@ import numpy as np
import cv2 as cv
img = cv.imread('messi5.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
res = cv.resize(img,None,fx=2, fy=2, interpolation = cv.INTER_CUBIC)
......@@ -49,7 +50,8 @@ function. See the below example for a shift of (100,50):
import numpy as np
import cv2 as cv
img = cv.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
rows,cols = img.shape
M = np.float32([[1,0,100],[0,1,50]])
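# --- illustrative continuation (editor's sketch, not part of the diff):
#     apply the (100,50) shift; dsize is (width, height) = (cols, rows)
dst = cv.warpAffine(img, M, (cols, rows))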
......@@ -87,7 +89,8 @@ where:
To find this transformation matrix, OpenCV provides a function, **cv.getRotationMatrix2D**. Check out the
below example, which rotates the image by 90 degrees about its center without any scaling.
@code{.py}
img = cv.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
rows,cols = img.shape
# cols-1 and rows-1 are the coordinate limits.
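# --- illustrative continuation (editor's sketch, not part of the diff):
#     rotate by 90 degrees about the image center with scale 1
M = cv.getRotationMatrix2D(((cols-1)/2.0, (rows-1)/2.0), 90, 1)
dst = cv.warpAffine(img, M, (cols, rows))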
......@@ -108,6 +111,7 @@ which is to be passed to **cv.warpAffine**.
Check the below example, and also look at the points I selected (which are marked in green color):
@code{.py}
img = cv.imread('drawing.png')
assert img is not None, "file could not be read, check with os.path.exists()"
rows,cols,ch = img.shape
pts1 = np.float32([[50,50],[200,50],[50,200]])
......@@ -137,6 +141,7 @@ matrix.
See the code below:
@code{.py}
img = cv.imread('sudoku.png')
assert img is not None, "file could not be read, check with os.path.exists()"
rows,cols,ch = img.shape
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
......
......@@ -93,6 +93,7 @@ import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('messi5.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
......@@ -122,7 +123,8 @@ remaining background with gray. Then loaded that mask image in OpenCV, edited or
got with corresponding values in newly added mask image. Check the code below:*
@code{.py}
# newmask is the mask image I manually labelled
newmask = cv.imread('newmask.png',0)
newmask = cv.imread('newmask.png', cv.IMREAD_GRAYSCALE)
assert newmask is not None, "file could not be read, check with os.path.exists()"
# wherever it is marked white (sure foreground), change mask=1
# wherever it is marked black (sure background), change mask=0
......
......@@ -42,7 +42,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('dave.jpg',0)
img = cv.imread('dave.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
laplacian = cv.Laplacian(img,cv.CV_64F)
sobelx = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)
......@@ -79,7 +80,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('box.png',0)
img = cv.imread('box.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
# Output dtype = cv.CV_8U
sobelx8u = cv.Sobel(img,cv.CV_8U,1,0,ksize=5)
......
......@@ -38,6 +38,7 @@ import numpy as np
import cv2 as cv
img = cv.imread('home.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
hist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
......@@ -55,6 +56,7 @@ import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('home.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
hist, xbins, ybins = np.histogram2d(h.ravel(),s.ravel(),[180,256],[[0,180],[0,256]])
......@@ -89,6 +91,7 @@ import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('home.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
hist = cv.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
......
......@@ -38,10 +38,12 @@ import cv2 as cvfrom matplotlib import pyplot as plt
#roi is the object or region of object we need to find
roi = cv.imread('rose_red.png')
assert roi is not None, "file could not be read, check with os.path.exists()"
hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
#target is the image we search in
target = cv.imread('rose.png')
assert target is not None, "file could not be read, check with os.path.exists()"
hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# Find the histograms using calcHist. Can be done with np.histogram2d also
......@@ -85,9 +87,11 @@ import numpy as np
import cv2 as cv
roi = cv.imread('rose_red.png')
assert roi is not None, "file could not be read, check with os.path.exists()"
hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
target = cv.imread('rose.png')
assert target is not None, "file could not be read, check with os.path.exists()"
hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# calculating object histogram
......
......@@ -77,7 +77,8 @@ and its parameters :
So let's start with a sample image. Simply load an image in grayscale mode and find its full
histogram.
@code{.py}
img = cv.imread('home.jpg',0)
img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
hist = cv.calcHist([img],[0],None,[256],[0,256])
@endcode
hist is a 256x1 array, each value corresponds to number of pixels in that image with its
......@@ -121,7 +122,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('home.jpg',0)
img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
plt.hist(img.ravel(),256,[0,256]); plt.show()
@endcode
You will get a plot as below :
......@@ -136,6 +138,7 @@ import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('home.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
color = ('b','g','r')
for i,col in enumerate(color):
histr = cv.calcHist([img],[i],None,[256],[0,256])
......@@ -164,7 +167,8 @@ We used cv.calcHist() to find the histogram of the full image. What if you want
of some regions of an image? Just create a mask image that is white over the region whose
histogram you want and black everywhere else, then pass it as the mask.
@code{.py}
img = cv.imread('home.jpg',0)
img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
# create a mask
mask = np.zeros(img.shape[:2], np.uint8)
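# --- illustrative continuation (editor's sketch, not part of the diff):
#     white out an arbitrary region and pass the mask to calcHist
mask[100:300, 100:400] = 255
masked_img = cv.bitwise_and(img, img, mask=mask)
hist_mask = cv.calcHist([img], [0], mask, [256], [0, 256])   # histogram of the masked region only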
......
......@@ -30,7 +30,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('wiki.jpg',0)
img = cv.imread('wiki.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
hist,bins = np.histogram(img.flatten(),256,[0,256])
......@@ -81,7 +82,8 @@ output is our histogram equalized image.
Below is a simple code snippet showing its usage for the same image we used:
@code{.py}
img = cv.imread('wiki.jpg',0)
img = cv.imread('wiki.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
equ = cv.equalizeHist(img)
res = np.hstack((img,equ)) #stacking images side-by-side
cv.imwrite('res.png',res)
......@@ -124,7 +126,8 @@ Below code snippet shows how to apply CLAHE in OpenCV:
import numpy as np
import cv2 as cv
img = cv.imread('tsukuba_l.png',0)
img = cv.imread('tsukuba_l.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
# create a CLAHE object (Arguments are optional).
clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
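# --- illustrative continuation (editor's sketch, not part of the diff):
#     apply CLAHE and save the result (output file name is arbitrary)
cl1 = clahe.apply(img)
cv.imwrite('clahe_result.jpg', cl1)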
......
......@@ -23,7 +23,8 @@ explained in the documentation. So we directly go to the code.
import numpy as np
import cv2 as cv
img = cv.imread('opencv-logo-white.png',0)
img = cv.imread('opencv-logo-white.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
img = cv.medianBlur(img,5)
cimg = cv.cvtColor(img,cv.COLOR_GRAY2BGR)
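# --- illustrative continuation (editor's sketch, not part of the diff):
#     detect circles and draw them on the color copy
circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 20,
                          param1=50, param2=30, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    cv.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)   # outer circle
    cv.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)      # circle center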
......
......@@ -38,7 +38,8 @@ Here, as an example, I would use a 5x5 kernel with full of ones. Let's see it ho
import cv2 as cv
import numpy as np
img = cv.imread('j.png',0)
img = cv.imread('j.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
kernel = np.ones((5,5),np.uint8)
erosion = cv.erode(img,kernel,iterations = 1)
@endcode
......
......@@ -31,6 +31,7 @@ Similarly while expanding, area becomes 4 times in each level. We can find Gauss
**cv.pyrDown()** and **cv.pyrUp()** functions.
@code{.py}
img = cv.imread('messi5.jpg')
assert img is not None, "file could not be read, check with os.path.exists()"
lower_reso = cv.pyrDown(higher_reso)
@endcode
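For reference, a self-contained illustrative sketch of the same round trip (editor's example; note that pyrUp after pyrDown does not restore the detail discarded by downsampling):
@code{.py}
import cv2 as cv
higher_reso = cv.imread('messi5.jpg')
assert higher_reso is not None, "file could not be read, check with os.path.exists()"
lower_reso = cv.pyrDown(higher_reso)     # one pyramid level down: roughly half the resolution
higher_reso2 = cv.pyrUp(lower_reso)      # back to the original size, but blurred
@endcode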
Below are the 4 levels in an image pyramid.
......@@ -84,6 +85,8 @@ import numpy as np,sys
A = cv.imread('apple.jpg')
B = cv.imread('orange.jpg')
assert A is not None, "file could not be read, check with os.path.exists()"
assert B is not None, "file could not be read, check with os.path.exists()"
# generate Gaussian pyramid for A
G = A.copy()
......
......@@ -38,9 +38,11 @@ import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
img2 = img.copy()
template = cv.imread('template.jpg',0)
template = cv.imread('template.jpg', cv.IMREAD_GRAYSCALE)
assert template is not None, "file could not be read, check with os.path.exists()"
w, h = template.shape[::-1]
# All the 6 methods for comparison in a list
......@@ -113,8 +115,10 @@ import numpy as np
from matplotlib import pyplot as plt
img_rgb = cv.imread('mario.png')
assert img_rgb is not None, "file could not be read, check with os.path.exists()"
img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY)
template = cv.imread('mario_coin.png',0)
template = cv.imread('mario_coin.png', cv.IMREAD_GRAYSCALE)
assert template is not None, "file could not be read, check with os.path.exists()"
w, h = template.shape[::-1]
res = cv.matchTemplate(img_gray,template,cv.TM_CCOEFF_NORMED)
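# --- illustrative continuation (editor's sketch, not part of the diff):
#     keep matches above a similarity threshold and box them (output name arbitrary)
threshold = 0.8
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
    cv.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
cv.imwrite('res.png', img_rgb)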
......
......@@ -37,7 +37,8 @@ import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('gradient.png',0)
img = cv.imread('gradient.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV)
ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC)
......@@ -85,7 +86,8 @@ import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('sudoku.png',0)
img = cv.imread('sudoku.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
img = cv.medianBlur(img,5)
ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
......@@ -133,7 +135,8 @@ import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('noisy2.png',0)
img = cv.imread('noisy2.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
# global thresholding
ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
......@@ -183,7 +186,8 @@ where
It finds a value of t that lies between the two peaks such that the weighted variance within the
two classes is minimal. It can be simply implemented in Python as follows:
@code{.py}
img = cv.imread('noisy2.png',0)
img = cv.imread('noisy2.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
blur = cv.GaussianBlur(img,(5,5),0)
# find normalized_histogram, and its cumulative distribution function
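# --- illustrative continuation (editor's sketch, not part of the diff): an explicit search for the
#     threshold minimizing the weighted within-class variance, plus OpenCV's own Otsu result for comparison
hist = cv.calcHist([blur], [0], None, [256], [0, 256])
hist_norm = hist.ravel() / hist.sum()
Q = hist_norm.cumsum()
bins = np.arange(256)
fn_min = np.inf
thresh = -1
for i in range(1, 256):
    p1, p2 = np.hsplit(hist_norm, [i])              # class probabilities
    q1, q2 = Q[i], Q[255] - Q[i]                    # cumulative sums of the classes
    if q1 < 1.e-6 or q2 < 1.e-6:
        continue
    b1, b2 = np.hsplit(bins, [i])                   # bin values (weights)
    m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2
    v1, v2 = np.sum(((b1 - m1) ** 2) * p1) / q1, np.sum(((b2 - m2) ** 2) * p2) / q2
    fn = v1 * q1 + v2 * q2                          # weighted within-class variance
    if fn < fn_min:
        fn_min = fn
        thresh = i
ret, otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
print("{} {}".format(thresh, ret))                  # the two thresholds should agree closely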
......
......@@ -54,7 +54,8 @@ import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
......@@ -121,7 +122,8 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
......@@ -184,7 +186,8 @@ So how do we find this optimal size ? OpenCV provides a function, **cv.getOptima
this. It is applicable to both **cv.dft()** and **np.fft.fft2()**. Let's check their performance
using IPython magic command %timeit.
@code{.py}
In [16]: img = cv.imread('messi5.jpg',0)
In [15]: img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE)
In [16]: assert img is not None, "file could not be read, check with os.path.exists()"
In [17]: rows,cols = img.shape
In [18]: print("{} {}".format(rows,cols))
342 548
......
......@@ -49,6 +49,7 @@ import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('coins.png')
assert img is not None, "file could not be read, check with os.path.exists()"
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
@endcode
......
......@@ -56,7 +56,7 @@ import numpy as np
import cv2 as cv
img = cv.imread('messi_2.jpg')
mask = cv.imread('mask2.png',0)
mask = cv.imread('mask2.png', cv.IMREAD_GRAYSCALE)
dst = cv.inpaint(img,mask,3,cv.INPAINT_TELEA)
......
......@@ -63,7 +63,7 @@ Making a project
int main( int argc, char** argv )
{
Mat image;
image = imread( argv[1], 1 );
image = imread( argv[1], IMREAD_COLOR );
if( argc != 2 || !image.data )
{
......
......@@ -42,7 +42,7 @@ int main(int argc, char** argv )
}
Mat image;
image = imread( argv[1], 1 );
image = imread( argv[1], IMREAD_COLOR );
if ( !image.data )
{
......
......@@ -427,9 +427,9 @@ public:
eps_rvec_noise[CALIB_HAND_EYE_ANDREFF] = 1.0e-2;
eps_rvec_noise[CALIB_HAND_EYE_DANIILIDIS] = 1.0e-2;
eps_tvec_noise[CALIB_HAND_EYE_TSAI] = 5.0e-2;
eps_tvec_noise[CALIB_HAND_EYE_PARK] = 5.0e-2;
eps_tvec_noise[CALIB_HAND_EYE_HORAUD] = 5.0e-2;
eps_tvec_noise[CALIB_HAND_EYE_TSAI] = 7.0e-2;
eps_tvec_noise[CALIB_HAND_EYE_PARK] = 7.0e-2;
eps_tvec_noise[CALIB_HAND_EYE_HORAUD] = 7.0e-2;
if (eyeToHandConfig)
{
eps_tvec_noise[CALIB_HAND_EYE_ANDREFF] = 7.0e-2;
......@@ -454,7 +454,7 @@ void CV_CalibrateHandEyeTest::run(int)
{
ts->set_failed_test_info(cvtest::TS::OK);
RNG& rng = ts->get_rng();
RNG& rng = cv::theRNG();
std::vector<std::vector<double> > vec_rvec_diff(5);
std::vector<std::vector<double> > vec_tvec_diff(5);
......
......@@ -224,7 +224,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
/* read the image */
String img_file = board_list[idx * 2];
Mat gray = imread( folder + img_file, 0);
Mat gray = imread( folder + img_file, IMREAD_GRAYSCALE);
if( gray.empty() )
{
......
......@@ -73,60 +73,27 @@ int METHOD[METHODS_COUNT] = {0, cv::RANSAC, cv::LMEDS, cv::RHO};
using namespace cv;
using namespace std;
class CV_HomographyTest: public cvtest::ArrayTest
{
public:
CV_HomographyTest();
~CV_HomographyTest();
void run (int);
protected:
int method;
int image_size;
double reproj_threshold;
double sigma;
private:
float max_diff, max_2diff;
bool check_matrix_size(const cv::Mat& H);
bool check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff);
int check_ransac_mask_1(const Mat& src, const Mat& mask);
int check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask);
void print_information_1(int j, int N, int method, const Mat& H);
void print_information_2(int j, int N, int method, const Mat& H, const Mat& H_res, int k, double diff);
void print_information_3(int method, int j, int N, const Mat& mask);
void print_information_4(int method, int j, int N, int k, int l, double diff);
void print_information_5(int method, int j, int N, int l, double diff);
void print_information_6(int method, int j, int N, int k, double diff, bool value);
void print_information_7(int method, int j, int N, int k, double diff, bool original_value, bool found_value);
void print_information_8(int method, int j, int N, int k, int l, double diff);
};
CV_HomographyTest::CV_HomographyTest() : max_diff(1e-2f), max_2diff(2e-2f)
{
method = 0;
image_size = 100;
reproj_threshold = 3.0;
sigma = 0.01;
}
CV_HomographyTest::~CV_HomographyTest() {}
namespace HomographyTestUtils {
static const float max_diff = 0.032f;
static const float max_2diff = 0.020f;
static const int image_size = 100;
static const double reproj_threshold = 3.0;
static const double sigma = 0.01;
bool CV_HomographyTest::check_matrix_size(const cv::Mat& H)
static bool check_matrix_size(const cv::Mat& H)
{
return (H.rows == 3) && (H.cols == 3);
}
bool CV_HomographyTest::check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff)
static bool check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff)
{
diff = cvtest::norm(original, found, norm_type);
return diff <= max_diff;
}
int CV_HomographyTest::check_ransac_mask_1(const Mat& src, const Mat& mask)
static int check_ransac_mask_1(const Mat& src, const Mat& mask)
{
if (!(mask.cols == 1) && (mask.rows == src.cols)) return 1;
if (countNonZero(mask) < mask.rows) return 2;
......@@ -134,14 +101,14 @@ int CV_HomographyTest::check_ransac_mask_1(const Mat& src, const Mat& mask)
return 0;
}
int CV_HomographyTest::check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask)
static int check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask)
{
if (!(found_mask.cols == 1) && (found_mask.rows == original_mask.rows)) return 1;
for (int i = 0; i < found_mask.rows; ++i) if (found_mask.at<uchar>(i, 0) > 1) return 2;
return 0;
}
void CV_HomographyTest::print_information_1(int j, int N, int _method, const Mat& H)
static void print_information_1(int j, int N, int _method, const Mat& H)
{
cout << endl; cout << "Checking for homography matrix sizes..." << endl; cout << endl;
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
......@@ -153,7 +120,7 @@ void CV_HomographyTest::print_information_1(int j, int N, int _method, const Mat
cout << "Number of rows: " << H.rows << " Number of cols: " << H.cols << endl; cout << endl;
}
void CV_HomographyTest::print_information_2(int j, int N, int _method, const Mat& H, const Mat& H_res, int k, double diff)
static void print_information_2(int j, int N, int _method, const Mat& H, const Mat& H_res, int k, double diff)
{
cout << endl; cout << "Checking for accuracy of homography matrix computing..." << endl; cout << endl;
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
......@@ -169,7 +136,7 @@ void CV_HomographyTest::print_information_2(int j, int N, int _method, const Mat
cout << "Maximum allowed difference: " << max_diff << endl; cout << endl;
}
void CV_HomographyTest::print_information_3(int _method, int j, int N, const Mat& mask)
static void print_information_3(int _method, int j, int N, const Mat& mask)
{
cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl;
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
......@@ -181,7 +148,7 @@ void CV_HomographyTest::print_information_3(int _method, int j, int N, const Mat
cout << "Number of rows: " << mask.rows << " Number of cols: " << mask.cols << endl; cout << endl;
}
void CV_HomographyTest::print_information_4(int _method, int j, int N, int k, int l, double diff)
static void print_information_4(int _method, int j, int N, int k, int l, double diff)
{
cout << endl; cout << "Checking for accuracy of reprojection error computing..." << endl; cout << endl;
cout << "Method: "; if (_method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl;
......@@ -195,7 +162,7 @@ void CV_HomographyTest::print_information_4(int _method, int j, int N, int k, in
cout << "Maximum allowed difference: " << max_2diff << endl; cout << endl;
}
void CV_HomographyTest::print_information_5(int _method, int j, int N, int l, double diff)
static void print_information_5(int _method, int j, int N, int l, double diff)
{
cout << endl; cout << "Checking for accuracy of reprojection error computing..." << endl; cout << endl;
cout << "Method: "; if (_method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl;
......@@ -208,7 +175,7 @@ void CV_HomographyTest::print_information_5(int _method, int j, int N, int l, do
cout << "Maximum allowed difference: " << max_diff << endl; cout << endl;
}
void CV_HomographyTest::print_information_6(int _method, int j, int N, int k, double diff, bool value)
static void print_information_6(int _method, int j, int N, int k, double diff, bool value)
{
cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl;
cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl;
......@@ -221,7 +188,7 @@ void CV_HomographyTest::print_information_6(int _method, int j, int N, int k, do
cout << "Value of found mask: "<< value << endl; cout << endl;
}
void CV_HomographyTest::print_information_7(int _method, int j, int N, int k, double diff, bool original_value, bool found_value)
static void print_information_7(int _method, int j, int N, int k, double diff, bool original_value, bool found_value)
{
cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl;
cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl;
......@@ -234,7 +201,7 @@ void CV_HomographyTest::print_information_7(int _method, int j, int N, int k, do
cout << "Value of original mask: "<< original_value << " Value of found mask: " << found_value << endl; cout << endl;
}
void CV_HomographyTest::print_information_8(int _method, int j, int N, int k, int l, double diff)
static void print_information_8(int _method, int j, int N, int k, int l, double diff)
{
cout << endl; cout << "Checking for reprojection error of inlier..." << endl; cout << endl;
cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl;
......@@ -248,11 +215,15 @@ void CV_HomographyTest::print_information_8(int _method, int j, int N, int k, in
cout << "Maximum allowed difference: " << max_2diff << endl; cout << endl;
}
void CV_HomographyTest::run(int)
} // HomographyTestUtils::
TEST(Calib3d_Homography, accuracy)
{
using namespace HomographyTestUtils;
for (int N = MIN_COUNT_OF_POINTS; N <= MAX_COUNT_OF_POINTS; ++N)
{
RNG& rng = ts->get_rng();
RNG& rng = cv::theRNG();
float *src_data = new float [2*N];
......@@ -308,7 +279,7 @@ void CV_HomographyTest::run(int)
for (int i = 0; i < METHODS_COUNT; ++i)
{
method = METHOD[i];
const int method = METHOD[i];
switch (method)
{
case 0:
......@@ -411,7 +382,7 @@ void CV_HomographyTest::run(int)
for (int i = 0; i < METHODS_COUNT; ++i)
{
method = METHOD[i];
const int method = METHOD[i];
switch (method)
{
case 0:
......@@ -573,8 +544,6 @@ void CV_HomographyTest::run(int)
}
}
TEST(Calib3d_Homography, accuracy) { CV_HomographyTest test; test.safe_run(); }
TEST(Calib3d_Homography, EKcase)
{
float pt1data[] =
......
......@@ -456,8 +456,8 @@ void CV_StereoMatchingTest::run(int)
string datasetFullDirName = dataPath + DATASETS_DIR + datasetName + "/";
Mat leftImg = imread(datasetFullDirName + LEFT_IMG_NAME);
Mat rightImg = imread(datasetFullDirName + RIGHT_IMG_NAME);
Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, 0);
Mat trueRightDisp = imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, 0);
Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, IMREAD_GRAYSCALE);
Mat trueRightDisp = imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, IMREAD_GRAYSCALE);
Rect calcROI;
if( leftImg.empty() || rightImg.empty() || trueLeftDisp.empty() )
......@@ -835,9 +835,9 @@ TEST_P(Calib3d_StereoBM_BufferBM, memAllocsTest)
const int SADWindowSize = get<1>(get<1>(GetParam()));
String path = cvtest::TS::ptr()->get_data_path() + "cv/stereomatching/datasets/teddy/";
Mat leftImg = imread(path + "im2.png", 0);
Mat leftImg = imread(path + "im2.png", IMREAD_GRAYSCALE);
ASSERT_FALSE(leftImg.empty());
Mat rightImg = imread(path + "im6.png", 0);
Mat rightImg = imread(path + "im6.png", IMREAD_GRAYSCALE);
ASSERT_FALSE(rightImg.empty());
Mat leftDisp;
{
......@@ -923,9 +923,9 @@ TEST(Calib3d_StereoSGBM, regression) { CV_StereoSGBMTest test; test.safe_run();
TEST(Calib3d_StereoSGBM_HH4, regression)
{
String path = cvtest::TS::ptr()->get_data_path() + "cv/stereomatching/datasets/teddy/";
Mat leftImg = imread(path + "im2.png", 0);
Mat leftImg = imread(path + "im2.png", IMREAD_GRAYSCALE);
ASSERT_FALSE(leftImg.empty());
Mat rightImg = imread(path + "im6.png", 0);
Mat rightImg = imread(path + "im6.png", IMREAD_GRAYSCALE);
ASSERT_FALSE(rightImg.empty());
Mat testData = imread(path + "disp2_hh4.png",-1);
ASSERT_FALSE(testData.empty());
......
......@@ -306,7 +306,7 @@ CV_INLINE int cvIsInf( double value )
#elif defined(__x86_64__) || defined(_M_X64) || defined(__aarch64__) || defined(_M_ARM64) || defined(__PPC64__) || defined(__loongarch64)
Cv64suf ieee754;
ieee754.f = value;
return (ieee754.u & 0x7fffffff00000000) ==
return (ieee754.u & 0x7fffffffffffffff) ==
0x7ff0000000000000;
#else
Cv64suf ieee754;
......
......@@ -247,6 +247,7 @@ std::wstring GetTempFileNameWinRT(std::wstring prefix)
#if defined __MACH__ && defined __APPLE__
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <sys/sysctl.h>
#endif
#endif
......@@ -635,6 +636,14 @@ struct HWFeatures
#if (defined __ARM_FP && (((__ARM_FP & 0x2) != 0) && defined __ARM_NEON__))
have[CV_CPU_FP16] = true;
#endif
#if (defined __ARM_FEATURE_DOTPROD)
int has_feat_dotprod = 0;
size_t has_feat_dotprod_size = sizeof(has_feat_dotprod);
sysctlbyname("hw.optional.arm.FEAT_DotProd", &has_feat_dotprod, &has_feat_dotprod_size, NULL, 0);
if (has_feat_dotprod) {
have[CV_CPU_NEON_DOTPROD] = true;
}
#endif
#elif (defined __clang__)
#if (defined __ARM_NEON__ || (defined __ARM_NEON && defined __aarch64__))
have[CV_CPU_NEON] = true;
......
......@@ -3992,6 +3992,13 @@ TEST(Core_FastMath, InlineNaN)
EXPECT_EQ( cvIsNaN((double) NAN), 1);
EXPECT_EQ( cvIsNaN((double) -NAN), 1);
EXPECT_EQ( cvIsNaN(0.0), 0);
// Regression: check the +/-Inf cases
Cv64suf suf;
suf.u = 0x7FF0000000000000UL;
EXPECT_EQ( cvIsNaN(suf.f), 0);
suf.u = 0xFFF0000000000000UL;
EXPECT_EQ( cvIsNaN(suf.f), 0);
}
TEST(Core_FastMath, InlineIsInf)
......@@ -4003,6 +4010,13 @@ TEST(Core_FastMath, InlineIsInf)
EXPECT_EQ( cvIsInf((double) HUGE_VAL), 1);
EXPECT_EQ( cvIsInf((double) -HUGE_VAL), 1);
EXPECT_EQ( cvIsInf(0.0), 0);
// Regression: check the cases of 0x7FF00000xxxxxxxx
Cv64suf suf;
suf.u = 0x7FF0000000000001UL;
EXPECT_EQ( cvIsInf(suf.f), 0);
suf.u = 0x7FF0000012345678UL;
EXPECT_EQ( cvIsInf(suf.f), 0);
}
}} // namespace
......
......@@ -974,6 +974,9 @@ TEST_P(Test_Int8_nets, opencv_face_detector)
TEST_P(Test_Int8_nets, EfficientDet)
{
if (cvtest::skipUnstableTests)
throw SkipTestException("Skip unstable test"); // detail: https://github.com/opencv/opencv/pull/23167
applyTestTag(CV_TEST_TAG_DEBUG_VERYLONG);
if (target == DNN_TARGET_OPENCL_FP16 && !ocl::Device::getDefault().isIntel())
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
......
......@@ -960,11 +960,18 @@ if( dstMat.type() == CV_32F )
__dst = v_min(v_max(v_cvt_f32(v_round(__dst * __nrm2)), __min), __max);
v_store(dst + k, __dst);
}
#endif
#if defined(__GNUC__) && __GNUC__ >= 9
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waggressive-loop-optimizations" // iteration XX invokes undefined behavior
#endif
for( ; k < len; k++ )
{
dst[k] = saturate_cast<uchar>(rawDst[k]*nrm2);
}
#if defined(__GNUC__) && __GNUC__ >= 9
#pragma GCC diagnostic pop
#endif
}
else // CV_8U
{
......@@ -984,9 +991,8 @@ else // CV_8U
#endif
#if defined(__GNUC__) && __GNUC__ >= 9
// avoid warning "iteration 7 invokes undefined behavior" on Linux ARM64
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waggressive-loop-optimizations"
#pragma GCC diagnostic ignored "-Waggressive-loop-optimizations" // iteration XX invokes undefined behavior
#endif
for( ; k < len; k++ )
{
......
......@@ -82,7 +82,7 @@ TEST( Features2d_DescriptorExtractor, batch_ORB )
for( i = 0; i < n; i++ )
{
string imgname = format("%s/img%d.png", path.c_str(), i+1);
Mat img = imread(imgname, 0);
Mat img = imread(imgname, IMREAD_GRAYSCALE);
imgs.push_back(img);
}
......@@ -110,7 +110,7 @@ TEST( Features2d_DescriptorExtractor, batch_SIFT )
for( i = 0; i < n; i++ )
{
string imgname = format("%s/img%d.png", path.c_str(), i+1);
Mat img = imread(imgname, 0);
Mat img = imread(imgname, IMREAD_GRAYSCALE);
imgs.push_back(img);
}
......
......@@ -45,7 +45,7 @@ public class ImgcodecsTest extends OpenCVTestCase {
}
public void testImreadStringInt() {
dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, 0);
dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, Imgcodecs.IMREAD_GRAYSCALE);
assertFalse(dst.empty());
assertEquals(1, dst.channels());
assertTrue(512 == dst.cols());
......
......@@ -1077,7 +1077,7 @@ double CV_ColorLabTest::get_success_error_level( int /*test_case_idx*/, int i, i
{
int depth = test_mat[i][j].depth();
// j == 0 is for forward code, j == 1 is for inverse code
return (depth == CV_8U) ? (srgb ? 32 : 8) :
return (depth == CV_8U) ? (srgb ? 37 : 8) :
//(depth == CV_16U) ? 32 : // 16u is disabled
srgb ? ((j == 0) ? 0.4 : 0.0055) : 1e-3;
}
......@@ -1256,7 +1256,7 @@ double CV_ColorLuvTest::get_success_error_level( int /*test_case_idx*/, int i, i
{
int depth = test_mat[i][j].depth();
// j == 0 is for forward code, j == 1 is for inverse code
return (depth == CV_8U) ? (srgb ? 36 : 8) :
return (depth == CV_8U) ? (srgb ? 37 : 8) :
//(depth == CV_16U) ? 32 : // 16u is disabled
5e-2;
}
......
......@@ -81,7 +81,7 @@ void CV_ConnectedComponentsTest::run(int /* start_from */)
int ccltype[] = { cv::CCL_DEFAULT, cv::CCL_WU, cv::CCL_GRANA, cv::CCL_BOLELLI, cv::CCL_SAUF, cv::CCL_BBDT, cv::CCL_SPAGHETTI };
string exp_path = string(ts->get_data_path()) + "connectedcomponents/ccomp_exp.png";
Mat exp = imread(exp_path, 0);
Mat exp = imread(exp_path, IMREAD_GRAYSCALE);
Mat orig = imread(string(ts->get_data_path()) + "connectedcomponents/concentric_circles.png", 0);
if (orig.empty())
......
......@@ -180,7 +180,7 @@ cvTsIsPointOnLineSegment(const cv::Point2f &x, const cv::Point2f &a, const cv::P
double d2 = cvTsDist(cvPoint2D32f(x.x, x.y), cvPoint2D32f(b.x, b.y));
double d3 = cvTsDist(cvPoint2D32f(a.x, a.y), cvPoint2D32f(b.x, b.y));
return (abs(d1 + d2 - d3) <= (1E-5));
return (abs(d1 + d2 - d3) <= (1E-4));
}
......
......@@ -53,7 +53,7 @@ protected:
void run(int)
{
string imgpath = string(ts->get_data_path()) + "shared/lena.png";
Mat img = imread(imgpath, 1), gray, smallimg, result;
Mat img = imread(imgpath, IMREAD_COLOR), gray, smallimg, result;
UMat uimg = img.getUMat(ACCESS_READ), ugray, usmallimg, uresult;
cvtColor(img, gray, COLOR_BGR2GRAY);
......
......@@ -102,7 +102,6 @@ void CV_ImgWarpBaseTest::get_test_array_types_and_sizes( int test_case_idx,
int cn = cvtest::randInt(rng) % 3 + 1;
cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types );
depth = depth == 0 ? CV_8U : depth == 1 ? CV_16U : CV_32F;
cn += cn == 2;
types[INPUT][0] = types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = CV_MAKETYPE(depth, cn);
if( test_array[INPUT].size() > 1 )
......
......@@ -151,8 +151,6 @@ void CV_ImageWarpBaseTest::generate_test_data()
depth = rng.uniform(0, CV_64F);
int cn = rng.uniform(1, 4);
while (cn == 2)
cn = rng.uniform(1, 4);
src.create(ssize, CV_MAKE_TYPE(depth, cn));
......@@ -237,7 +235,7 @@ float CV_ImageWarpBaseTest::get_success_error_level(int _interpolation, int) con
else if (_interpolation == INTER_LANCZOS4)
return 1.0f;
else if (_interpolation == INTER_NEAREST)
return 1.0f;
return 255.0f; // FIXIT: check is not reliable for Black/White (0/255) images
else if (_interpolation == INTER_AREA)
return 2.0f;
else
......@@ -430,8 +428,6 @@ void CV_Resize_Test::generate_test_data()
depth = rng.uniform(0, CV_64F);
int cn = rng.uniform(1, 4);
while (cn == 2)
cn = rng.uniform(1, 4);
src.create(ssize, CV_MAKE_TYPE(depth, cn));
......
......@@ -60,7 +60,7 @@ CV_WatershedTest::~CV_WatershedTest() {}
void CV_WatershedTest::run( int /* start_from */)
{
string exp_path = string(ts->get_data_path()) + "watershed/wshed_exp.png";
Mat exp = imread(exp_path, 0);
Mat exp = imread(exp_path, IMREAD_GRAYSCALE);
Mat orig = imread(string(ts->get_data_path()) + "inpaint/orig.png");
FileStorage fs(string(ts->get_data_path()) + "watershed/comp.xml", FileStorage::READ);
......
......@@ -149,7 +149,7 @@ public class OpenCVTestCase extends TestCase {
rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128));
rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH);
grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0);
grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, Imgcodecs.IMREAD_GRAYSCALE);
gray255_32f_3d = new Mat(new int[]{matSize, matSize, matSize}, CvType.CV_32F, new Scalar(255.0));
......
......@@ -175,7 +175,7 @@ public class OpenCVTestCase extends TestCase {
rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128));
rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH);
grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0);
grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, Imgcodecs.IMREAD_GRAYSCALE);
gray255_32f_3d = new Mat(new int[]{matSize, matSize, matSize}, CvType.CV_32F, new Scalar(255.0));
......
......@@ -138,7 +138,7 @@ int CV_DetectorTest::prepareData( FileStorage& _fs )
String filename;
it >> filename;
imageFilenames.push_back(filename);
Mat img = imread( dataPath+filename, 1 );
Mat img = imread( dataPath+filename, IMREAD_COLOR );
images.push_back( img );
}
}
......
......@@ -201,8 +201,8 @@ CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
/** @brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been
captured in small period of time. For example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>
images or for manual manipulation with colorspaces. See @cite Buades2005DenoisingIS for more details
(open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
@param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or
4-channel images sequence. All images should have the same type and
......@@ -228,8 +228,8 @@ CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputA
/** @brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been
captured in small period of time. For example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>
images or for manual manipulation with colorspaces. See @cite Buades2005DenoisingIS for more details
(open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
@param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel images sequence. All images should
......
......@@ -157,7 +157,7 @@ TEST(Photo_White, issue_2646)
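As context for the two functions documented above, a hedged usage sketch in Python (editor's example; the video file name and the choice of five frames are assumptions, not taken from the diff):
@code{.py}
import cv2 as cv

cap = cv.VideoCapture('vtest.avi')   # assumed sample video
# read five consecutive frames and convert them to grayscale
gray = [cv.cvtColor(cap.read()[1], cv.COLOR_BGR2GRAY) for _ in range(5)]
# denoise the third frame (index 2) using a temporal window of 5 frames
dst = cv.fastNlMeansDenoisingMulti(gray, 2, 5, None, 4, 7, 35)
@endcode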
TEST(Photo_Denoising, speed)
{
string imgname = string(cvtest::TS::ptr()->get_data_path()) + "shared/5MP.png";
Mat src = imread(imgname, 0), dst;
Mat src = imread(imgname, IMREAD_GRAYSCALE), dst;
double t = (double)getTickCount();
fastNlMeansDenoising(src, dst, 5, 7, 21);
......
......@@ -454,9 +454,6 @@ private:
performance_metrics metrics;
void validateMetrics();
static int64 _timeadjustment;
static int64 _calibrate();
static void warmup_impl(cv::Mat m, WarmUpType wtype);
static int getSizeInBytes(cv::InputArray a);
static cv::Size getSize(cv::InputArray a);
......
......@@ -522,13 +522,35 @@ namespace cvtest
int validCount = 0;
for (size_t i = 0; i < gold.size(); ++i)
if (actual.size() == gold.size())
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (keyPointsEquals(p1, p2))
++validCount;
if (keyPointsEquals(p1, p2))
++validCount;
}
}
else
{
std::vector<cv::KeyPoint>& shorter = gold;
std::vector<cv::KeyPoint>& longer = actual;
if (actual.size() < gold.size())
{
shorter = actual;
longer = gold;
}
for (size_t i = 0; i < shorter.size(); ++i)
{
const cv::KeyPoint& p1 = shorter[i];
const cv::KeyPoint& p2 = longer[i];
const cv::KeyPoint& p3 = longer[i+1];
if (keyPointsEquals(p1, p2) || keyPointsEquals(p1, p3))
++validCount;
}
}
return validCount;
......
......@@ -623,20 +623,27 @@ void TS::set_gtest_status()
void TS::update_context( BaseTest* test, int test_case_idx, bool update_ts_context )
{
CV_UNUSED(update_ts_context);
if( current_test_info.test != test )
{
for( int i = 0; i <= CONSOLE_IDX; i++ )
output_buf[i] = string();
rng = RNG(params.rng_seed);
current_test_info.rng_seed0 = current_test_info.rng_seed = rng.state;
}
if (test_case_idx >= 0)
{
current_test_info.rng_seed = param_seed + test_case_idx;
current_test_info.rng_seed0 = current_test_info.rng_seed;
rng = RNG(current_test_info.rng_seed);
cv::theRNG() = rng;
}
current_test_info.test = test;
current_test_info.test_case_idx = test_case_idx;
current_test_info.code = 0;
cvSetErrStatus( CV_StsOk );
if( update_ts_context )
current_test_info.rng_seed = rng.state;
}
......
......@@ -26,7 +26,6 @@ using namespace perf;
int64 TestBase::timeLimitDefault = 0;
unsigned int TestBase::iterationsLimitDefault = UINT_MAX;
int64 TestBase::_timeadjustment = 0;
// Item [0] will be considered the default implementation.
static std::vector<std::string> available_impls;
......@@ -1159,7 +1158,6 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
timeLimitDefault = param_time_limit == 0.0 ? 1 : (int64)(param_time_limit * cv::getTickFrequency());
iterationsLimitDefault = param_force_samples == 0 ? UINT_MAX : param_force_samples;
_timeadjustment = _calibrate();
}
void TestBase::RecordRunParameters()
......@@ -1193,66 +1191,6 @@ enum PERF_STRATEGY TestBase::getCurrentModulePerformanceStrategy()
return strategyForce == PERF_STRATEGY_DEFAULT ? strategyModule : strategyForce;
}
int64 TestBase::_calibrate()
{
CV_TRACE_FUNCTION();
if (iterationsLimitDefault <= 1)
return 0;
class _helper : public ::perf::TestBase
{
public:
_helper() { testStrategy = PERF_STRATEGY_BASE; }
performance_metrics& getMetrics() { return calcMetrics(); }
virtual void TestBody() {}
virtual void PerfTestBody()
{
//the whole system warmup
SetUp();
cv::Mat a(2048, 2048, CV_32S, cv::Scalar(1));
cv::Mat b(2048, 2048, CV_32S, cv::Scalar(2));
declare.time(30);
double s = 0;
declare.iterations(20);
minIters = nIters = 20;
for(; next() && startTimer(); stopTimer())
s+=a.dot(b);
declare.time(s);
//self calibration
SetUp();
declare.iterations(1000);
minIters = nIters = 1000;
for(int iters = 0; next() && startTimer(); iters++, stopTimer()) { /*std::cout << iters << nIters << std::endl;*/ }
}
};
// Initialize ThreadPool
class _dummyParallel : public ParallelLoopBody
{
public:
void operator()(const cv::Range& range) const
{
// nothing
CV_UNUSED(range);
}
};
parallel_for_(cv::Range(0, 1000), _dummyParallel());
_timeadjustment = 0;
_helper h;
h.PerfTestBody();
double compensation = h.getMetrics().min;
if (getCurrentModulePerformanceStrategy() == PERF_STRATEGY_SIMPLE)
{
CV_Assert(compensation < 0.01 * cv::getTickFrequency());
compensation = 0.0f; // simple strategy doesn't require any compensation
}
LOGD("Time compensation is %.0f", compensation);
return (int64)compensation;
}
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable:4355) // 'this' : used in base member initializer list
......@@ -1561,9 +1499,8 @@ void TestBase::stopTimer()
if (lastTime == 0)
ADD_FAILURE() << " stopTimer() is called before startTimer()/next()";
lastTime = time - lastTime;
CV_Assert(lastTime >= 0); // TODO: CV_Check* for int64
totalTime += lastTime;
lastTime -= _timeadjustment;
if (lastTime < 0) lastTime = 0;
times.push_back(lastTime);
lastTime = 0;
......
......@@ -83,7 +83,7 @@ struct WrapAff2D
bool CV_RigidTransform_Test::testNPoints(int from)
{
cv::RNG rng = ts->get_rng();
cv::RNG rng = cv::theRNG();
int progress = 0;
int k, ntests = 10000;
......@@ -181,4 +181,4 @@ void CV_RigidTransform_Test::run( int start_from )
TEST(Video_RigidFlow, accuracy) { CV_RigidTransform_Test test; test.safe_run(); }
}} // namespace
\ No newline at end of file
}} // namespace
......@@ -250,7 +250,7 @@ int main( int argc, char** argv )
{
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
printf("%s\n", imageList[i*3+k].c_str());
view = imread(imageList[i*3+k], 1);
view = imread(imageList[i*3+k], IMREAD_COLOR);
if(!view.empty())
{
......@@ -338,7 +338,7 @@ int main( int argc, char** argv )
{
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
view = imread(imageList[i*3+k], 1);
view = imread(imageList[i*3+k], IMREAD_COLOR);
if(view.empty())
continue;
......
......@@ -496,7 +496,7 @@ int main( int argc, char** argv )
view0.copyTo(view);
}
else if( i < (int)imageList.size() )
view = imread(imageList[i], 1);
view = imread(imageList[i], IMREAD_COLOR);
if(view.empty())
{
......@@ -621,7 +621,7 @@ int main( int argc, char** argv )
for( i = 0; i < (int)imageList.size(); i++ )
{
view = imread(imageList[i], 1);
view = imread(imageList[i], IMREAD_COLOR);
if(view.empty())
continue;
remap(view, rview, map1, map2, INTER_LINEAR);
......
......@@ -145,7 +145,7 @@ int main( int argc, const char** argv )
len--;
buf[len] = '\0';
cout << "file " << buf << endl;
image = imread( buf, 1 );
image = imread( buf, IMREAD_COLOR );
if( !image.empty() )
{
detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
......
......@@ -59,7 +59,7 @@ static void read_imgList(const string& filename, vector<Mat>& images) {
}
string line;
while (getline(file, line)) {
images.push_back(imread(line, 0));
images.push_back(imread(line, IMREAD_GRAYSCALE));
}
}
......
......@@ -80,7 +80,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize, b
for( k = 0; k < 2; k++ )
{
const string& filename = imagelist[i*2+k];
Mat img = imread(filename, 0);
Mat img = imread(filename, IMREAD_GRAYSCALE);
if(img.empty())
break;
if( imageSize == Size() )
......@@ -298,7 +298,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize, b
{
for( k = 0; k < 2; k++ )
{
Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
Mat img = imread(goodImageList[i*2+k], IMREAD_GRAYSCALE), rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
......
......@@ -8,7 +8,7 @@ using namespace std;
int main(int argc, char** argv)
{
Mat img, gray;
if( argc != 2 || !(img=imread(argv[1], 1)).data)
if( argc != 2 || !(img=imread(argv[1], IMREAD_COLOR)).data)
return -1;
cvtColor(img, gray, COLOR_BGR2GRAY);
// smooth it, otherwise a lot of false circles may be detected
......
......@@ -7,7 +7,7 @@ using namespace std;
int main(int argc, char** argv)
{
Mat src, dst, color_dst;
if( argc != 2 || !(src=imread(argv[1], 0)).data)
if( argc != 2 || !(src=imread(argv[1], IMREAD_GRAYSCALE)).data)
return -1;
Canny( src, dst, 50, 200, 3 );
......
......@@ -6,7 +6,7 @@ using namespace cv;
int main( int argc, char** argv )
{
Mat src, hsv;
if( argc != 2 || !(src=imread(argv[1], 1)).data )
if( argc != 2 || !(src=imread(argv[1], IMREAD_COLOR)).data )
return -1;
cvtColor(src, hsv, COLOR_BGR2HSV);
......
......@@ -9,7 +9,7 @@ int main( int argc, char** argv )
Mat src;
// the first command-line parameter must be a filename of the binary
// (black-n-white) image
if( argc != 2 || !(src=imread(argv[1], 0)).data)
if( argc != 2 || !(src=imread(argv[1], IMREAD_GRAYSCALE)).data)
return -1;
Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);
......
......@@ -54,7 +54,7 @@ int main( int argc, char** argv )
return 0;
}
string filename = samples::findFile(parser.get<string>("@input"));
Mat img0 = imread(filename, 1), imgGray;
Mat img0 = imread(filename, IMREAD_COLOR), imgGray;
if( img0.empty() )
{
......
......@@ -57,7 +57,7 @@ def main():
def processImage(fn):
print('processing %s... ' % fn)
img = cv.imread(fn, 0)
img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
if img is None:
print("Failed to load", fn)
return None
......
......@@ -69,7 +69,7 @@ class App():
if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
print(infile)
img = cv.imread(infile,1)
img = cv.imread(infile, cv.IMREAD_COLOR)
if img is None:
continue
self.sel = (0,0,0,0)
......