# image_processing.py
import cv2
import numpy as np
import random
import platform

system_type = 'Linux'
if 'Windows' in platform.platform():
    system_type = 'Windows'

def imread(file_path, mod='normal', loadsize=0, rgb=False):
    '''
    mod:  'normal' | 'gray' | 'all'
    loadsize: 0 -> keep original size
    '''
    if system_type == 'Linux':
        if mod == 'normal':
            img = cv2.imread(file_path,1)
        elif mod == 'gray':
            img = cv2.imread(file_path,0)
        elif mod == 'all':
            img = cv2.imread(file_path,-1)
    
    # On Windows, read via cv2.imdecode instead so that paths containing Chinese characters work.
    # Note: this approach loses EXIF data, which cannot be recovered here.
    else: 
        if mod == 'normal':
            img = cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),1)
        elif mod == 'gray':
            img = cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),0)
        elif mod == 'all':
            img = cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),-1)
            
    if loadsize != 0:
        img = resize(img, loadsize, interpolation=cv2.INTER_CUBIC)

    if rgb and img.ndim==3:
        img = img[:,:,::-1]

    return img

def imwrite(file_path,img):
    '''
    To support saving to paths containing Chinese characters on Windows,
    this function is only used for saving the final output images.
    '''
    if system_type == 'Linux':
        cv2.imwrite(file_path, img)
    else:
        cv2.imencode('.jpg', img)[1].tofile(file_path)
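
# Usage sketch ('face.jpg' and 'face_out.jpg' are hypothetical paths):
#   img  = imread('face.jpg', mod='normal', loadsize=512)  # BGR uint8, shortest side resized to 512
#   gray = imread('face.jpg', mod='gray')                  # single channel, original size
#   imwrite('face_out.jpg', img)                            # Windows-safe write via cv2.imencode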

def resize(img,size,interpolation=cv2.INTER_LINEAR):
    '''
    cv2.INTER_NEAREST      nearest-neighbor interpolation
    cv2.INTER_LINEAR       bilinear interpolation
    cv2.INTER_AREA         resampling using pixel area relation
    cv2.INTER_CUBIC        bicubic interpolation over a 4x4 pixel neighborhood
    cv2.INTER_LANCZOS4     Lanczos interpolation over an 8x8 pixel neighborhood
    '''
    h, w = img.shape[:2]
    if np.min((w,h)) == size:
        return img
    if w >= h:
        res = cv2.resize(img,(int(size*w/h), size),interpolation=interpolation)
    else:
        res = cv2.resize(img,(size, int(size*h/w)),interpolation=interpolation)
    return res
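
# Example (hypothetical shapes): a 720x1280 (h x w) frame resized with size=360 becomes
# 360x640 -- the shorter side is scaled to `size` and the aspect ratio is preserved:
#   small = resize(frame, 360, interpolation=cv2.INTER_AREA)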

def resize_like(img,img_like):
    h, w = img_like.shape[:2]
    img = cv2.resize(img, (w,h))
    return img

def ch_one2three(img):
    res = cv2.merge([img, img, img])
    return res

def color_adjust(img,alpha=0,beta=0,b=0,g=0,r=0,ran = False):
    '''
    g(x) = (1+alpha)*g(x) + 255*beta
    then add a per-channel offset: B += b*255, G += g*255, R += r*255

    Args:
        img   : input image
        alpha : contrast
        beta  : brightness
        b     : blue channel offset
        g     : green channel offset
        r     : red channel offset
        ran   : if True, use randomly generated color correction parameters
    Returns:
        img   : output image
    '''
    img = img.astype('float')
    if ran:
        alpha = random.uniform(-0.1,0.1)
        beta  = random.uniform(-0.1,0.1)
        b     = random.uniform(-0.05,0.05)
        g     = random.uniform(-0.05,0.05)
        r     = random.uniform(-0.05,0.05)
    img = (1+alpha)*img+255.0*beta
    bgr = [b*255.0,g*255.0,r*255.0]
    for i in range(3): img[:,:,i]=img[:,:,i]+bgr[i]
    
    return (np.clip(img,0,255)).astype('uint8')
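
# Example (hypothetical values): raise contrast by 10% and brightness by 5% with no
# per-channel offset, or let the function sample random jitter for augmentation:
#   img_adj = color_adjust(img, alpha=0.1, beta=0.05)
#   img_aug = color_adjust(img, ran=True)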

def makedataset(target_image,orgin_image):
    target_image = resize(target_image,256)
    orgin_image = resize(orgin_image,256)
    img = np.zeros((256,512,3), dtype = "uint8")
    w = orgin_image.shape[1]
    img[0:256,0:256] = target_image[0:256,int(w/2-256/2):int(w/2+256/2)]
    img[0:256,256:512] = orgin_image[0:256,int(w/2-256/2):int(w/2+256/2)]
    return img
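
# Example (target_img / origin_img are hypothetical BGR images with the same aspect ratio):
#   pair = makedataset(target_img, origin_img)  # 256x512x3, target on the left, origin on the right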
    
def find_mostlikely_ROI(mask):
    contours,hierarchy=cv2.findContours(mask, cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours)>0:
        areas = []
        for contour in contours:
            areas.append(cv2.contourArea(contour))
        index = areas.index(max(areas))
        mask = np.zeros_like(mask)
        mask = cv2.fillPoly(mask,[contours[index]],(255))
    return mask
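
# Example (mask is a hypothetical single-channel 0/255 uint8 image): keep only the
# largest contour and discard smaller speckles:
#   mask = find_mostlikely_ROI(mask)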

def boundingSquare(mask,Ex_mul):
    # thresh = mask_threshold(mask,10,threshold)
    area = mask_area(mask)
    if area == 0 :
        return 0,0,0,0

    x,y,w,h = cv2.boundingRect(mask)
    
    center = np.array([int(x+w/2),int(y+h/2)])
    size = max(w,h)
    point0=np.array([x,y])
    point1=np.array([x+size,y+size])

    h, w = mask.shape[:2]
    if size*Ex_mul > min(h, w):
        size = min(h, w)
        halfsize = int(min(h, w)/2)
    else:
        size = Ex_mul*size
        halfsize = int(size/2)
        size = halfsize*2
    point0 = center - halfsize
    point1 = center + halfsize
    if point0[0]<0:
        point0[0]=0
        point1[0]=size
    if point0[1]<0:
        point0[1]=0
        point1[1]=size
    if point1[0]>w:
        point1[0]=w
        point0[0]=w-size
    if point1[1]>h:
        point1[1]=h
        point0[1]=h-size
    center = ((point0+point1)/2).astype('int')
    return center[0],center[1],halfsize,area
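
# Example (img and mask are hypothetical): expand the mask's bounding box into a square
# with a 1.5x margin, then crop that region from the image:
#   x, y, halfsize, area = boundingSquare(mask, Ex_mul=1.5)
#   if area != 0:
#       crop = img[y-halfsize:y+halfsize, x-halfsize:x+halfsize]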

def mask_threshold(mask,ex_mun,threshold):
    mask = cv2.threshold(mask,threshold,255,cv2.THRESH_BINARY)[1]
    mask = cv2.blur(mask, (ex_mun, ex_mun))
    mask = cv2.threshold(mask,threshold/5,255,cv2.THRESH_BINARY)[1]
    return mask

def mask_area(mask):
    mask = cv2.threshold(mask,127,255,0)[1]
    # contours = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[1]  # for OpenCV 3.4
    contours = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[0]  # updated for OpenCV 4.x
    try:
        area = cv2.contourArea(contours[0])
    except:
        area = 0
    return area

def replace_mosaic(img_origin,img_fake,mask,x,y,size,no_feather):
    img_fake = cv2.resize(img_fake,(size*2,size*2),interpolation=cv2.INTER_LANCZOS4)
    if no_feather:
        img_origin[y-size:y+size,x-size:x+size]=img_fake
        img_result = img_origin
    else:
        #color correction
        RGB_origin = img_origin[y-size:y+size,x-size:x+size].mean(0).mean(0)
        RGB_fake = img_fake.mean(0).mean(0)
        for i in range(3):img_fake[:,:,i] = np.clip(img_fake[:,:,i]+RGB_origin[i]-RGB_fake[i],0,255)      
        # feathering (blend the border region)
        eclosion_num = int(size/5)
        entad = int(eclosion_num/2+2)

        mask = cv2.resize(mask,(img_origin.shape[1],img_origin.shape[0]))
        mask = ch_one2three(mask)
        
        mask = (cv2.blur(mask, (eclosion_num, eclosion_num)))
        mask_tmp = np.zeros_like(mask)
        mask_tmp[y-size:y+size,x-size:x+size] = mask[y-size:y+size,x-size:x+size]# Fix edge overflow
        mask = mask_tmp/255.0

        img_tmp = np.zeros(img_origin.shape)
        img_tmp[y-size:y+size,x-size:x+size]=img_fake
        img_result = img_origin.copy()
        img_result = (img_origin*(1-mask)+img_tmp*mask).astype('uint8')

    return img_result
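
# Usage sketch (img_origin, img_fake and mask are hypothetical; x, y, size typically come
# from boundingSquare on the mosaic mask, so `size` here is the half side length):
#   x, y, size, area = boundingSquare(mask, Ex_mul=1.5)
#   result = replace_mosaic(img_origin, img_fake, mask, x, y, size, no_feather=False)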

def Q_lapulase(resImg):
    '''
    Evaluate image sharpness via the variance of the Laplacian.
    score > 20   normal
    score > 50   clear
    '''
    img2gray = cv2.cvtColor(resImg, cv2.COLOR_BGR2GRAY)
    img2gray = resize(img2gray,512)
    res = cv2.Laplacian(img2gray, cv2.CV_64F)
    score = res.var()
    return score
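
# Example (frame is hypothetical): skip frames that are too blurry, using the
# thresholds from the docstring above:
#   if Q_lapulase(frame) > 20:
#       pass  # frame is sharp enough to process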

def psnr(img1,img2):
    mse = np.mean((img1/255.0-img2/255.0)**2)
    if mse < 1e-10:
        return 100
    psnr_v = 20*np.log10(1/np.sqrt(mse))
    return psnr_v
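
# Example (frame / frame_jpeg are hypothetical uint8 arrays of the same shape):
#   score = psnr(frame, frame_jpeg)  # in dB; identical inputs return 100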

def splice(imgs,splice_shape):
    '''Stitch multiple images into a grid; all imgs must have the same size.
    imgs : [img1,img2,img3,img4]
    splice_shape: (2,2)
    '''
    h,w,ch = imgs[0].shape
    output = np.zeros((h*splice_shape[0],w*splice_shape[1],ch),np.uint8)
    cnt = 0
    for i in range(splice_shape[0]):
        for j in range(splice_shape[1]):
            if cnt < len(imgs):
                output[h*i:h*(i+1),w*j:w*(j+1)] = imgs[cnt]
                cnt += 1
    return output
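
# Example (img_a..img_d are hypothetical same-sized images): tile them into a 2x2 grid,
# filled row by row (a b on top, c d below):
#   grid = splice([img_a, img_b, img_c, img_d], (2, 2))  # shape (2*h, 2*w, ch)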