Commit 22c356da authored by Eric.Lee2021 🚴🏻

update frame

Parent f1cfd3e8
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| ||||| |||| || |||||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||||
|| || ||||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || |||||||
/--------------------- Face Bioassay ---------------------/
'''
# date:2021-04-18
# Author: Eric.Lee
# function: Face Bioassay "liveness detection based on facial actions"

import os
import cv2
import time
import numpy as np
import random
import shutil

# load the model components
from face_detect.yolo_v3_face import yolo_v3_face_model
from face_multi_task.face_multi_task_component import FaceMuitiTask_Model
from face_euler_angle.face_euler_angle_component import FaceAngle_Model

# load the utility libraries
import sys
sys.path.append("./lib/face_bioassay_lib/")
from cores.face_fuction import get_faces_batch_attribute
from utils.utils import parse_data_cfg

def main_face_bioassay(video_path, cfg_file):
    config = parse_data_cfg(cfg_file)

    print("\n/************** Face Bioassay *****************/")
    print("/********************************************************/\n")

    # pose_model = light_pose_model()
    face_detect_model = yolo_v3_face_model(
        conf_thres=float(config["detect_conf_thres"]),
        nms_thres=float(config["detect_nms_thres"]),
        model_arch=config["detect_model_arch"],
        model_path=config["detect_model_path"],
        yolo_anchor_scale=float(config["yolo_anchor_scale"]),
        img_size=int(config["detect_input_size"]),  # input size is a pixel count, so cast to int
    )
    face_multitask_model = FaceMuitiTask_Model(
        model_path=config["face_multitask_model_path"],
        model_arch=config["face_multitask_model_arch"])
    face_euler_model = FaceAngle_Model(model_path=config["face_euler_model_path"])

    cap = cv2.VideoCapture(video_path)
    frame_idx = 0
    while cap.isOpened():
        ret, img = cap.read()
        if ret:
            frame_idx += 1
            video_time = cap.get(cv2.CAP_PROP_POS_MSEC)

            faces_bboxes = face_detect_model.predict(img, vis=False)  # detect faces and get their bounding boxes
            faces_message = get_faces_batch_attribute(
                face_multitask_model, face_euler_model, faces_bboxes, img, use_cuda=True, vis=True)
            if faces_message is not None:
                print("faces_message : {} \n".format(faces_message))

            cv2.namedWindow("DriverFatigueMonitor", 0)
            cv2.imshow("DriverFatigueMonitor", img)
            if cv2.waitKey(1) == 27:  # ESC to quit
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
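
#-------------------------------------------------------------------------------
# A hedged usage sketch (not part of the original file): the video and cfg
# paths below are the ones main.py in this commit passes to main_face_bioassay().
if __name__ == "__main__":
    main_face_bioassay(video_path="./video/f1.mp4",
                       cfg_file="./lib/face_bioassay_lib/cfg/face_bioassay.cfg")

#-------------------------------------------------------------------------------
# The cfg file consumed above (its path, inferred from main.py, is
# ./lib/face_bioassay_lib/cfg/face_bioassay.cfg):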
detect_model_path=./wyw2s_models/face_yolo_416-20210418.pt
detect_model_arch=yolo
detect_input_size = 416
yolo_anchor_scale=1.
detect_conf_thres=0.4
detect_nms_thres=0.45
#face_multitask_model_path=./wyw2s_models/face_multitask-resnet_50_imgsize-256-20210411.pth
#face_multitask_model_arch=resnet50
face_multitask_model_path=./wyw2s_models/face_multitask-resnet_34_imgsize-256-20210425.pth
face_multitask_model_arch=resnet34
face_euler_model_path=./wyw2s_models/euler_angle-resnet_18_imgsize_256.pth
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| |||| |||| || ||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||
|| || |||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || ||||
/------------------ Who You Want 2 See ------------------/
'''
# date:2021-04-17
# Author: Eric.Lee
# function: pipeline
import os
import numpy as np
import cv2
import torch
import random  # needed by plot_box() below
from PIL import Image
def compute_iou(rec1, rec2):
    """
    computing IoU
    :param rec1: (y0, x0, y1, x1), which reflects
            (top, left, bottom, right)
    :param rec2: (y0, x0, y1, x1)
    :return: scalar overlap ratio; note that the active return divides by
             S_rec1 (the area of rec1), not by the union area
    """
    # computing the area of each rectangle
    S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])

    # computing the sum of the areas
    sum_area = S_rec1 + S_rec2

    # find each edge of the intersecting rectangle
    left_line = max(rec1[1], rec2[1])
    right_line = min(rec1[3], rec2[3])
    top_line = max(rec1[0], rec2[0])
    bottom_line = min(rec1[2], rec2[2])

    # judge whether there is an intersection
    if left_line >= right_line or top_line >= bottom_line:
        return 0
    else:
        intersect = (right_line - left_line) * (bottom_line - top_line)
        # return (intersect / (sum_area - intersect)) * 1.0  # standard IoU
        return (intersect / (S_rec1 + 1e-6)) * 1.0
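
# A quick sanity check of the convention above (boxes are (y0, x0, y1, x1)):
#   compute_iou((0, 0, 10, 10), (0, 0, 10, 10))    # ~1.0  identical boxes
#   compute_iou((0, 0, 10, 10), (5, 5, 15, 15))    # 0.25  a quarter of rec1 is covered
#   compute_iou((0, 0, 10, 10), (20, 20, 30, 30))  # 0     disjoint boxes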
def draw_landmarks(img, output, face_w, face_h, x0, y0, vis=False):
    img_width = img.shape[1]
    img_height = img.shape[0]
    dict_landmarks = {}
    eyes_center = []
    x_list = []
    y_list = []
    for i in range(int(output.shape[0] / 2)):
        x = output[i * 2 + 0] * float(face_w) + x0
        y = output[i * 2 + 1] * float(face_h) + y0
        x_list.append(x)
        y_list.append(y)

        if 41 >= i >= 33:
            if 'left_eyebrow' not in dict_landmarks.keys():
                dict_landmarks['left_eyebrow'] = []
            dict_landmarks['left_eyebrow'].append([int(x), int(y), (0, 255, 0)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (0, 255, 0), -1)
        elif 50 >= i >= 42:
            if 'right_eyebrow' not in dict_landmarks.keys():
                dict_landmarks['right_eyebrow'] = []
            dict_landmarks['right_eyebrow'].append([int(x), int(y), (0, 255, 0)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (0, 255, 0), -1)
        elif 67 >= i >= 60:
            if 'left_eye' not in dict_landmarks.keys():
                dict_landmarks['left_eye'] = []
            dict_landmarks['left_eye'].append([int(x), int(y), (255, 55, 255)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (255, 0, 255), -1)
        elif 75 >= i >= 68:
            if 'right_eye' not in dict_landmarks.keys():
                dict_landmarks['right_eye'] = []
            dict_landmarks['right_eye'].append([int(x), int(y), (255, 55, 255)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (255, 0, 255), -1)
        elif 97 >= i >= 96:
            eyes_center.append((x, y))
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (0, 0, 255), -1)
        elif 54 >= i >= 51:
            if 'bridge_nose' not in dict_landmarks.keys():
                dict_landmarks['bridge_nose'] = []
            dict_landmarks['bridge_nose'].append([int(x), int(y), (0, 170, 255)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (0, 170, 255), -1)
        elif 32 >= i >= 0:
            if 'basin' not in dict_landmarks.keys():
                dict_landmarks['basin'] = []
            dict_landmarks['basin'].append([int(x), int(y), (255, 30, 30)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (255, 30, 30), -1)
        elif 59 >= i >= 55:
            if 'wing_nose' not in dict_landmarks.keys():
                dict_landmarks['wing_nose'] = []
            dict_landmarks['wing_nose'].append([int(x), int(y), (0, 255, 255)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (0, 255, 255), -1)
        elif 87 >= i >= 76:
            if 'out_lip' not in dict_landmarks.keys():
                dict_landmarks['out_lip'] = []
            dict_landmarks['out_lip'].append([int(x), int(y), (255, 255, 0)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (255, 255, 0), -1)
        elif 95 >= i >= 88:
            if 'in_lip' not in dict_landmarks.keys():
                dict_landmarks['in_lip'] = []
            dict_landmarks['in_lip'].append([int(x), int(y), (50, 220, 255)])
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (50, 220, 255), -1)
        else:
            if vis:
                cv2.circle(img, (int(x), int(y)), 2, (255, 0, 255), -1)

    face_area = (max(x_list) - min(x_list)) * (max(y_list) - min(y_list))
    return dict_landmarks, eyes_center, face_area
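
# Note on the index ranges above: 0-32 face contour ("basin"), 33-41/42-50
# eyebrows, 51-54 nose bridge, 55-59 nose wing, 60-67/68-75 eyes, 76-87 outer
# lip, 88-95 inner lip, 96-97 eye centers. This is the 98-point WFLW landmark
# layout, so the multitask model presumably regresses WFLW-style landmarks
# (an inference from the indices; the code itself does not state it).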
def draw_contour(image, dict, vis=False):
    x0 = 0  # x offset
    y0 = 0  # y offset
    for key in dict.keys():
        # print(key)
        _, _, color = dict[key][0]

        if 'left_eye' == key:
            eye_x = np.mean([dict[key][i][0] + x0 for i in range(len(dict[key]))])
            eye_y = np.mean([dict[key][i][1] + y0 for i in range(len(dict[key]))])
            if vis:
                cv2.circle(image, (int(eye_x), int(eye_y)), 3, (255, 255, 55), -1)
        if 'right_eye' == key:
            eye_x = np.mean([dict[key][i][0] + x0 for i in range(len(dict[key]))])
            eye_y = np.mean([dict[key][i][1] + y0 for i in range(len(dict[key]))])
            if vis:
                cv2.circle(image, (int(eye_x), int(eye_y)), 3, (255, 215, 25), -1)

        if 'basin' == key or 'wing_nose' == key:
            pts = np.array([[dict[key][i][0] + x0, dict[key][i][1] + y0] for i in range(len(dict[key]))], np.int32)
            if vis:
                cv2.polylines(image, [pts], False, color, thickness=2)
        else:
            points_array = np.zeros((1, len(dict[key]), 2), dtype=np.int32)
            for i in range(len(dict[key])):
                x, y, _ = dict[key][i]
                points_array[0, i, 0] = x + x0
                points_array[0, i, 1] = y + y0
            # cv2.fillPoly(image, points_array, color)
            if vis:
                cv2.drawContours(image, points_array, -1, color, thickness=2)
def plot_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1  # line thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    if label:
        tf = max(tl - 1, 2)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [185, 195, 190], thickness=tf, lineType=cv2.LINE_AA)
#-------------------------------------------------------------------------------
def face_alignment(imgn, eye_left_n, eye_right_n,
                   desiredLeftEye=(0.34, 0.42), desiredFaceWidth=256, desiredFaceHeight=None):
    if desiredFaceHeight is None:
        desiredFaceHeight = desiredFaceWidth

    leftEyeCenter = eye_left_n
    rightEyeCenter = eye_right_n
    # compute the angle between the eye centroids
    dY = rightEyeCenter[1] - leftEyeCenter[1]
    dX = rightEyeCenter[0] - leftEyeCenter[0]
    angle = np.degrees(np.arctan2(dY, dX))

    # compute the desired right eye x-coordinate based on the
    # desired x-coordinate of the left eye
    desiredRightEyeX = 1.0 - desiredLeftEye[0]

    # determine the scale of the new resulting image by taking
    # the ratio of the distance between eyes in the *current*
    # image to the ratio of distance between eyes in the
    # *desired* image
    dist = np.sqrt((dX ** 2) + (dY ** 2))
    desiredDist = (desiredRightEyeX - desiredLeftEye[0])
    desiredDist *= desiredFaceWidth
    scale = desiredDist / dist

    # compute the center (x, y)-coordinates (i.e., the median point)
    # between the two eyes in the input image
    eyesCenter = ((leftEyeCenter[0] + rightEyeCenter[0]) / 2, (leftEyeCenter[1] + rightEyeCenter[1]) / 2)

    # grab the rotation matrix for rotating and scaling the face
    M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)

    # update the translation component of the matrix
    tX = desiredFaceWidth * 0.5
    tY = desiredFaceHeight * desiredLeftEye[1]
    M[0, 2] += (tX - eyesCenter[0])
    M[1, 2] += (tY - eyesCenter[1])

    M_reg = np.zeros((3, 3), dtype=np.float32)
    M_reg[0, :] = M[0, :]
    M_reg[1, :] = M[1, :]
    M_reg[2, :] = (0, 0, 1.)
    # print(M_reg)
    M_I = np.linalg.inv(M_reg)  # invert the matrix to map the aligned output back to the source image
    # print(M_I)

    # apply the affine transformation
    (w, h) = (desiredFaceWidth, desiredFaceHeight)
    # cv_resize_model = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_NEAREST, cv2.INTER_AREA]
    output = cv2.warpAffine(imgn, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    #---------------------------------------------------------------------------------------
    # ptx1 = int(eye_left_gt_n[0]*M[0][0] + eye_left_gt_n[1]*M[0][1] + M[0][2])
    # pty1 = int(eye_left_gt_n[0]*M[1][0] + eye_left_gt_n[1]*M[1][1] + M[1][2])
    #
    # ptx2 = int(eye_right_gt_n[0]*M[0][0] + eye_right_gt_n[1]*M[0][1] + M[0][2])
    # pty2 = int(eye_right_gt_n[0]*M[1][0] + eye_right_gt_n[1]*M[1][1] + M[1][2])
    return output
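
# A hedged usage sketch: the eye centers are the ones draw_landmarks() returns
# via eyes_center (landmark indices 96/97), matching the call in
# get_faces_batch_attribute() below:
#   aligned = face_alignment(img, eyes_center[0], eyes_center[1],
#                            desiredLeftEye=(0.365, 0.38), desiredFaceWidth=112)
# `aligned` is then a 112x112 crop with both eyes placed at fixed positions.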
def refine_face_bbox(bbox, img_shape):
    height, width, _ = img_shape

    x1, y1, x2, y2 = bbox

    expand_w = (x2 - x1)
    expand_h = (y2 - y1)

    x1 -= expand_w * 0.12
    y1 -= expand_h * 0.12
    x2 += expand_w * 0.12
    y2 += expand_h * 0.08

    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

    x1 = np.clip(x1, 0, width - 1)
    y1 = np.clip(y1, 0, height - 1)
    x2 = np.clip(x2, 0, width - 1)
    y2 = np.clip(y2, 0, height - 1)

    return (x1, y1, x2, y2)
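
# Worked example: for bbox (100, 100, 200, 200) in a 640x480 image, the box is
# expanded by 12% of its size on the left, top and right and 8% on the bottom,
# giving (88, 88, 212, 208); the np.clip calls keep it inside the image.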
def get_faces_batch_attribute(face_multitask_model, face_euler_model, dets, img_raw, use_cuda, face_size=256, vis=False):
    if len(dets) == 0:
        return None
    img_align = img_raw.copy()
    # build the per-face input batch
    image_batch = None
    r_bboxes = []
    imgs_crop = []
    for b in dets:
        b = list(map(int, b))

        r_bbox = refine_face_bbox((b[0], b[1], b[2], b[3]), img_raw.shape)
        r_bboxes.append(r_bbox)
        img_crop = img_raw[r_bbox[1]:r_bbox[3], r_bbox[0]:r_bbox[2]]
        imgs_crop.append(img_crop)
        img_ = cv2.resize(img_crop, (face_size, face_size), interpolation=cv2.INTER_LINEAR)  # INTER_LINEAR INTER_CUBIC

        img_ = img_.astype(np.float32)
        img_ = (img_ - 128.) / 256.
        img_ = img_.transpose(2, 0, 1)
        img_ = np.expand_dims(img_, 0)

        if image_batch is None:
            image_batch = img_
        else:
            image_batch = np.concatenate((image_batch, img_), axis=0)

    landmarks_pre, gender_pre, age_pre = face_multitask_model.predict(image_batch)
    euler_angles = face_euler_model.predict(image_batch)

    faces_message = []  # must be a list, since dicts are appended below
    for i in range(len(dets)):
        x0, y0 = r_bboxes[i][0], r_bboxes[i][1]
        face_w = r_bboxes[i][2] - r_bboxes[i][0]
        face_h = r_bboxes[i][3] - r_bboxes[i][1]
        dict_landmarks, eyes_center, face_area = draw_landmarks(img_raw, landmarks_pre[i], face_w, face_h, x0, y0, vis=False)

        gray_ = cv2.cvtColor(img_align[r_bboxes[i][1]:r_bboxes[i][3], r_bboxes[i][0]:r_bboxes[i][2], :], cv2.COLOR_BGR2GRAY)
        blur_ = cv2.Laplacian(gray_, cv2.CV_64F).var()

        gender_max_index = np.argmax(gender_pre[i])  # index of the most probable gender class
        score_gender = gender_pre[i][gender_max_index]  # its probability

        yaw, pitch, roll = euler_angles[i]  # Euler angles
        if vis:
            cv2.putText(img_raw, "yaw:{:.1f},pitch:{:.1f},roll:{:.1f}".format(yaw, pitch, roll), (int(r_bboxes[i][0] - 20), int(r_bboxes[i][1] - 30)), cv2.FONT_HERSHEY_DUPLEX, 0.65, (253, 139, 54), 5)
            cv2.putText(img_raw, "yaw:{:.1f},pitch:{:.1f},roll:{:.1f}".format(yaw, pitch, roll), (int(r_bboxes[i][0] - 20), int(r_bboxes[i][1] - 30)), cv2.FONT_HERSHEY_DUPLEX, 0.65, (20, 185, 255), 1)

            cv2.putText(img_raw, "{}".format(int(face_area)), (int(r_bboxes[i][0] - 1), int(r_bboxes[i][3] - 3)), cv2.FONT_HERSHEY_DUPLEX, 0.65, (253, 39, 54), 5)  # face_area
            cv2.putText(img_raw, "{}".format(int(face_area)), (int(r_bboxes[i][0] - 1), int(r_bboxes[i][3] - 3)), cv2.FONT_HERSHEY_DUPLEX, 0.65, (20, 185, 255), 1)

        if gender_max_index == 1:
            gender_str = "male"
        else:
            gender_str = "female"

        if abs(yaw) < 45.:
            face_align_output = face_alignment(img_align, eyes_center[0], eyes_center[1],
                                               desiredLeftEye=(0.365, 0.38), desiredFaceWidth=112, desiredFaceHeight=None)
        else:
            face_align_output = face_alignment(img_align, eyes_center[0], eyes_center[1],
                                               desiredLeftEye=(0.38, 0.40), desiredFaceWidth=112, desiredFaceHeight=None)

        faces_message.append(
            {
                "xyxy": r_bboxes[i],
                "age": age_pre[i][0],
                "gender": gender_str,
                "head_3d_angle": {"yaw": yaw, "pitch": pitch, "roll": roll}
            }
        )
        # plot_box(r_bboxes[i][0:4], img_raw, label="{}, age: {:.1f}, unblur:{}".format(gender_str, age_pre[i][0], int(blur_)), color=(255, 90, 90), line_thickness=2)
        if vis:
            plot_box(r_bboxes[i][0:4], img_raw, label="{}, age: {:.1f}".format(gender_str, age_pre[i][0]), color=(255, 90, 90), line_thickness=2)

    return faces_message
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| |||| |||| || ||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||
|| || |||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || ||||
/------------------ Who You Want 2 See ------------------/
'''
# date:2020-12-12
# Author: Eric.Lee
# function: show clip video
import os
import cv2
import copy
import time
import threading
from threading import current_thread, Lock
import psutil
import numpy as np
import random

lock = Lock()

def run_one_process(path, process_id, vis):
    lock.acquire()
    video_ = cv2.VideoCapture(path)
    lock.release()
    while True:
        ret, img_ = video_.read()
        if ret:
            #------------------------------------------------
            if vis:
                cv2.namedWindow('video_seg_{}'.format(process_id), 0)
                cv2.resizeWindow('video_seg_{}'.format(process_id), 300, 210)
                cv2.moveWindow('video_seg_{}'.format(process_id), (process_id % 6) * 300 + 60, int(process_id / 6) * 230)
                cv2.imshow('video_seg_{}'.format(process_id), img_)
                if cv2.waitKey(300) == 27:  # ESC to quit this segment
                    break
        else:
            break
    if vis:
        cv2.waitKey(30000)
        cv2.destroyWindow('video_seg_{}'.format(process_id))

def run_show(path, vis):
    seg_num = len(os.listdir(path))
    videos_path = os.listdir(path)
    #--------------------------------------
    st_ = time.time()
    process_list = []
    for i in range(0, seg_num):
        # print(video_list[i])
        t = threading.Thread(target=run_one_process, args=(path + videos_path[i], i, vis))
        process_list.append(t)

    for i in range(0, seg_num):
        process_list[i].start()
    print(' start run ~ ')

    for i in range(0, seg_num):
        process_list[i].join()  # make the main thread wait for the worker threads to finish
    del process_list

    et_ = time.time()

if __name__ == "__main__":
    path = './video/'
    vis = True
    run_show(path, vis)
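
# A hedged usage note: run_show() builds each video path as `path + name`, so
# `path` must end with a slash (e.g. './video/'); os.path.join(path, name)
# would be the safer choice. Despite the "process" naming, the workers are
# threads, which is why the module-level Lock is shared without extra setup.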
#--------------------------------------
import os

def parse_data_cfg(path):
    """Parses the data configuration file"""
    print('data_cfg : ', path)
    options = dict()
    with open(path, 'r', encoding='UTF-8') as fp:
        lines = fp.readlines()
    for line in lines:
        line = line.strip()
        if line == '' or line.startswith('#'):
            continue
        key, value = line.split('=')
        options[key.strip()] = value.strip()
    return options
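
# A hedged usage sketch: parse_data_cfg() returns every value as a string, so
# callers cast, exactly as main_face_bioassay() does with the cfg shown earlier:
#   cfg = parse_data_cfg("./lib/face_bioassay_lib/cfg/face_bioassay.cfg")
#   conf_thres = float(cfg["detect_conf_thres"])  # "0.4" -> 0.4
#   input_size = int(cfg["detect_input_size"])    # "416" -> 416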
# function: main
import os
import argparse
import warnings
warnings.filterwarnings("ignore")

import sys
sys.path.append("./components/")  # add the model components path
# Each app's entry point is imported lazily inside its branch below, so only
# the selected application's dependencies get loaded.

def demo_logo():
    print("\n/*********************************/")
    print("/---------------------------------/\n")
    print("       WELCOME : DpCas-Light       ")
    print("           << APP_X >>             ")
    print("     Copyright 2021 Eric.Lee2021   ")
    print("        Apache License 2.0         ")
    print("\n/---------------------------------/")
    print("/*********************************/\n")
if __name__ == '__main__':
    demo_logo()

    parser = argparse.ArgumentParser(description=" DpCas : << Deep Learning Componentized Application System >> ")
    parser.add_argument('-app', type=int, default=0,
                        help="handpose_x:0, wyw2s:1, face_bioassay:2, video_ana:3, face_pay:4, drive:5")  # select the App example

    app_dict = {
        0: "handpose_x",
        1: "wyw2s",
        2: "face_bioassay",
        3: "video_ana",
        4: "face_pay",
        5: "drive"}

    args = parser.parse_args()  # parse the command-line arguments

    APP_P = app_dict[args.app]

    if APP_P == "handpose_x":  # gesture recognition
        from applications.handpose_local_app import main_handpose_x  # load the handpose app
        cfg_file = "./lib/hand_lib/cfg/handpose.cfg"
        main_handpose_x(cfg_file)  # run the handpose app

    elif APP_P == "wyw2s":  # video clipping based on face recognition
        from applications.wyw2s_local_app import main_wyw2s  # load the "who you want 2 see" app
        cfg_file = "./lib/wyw2s_lib/cfg/wyw2s.cfg"
        main_wyw2s(video_path="./video/f1.mp4", cfg_file=cfg_file)  # run the "who you want 2 see" app

    elif APP_P == "face_bioassay":
        from applications.face_bioassay_local_app import main_face_bioassay  # load the face_bioassay app
        cfg_file = "./lib/face_bioassay_lib/cfg/face_bioassay.cfg"
        main_face_bioassay(video_path="./video/f1.mp4", cfg_file=cfg_file)  # run the face_bioassay app

    # elif APP_P == "video_ana":
    #     from applications.VideoAnalysis_app import main_VideoAnalysis  # load the video_analysis app
    #     main_VideoAnalysis(video_path="./video/f3.mp4")  # run the video_analysis app

    # elif APP_P == "face_pay":
    #     cfg_file = "./lib/facepay_lib/cfg/facepay.cfg"
    #     from applications.FacePay_local_app import main_facePay  # load the face pay app
    #     main_facePay(video_path=0, cfg_file=cfg_file)  # run the face pay app

    # elif APP_P == "drive":
    #     from applications.DangerousDriveWarning_local_app import main_DangerousDriveWarning  # load the dangerous-driving warning app
    #     cfg_file = "./lib/dfmonitor_lib/cfg/dfm.cfg"
    #     main_DangerousDriveWarning(video_path="./video/drive1.mp4", cfg_file=cfg_file)  # run the dangerous-driving warning app

    print(" well done ~")