From bc20c001ecbd7778c16bb50955ccded2262f6770 Mon Sep 17 00:00:00 2001 From: "Eric.Lee2021" <305141918@qq.com> Date: Thu, 25 Mar 2021 01:56:31 +0800 Subject: [PATCH] create first dpcas demo --- README.md | 55 ++ applications/handpose_local_app.py | 371 ++++++++++ components/hand_detect/acc_model.py | 243 +++++++ components/hand_detect/utils/__init__.py | 0 components/hand_detect/utils/common_utils.py | 656 ++++++++++++++++++ components/hand_detect/utils/datasets.py | 395 +++++++++++ components/hand_detect/utils/torch_utils.py | 24 + components/hand_detect/utils/utils.py | 438 ++++++++++++ components/hand_detect/yolo_v3_hand.py | 312 +++++++++ components/hand_detect/yolov3.py | 505 ++++++++++++++ components/hand_keypoints/handpose_x.py | 136 ++++ .../hand_keypoints/models/mobilenetv2.py | 105 +++ components/hand_keypoints/models/my_model.py | 67 ++ components/hand_keypoints/models/resnet.py | 263 +++++++ components/hand_keypoints/models/resnet_50.py | 194 ++++++ components/hand_keypoints/models/rexnetv1.py | 183 +++++ .../hand_keypoints/models/shufflenet.py | 254 +++++++ .../hand_keypoints/models/shufflenetv2.py | 157 +++++ .../hand_keypoints/models/squeezenet.py | 153 ++++ .../hand_keypoints/utils/common_utils.py | 132 ++++ .../hand_keypoints/utils/model_utils.py | 61 ++ lib/hand_lib/cfg/handpose.cfg | 11 + lib/hand_lib/cores/hand_pnp.py | 164 +++++ lib/hand_lib/cores/handpose_fuction.py | 317 +++++++++ lib/hand_lib/cores/tracking_utils.py | 89 +++ lib/hand_lib/utils/utils.py | 15 + main.py | 41 ++ .../sentences/IdentifyingObjectsWait.mp3 | Bin 0 -> 17280 bytes .../audio/sentences/ObjectMayBeIdentified.mp3 | Bin 0 -> 13608 bytes 29 files changed, 5341 insertions(+) create mode 100644 README.md create mode 100644 applications/handpose_local_app.py create mode 100644 components/hand_detect/acc_model.py create mode 100644 components/hand_detect/utils/__init__.py create mode 100644 components/hand_detect/utils/common_utils.py create mode 100644 components/hand_detect/utils/datasets.py create mode 100644 components/hand_detect/utils/torch_utils.py create mode 100644 components/hand_detect/utils/utils.py create mode 100644 components/hand_detect/yolo_v3_hand.py create mode 100644 components/hand_detect/yolov3.py create mode 100644 components/hand_keypoints/handpose_x.py create mode 100644 components/hand_keypoints/models/mobilenetv2.py create mode 100644 components/hand_keypoints/models/my_model.py create mode 100644 components/hand_keypoints/models/resnet.py create mode 100644 components/hand_keypoints/models/resnet_50.py create mode 100644 components/hand_keypoints/models/rexnetv1.py create mode 100644 components/hand_keypoints/models/shufflenet.py create mode 100644 components/hand_keypoints/models/shufflenetv2.py create mode 100644 components/hand_keypoints/models/squeezenet.py create mode 100644 components/hand_keypoints/utils/common_utils.py create mode 100644 components/hand_keypoints/utils/model_utils.py create mode 100644 lib/hand_lib/cfg/handpose.cfg create mode 100644 lib/hand_lib/cores/hand_pnp.py create mode 100644 lib/hand_lib/cores/handpose_fuction.py create mode 100644 lib/hand_lib/cores/tracking_utils.py create mode 100644 lib/hand_lib/utils/utils.py create mode 100644 main.py create mode 100644 materials/audio/sentences/IdentifyingObjectsWait.mp3 create mode 100644 materials/audio/sentences/ObjectMayBeIdentified.mp3 diff --git a/README.md b/README.md new file mode 100644 index 0000000..4a3f794 --- /dev/null +++ b/README.md @@ -0,0 +1,55 @@ +# DpCas-Light +### dpcas(Deep 
Learning Componentized Application System):深度学习组件化应用系统,为了更好更快的将已有的模型进行快速集成,实现应用。 + +### 第一个完整pipelien 的 Demo,本地手势交互应用,之后会推出web架构的手势交互。 + +## 项目介绍 +### 项目1:手势交互项目(local 本地版本) +* 采用python多进程实现,100% python代码。 +* 1、实现单手点击,即大拇指和食指捏合时认为点击。 +* 2、实现双手配合点击选中目标区域。 +* 3、基于第2点的功能,支持识别架构的拓展(目前没有加任何物体识别模型,后面会加上)。 +* 4、实现基于IOU的手部跟踪。 +* 5、支持语音拓展功能。 + + +## 项目配置 +### 1、软件 +* Python 3.7 +* PyTorch >= 1.5.1 +* opencv-python +* playsound +### 2、硬件 +* 普通USB彩色(RGB)网络摄像头 + +## 相关项目 +### 1、手部检测项目(yolo_v3) +* 项目地址:https://codechina.csdn.net/EricLee/yolo_v3 +* [预训练模型下载地址(百度网盘 Password: 7mk0 )](https://pan.baidu.com/s/1hqzvz0MeFX0EdpWXUV6aFg) + +### 2、手21关键点回归项目(handpose_x) +* https://codechina.csdn.net/EricLee/handpose_x +* * [预训练模型下载地址(百度网盘 Password: 99f3 )](https://pan.baidu.com/s/1Ur6Ikp31XGEuA3hQjYzwIw) + +## 项目使用方法 +### 项目1:手势交互项目(local 本地版本) +### 1、下载手部检测模型和21关键点回归模型。 +### 2、确定摄像头连接成功。 +### 3、打开配置文件 lib/hand_lib/cfg/handpose.cfg 进行相关参数配置,具体配置参数如下,请仔细阅读(一般只需要配置模型路径及模型结构) +``` +detect_model_path=./components/hand_detect/weights/latest_416.pt #手部检测模型地址 +detect_model_arch=yolo_v3 #检测模型类型 ,yolo or yolo-tiny +detect_conf_thres=0.5 # 检测模型阈值 +detect_nms_thres=0.45 # 检测模型 nms 阈值 + +handpose_x_model_path=./components/hand_keypoints/weights/ReXNetV1-size-256-wingloss102-0.1063.pth # 21点手回归模型地址 +handpose_x_model_arch=rexnetv1 # 回归模型结构 + +camera_id = 0 # 相机 ID ,一般默认为0,如果不是请自行确认 +vis_gesture_lines = True # True: 点击时的轨迹可视化, Flase:点击时的轨迹不可视化 +charge_cycle_step = 32 # 点击稳定状态计数器,点击稳定充电环。 +``` +### 4、根目录下运行命令: python main.py + +## 联系方式 (Contact) +* E-mails: 305141918@qq.com diff --git a/applications/handpose_local_app.py b/applications/handpose_local_app.py new file mode 100644 index 0000000..25aea78 --- /dev/null +++ b/applications/handpose_local_app.py @@ -0,0 +1,371 @@ +#-*-coding:utf-8-*- +''' +DpCas-Light +|||| ||||| |||| || ||||||| +|| || || || || || |||| || || +|| || || || || || || || || +|| || || || || ||====|| |||||| +|| || ||||| || || ||======|| || +|| || || || || || || || || +|||| || |||| || || ||||||| + +/--------------------- HandPose_X ---------------------/ +''' +# date:2021-03-12 +# Author: Eric.Lee +# function: handpose demo + +import os +import cv2 +import time + +from multiprocessing import Process +from multiprocessing import Manager + +import cv2 +import numpy as np +import random +import time + +# 加载模型组件库 +from hand_detect.yolo_v3_hand import yolo_v3_hand_model +from hand_keypoints.handpose_x import handpose_x_model + +# 加载工具库 +import sys +sys.path.append("./lib/hand_lib/") +from cores.handpose_fuction import handpose_track_keypoints21_pipeline +from cores.handpose_fuction import hand_tracking,audio_recognize,judge_click_stabel,draw_click_lines +from utils.utils import parse_data_cfg +from playsound import playsound + +def audio_process_dw_edge_cnt(info_dict): + + while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载 + time.sleep(2) + + gesture_names = ["click"] + gesture_dict = {} + + for k_ in gesture_names: + gesture_dict[k_] = None + # time.sleep(1) + # playsound("./materials/audio/sentences/WelcomeAR.mp3") + # time.sleep(0.01) + # playsound("./materials/audio/sentences/MorningEric.mp3") + # time.sleep(1) + reg_cnt = 0 + while True: + time.sleep(0.01) + try: + reg_cnt = info_dict["click_dw_cnt"] + for i in range(reg_cnt): + # playsound("./materials/audio/cue/winwin-1.mp3") + playsound("./materials/audio/sentences/welldone.mp3") + info_dict["click_dw_cnt"] = info_dict["click_dw_cnt"] - reg_cnt + except Exception as inst: + print(type(inst),inst) # exception instance + + + if info_dict["break"] 
== True: + break + +def audio_process_up_edge_cnt(info_dict): + + while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载 + time.sleep(2) + + gesture_names = ["click"] + gesture_dict = {} + + for k_ in gesture_names: + gesture_dict[k_] = None + + reg_cnt = 0 + while True: + time.sleep(0.01) + # print(" --->>> audio_process") + try: + reg_cnt = info_dict["click_up_cnt"] + for i in range(reg_cnt): + # playsound("./materials/audio/cue/m2-0.mp3") + playsound("./materials/audio/sentences/Click.mp3") + info_dict["click_up_cnt"] = info_dict["click_up_cnt"] - reg_cnt + except Exception as inst: + print(type(inst),inst) # the exception instance + + + if info_dict["break"] == True: + break + +def audio_process_dw_edge(info_dict): + + while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载 + time.sleep(2) + + gesture_names = ["click"] + gesture_dict = {} + + for k_ in gesture_names: + gesture_dict[k_] = None + while True: + time.sleep(0.01) + # print(" --->>> audio_process") + try: + for g_ in gesture_names: + if gesture_dict[g_] is None: + gesture_dict[g_] = info_dict[g_] + else: + + if ("click"==g_): + if (info_dict[g_]^gesture_dict[g_]) and info_dict[g_]==False:# 判断Click手势信号为下降沿,Click动作结束 + playsound("./materials/audio/cue/winwin.mp3") + # playsound("./materials/audio/sentences/welldone.mp3") + + gesture_dict[g_] = info_dict[g_] + + except Exception as inst: + print(type(inst),inst) # the exception instance + + + if info_dict["break"] == True: + break + +def audio_process_up_edge(info_dict): + + while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载 + time.sleep(2) + + gesture_names = ["click"] + gesture_dict = {} + + for k_ in gesture_names: + gesture_dict[k_] = None + while True: + time.sleep(0.01) + # print(" --->>> audio_process") + try: + for g_ in gesture_names: + if gesture_dict[g_] is None: + gesture_dict[g_] = info_dict[g_] + else: + + if ("click"==g_): + if (info_dict[g_]^gesture_dict[g_]) and info_dict[g_]==True:# 判断Click手势信号为上升沿,Click动作开始 + playsound("./materials/audio/cue/m2.mp3") + # playsound("./materials/audio/sentences/clik_quick.mp3") + + gesture_dict[g_] = info_dict[g_] + + except Exception as inst: + print(type(inst),inst) # the exception instance + + + if info_dict["break"] == True: + break +''' + 启动识别语音进程 +''' +def audio_process_recognize_up_edge(info_dict): + + while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载 + time.sleep(2) + + gesture_names = ["double_en_pts"] + gesture_dict = {} + + for k_ in gesture_names: + gesture_dict[k_] = None + + while True: + time.sleep(0.01) + # print(" --->>> audio_process") + try: + for g_ in gesture_names: + if gesture_dict[g_] is None: + gesture_dict[g_] = info_dict[g_] + else: + + if ("double_en_pts"==g_): + if (info_dict[g_]^gesture_dict[g_]) and info_dict[g_]==True:# 判断Click手势信号为上升沿,Click动作开始 + playsound("./materials/audio/sentences/IdentifyingObjectsWait.mp3") + playsound("./materials/audio/sentences/ObjectMayBeIdentified.mp3") + + gesture_dict[g_] = info_dict[g_] + + except Exception as inst: + print(type(inst),inst) # exception instance + + + if info_dict["break"] == True: + break +''' +/*****************************************/ + 算法 pipeline +/*****************************************/ +''' +def handpose_x_process(info_dict,config): + # 模型初始化 + print("load model component ...") + # yolo v3 手部检测模型初始化 + hand_detect_model = yolo_v3_hand_model(conf_thres=float(config["detect_conf_thres"]),nms_thres=float(config["detect_nms_thres"]), + model_arch = config["detect_model_arch"],model_path = 
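# A minimal sketch of the config parsing assumed by the lines above: the real parse_data_cfg()
# lives in lib/hand_lib/utils/utils.py (outside this hunk), so the helper below only illustrates
# the key=value format documented in the README (inline "#" comments, optional spaces around "=");
# the name parse_data_cfg_sketch is illustrative and not part of the patch.
def parse_data_cfg_sketch(path):
    """Parse a key=value config file into a dict of strings."""
    options = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.split("#", 1)[0].strip()   # drop inline comments and surrounding spaces
            if not line or "=" not in line:
                continue
            key, value = line.split("=", 1)
            options[key.strip()] = value.strip()
    return options
# e.g. parse_data_cfg_sketch("lib/hand_lib/cfg/handpose.cfg")["detect_conf_thres"] -> "0.5"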
config["detect_model_path"]) + # handpose_x 21 关键点回归模型初始化 + handpose_model = handpose_x_model(model_arch = config["handpose_x_model_arch"],model_path = config["handpose_x_model_path"]) + # + gesture_model = None # 目前缺省 + # + object_recognize_model = None # 识别分类模型,目前缺省 + + # + img_reco_crop = None + + cap = cv2.VideoCapture(int(config["camera_id"])) # 开启摄像机 + + cap.set(cv2.CAP_PROP_EXPOSURE, -8) # 设置相机曝光,(注意:不是所有相机有效) + + # url="http://admin:admin@192.168.43.1:8081" + # cap=cv2.VideoCapture(url) + print("start handpose process ~") + + info_dict["handpose_procss_ready"] = True #多进程间的开始同步信号 + + gesture_lines_dict = {} # 点击使能时的轨迹点 + + hands_dict = {} # 手的信息 + hands_click_dict = {} #手的按键信息计数 + track_index = 0 # 跟踪的全局索引 + + while True: + ret, img = cap.read()# 读取相机图像 + if ret:# 读取相机图像成功 + # img = cv2.flip(img,-1) + algo_img = img.copy() + st_ = time.time() + #------ + hand_bbox =hand_detect_model.predict(img,vis = True) # 检测手,获取手的边界框 + + hands_dict,track_index = hand_tracking(data = hand_bbox,hands_dict = hands_dict,track_index = track_index) # 手跟踪,目前通过IOU方式进行目标跟踪 + # 检测每个手的关键点及相关信息 + handpose_list = handpose_track_keypoints21_pipeline(img,hands_dict = hands_dict,hands_click_dict = hands_click_dict,track_index = track_index,algo_img = algo_img, + handpose_model = handpose_model,gesture_model = gesture_model, + icon = None,vis = True) + et_ = time.time() + fps_ = 1./(et_-st_+1e-8) + #------------------------------------------ 跟踪手的 信息维护 + #------------------ 获取跟踪到的手ID + id_list = [] + for i in range(len(handpose_list)): + _,_,_,dict_ = handpose_list[i] + id_list.append(dict_["id"]) + # print(id_list) + #----------------- 获取需要删除的手ID + id_del_list = [] + for k_ in gesture_lines_dict.keys(): + if k_ not in id_list:#去除过往已经跟踪失败的目标手的相关轨迹 + id_del_list.append(k_) + #----------------- 删除无法跟踪到的手的相关信息 + for k_ in id_del_list: + del gesture_lines_dict[k_] + del hands_click_dict[k_] + + #----------------- 更新检测到手的轨迹信息,及手点击使能时的上升沿和下降沿信号 + double_en_pts = [] + for i in range(len(handpose_list)): + _,_,_,dict_ = handpose_list[i] + id_ = dict_["id"] + if dict_["click"]: + if id_ not in gesture_lines_dict.keys(): + gesture_lines_dict[id_] = {} + gesture_lines_dict[id_]["pts"]=[] + gesture_lines_dict[id_]["line_color"] = (random.randint(100,255),random.randint(100,255),random.randint(100,255)) + gesture_lines_dict[id_]["click"] = None + #判断是否上升沿 + if gesture_lines_dict[id_]["click"] is not None: + if gesture_lines_dict[id_]["click"] == False:#上升沿计数器 + info_dict["click_up_cnt"] += 1 + #获得点击状态 + gesture_lines_dict[id_]["click"] = True + #---获得坐标 + gesture_lines_dict[id_]["pts"].append(dict_["choose_pt"]) + double_en_pts.append(dict_["choose_pt"]) + else: + if id_ not in gesture_lines_dict.keys(): + gesture_lines_dict[id_] = {} + gesture_lines_dict[id_]["pts"]=[] + gesture_lines_dict[id_]["line_color"] = (random.randint(100,255),random.randint(100,255),random.randint(100,255)) + gesture_lines_dict[id_]["click"] = None + elif id_ in gesture_lines_dict.keys(): + + gesture_lines_dict[id_]["pts"]=[]# 清除轨迹 + #判断是否上升沿 + if gesture_lines_dict[id_]["click"] == True:#下降沿计数器 + info_dict["click_dw_cnt"] += 1 + # 更新点击状态 + gesture_lines_dict[id_]["click"] = False + + #绘制手click 状态时的大拇指和食指中心坐标点轨迹 + draw_click_lines(img,gesture_lines_dict,vis = bool(config["vis_gesture_lines"])) + # 判断各手的click状态是否稳定,且满足设定阈值 + flag_click_stable = judge_click_stabel(img,handpose_list,int(config["charge_cycle_step"])) + # 判断是否启动识别语音,且进行选中目标识别 + img_reco_crop = 
audio_recognize(img,algo_img,img_reco_crop,object_recognize_model,info_dict,double_en_pts,flag_click_stable) + + cv2.putText(img, 'HandNum:[{}]'.format(len(hand_bbox)), (5,25),cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 0, 0),5) + cv2.putText(img, 'HandNum:[{}]'.format(len(hand_bbox)), (5,25),cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255)) + + cv2.namedWindow("image",0) + cv2.imshow("image",img) + if cv2.waitKey(1) == 27: + info_dict["break"] = True + break + else: + break + + cap.release() + cv2.destroyAllWindows() + +def main_handpose_x(cfg_file): + config = parse_data_cfg(cfg_file) + + print("\n/---------------------- main_handpose_x config ------------------------/\n") + for k_ in config.keys(): + print("{} : {}".format(k_,config[k_])) + print("\n/------------------------------------------------------------------------/\n") + + print(" loading handpose_x local demo ...") + g_info_dict = Manager().dict()# 多进程共享字典初始化:用于多进程间的 key:value 操作 + g_info_dict["handpose_procss_ready"] = False # 进程间的开启同步信号 + g_info_dict["break"] = False # 进程间的退出同步信号 + g_info_dict["double_en_pts"] = False # 双手选中动作使能信号 + + g_info_dict["click_up_cnt"] = 0 + g_info_dict["click_dw_cnt"] = 0 + + print(" multiprocessing dict key:\n") + for key_ in g_info_dict.keys(): + print( " -> ",key_) + print() + + #-------------------------------------------------- 初始化各进程 + process_list = [] + t = Process(target=handpose_x_process,args=(g_info_dict,config,)) + process_list.append(t) + + t = Process(target=audio_process_recognize_up_edge,args=(g_info_dict,)) # 上升沿播放 + process_list.append(t) + + # t = Process(target=audio_process_dw_edge_cnt,args=(g_info_dict,)) # 下降沿播放 + # process_list.append(t) + # t = Process(target=audio_process_up_edge_cnt,args=(g_info_dict,)) # 上升沿播放 + # process_list.append(t) + + + + for i in range(len(process_list)): + process_list[i].start() + + for i in range(len(process_list)): + process_list[i].join()# 设置主线程等待子线程结束 + + del process_list diff --git a/components/hand_detect/acc_model.py b/components/hand_detect/acc_model.py new file mode 100644 index 0000000..f2a8658 --- /dev/null +++ b/components/hand_detect/acc_model.py @@ -0,0 +1,243 @@ +import torch +import torch.nn as nn +import torchvision +import time +import numpy as np +import sys + +def get_model_op(model_,print_flag = False): + # print('/********************* modules *******************/') + op_dict = {} + idx = 0 + for m in model_.modules(): + idx += 1 + if isinstance(m, nn.Conv2d): + if 'Conv2d' not in op_dict.keys(): + op_dict['Conv2d'] = 1 + else: + op_dict['Conv2d'] += 1 + if print_flag: + print('{}) {}'.format(idx,m)) + pass + elif isinstance(m, nn.BatchNorm2d): + if 'BatchNorm2d' not in op_dict.keys(): + op_dict['BatchNorm2d'] = 1 + else: + op_dict['BatchNorm2d'] += 1 + if print_flag: + print('{}) {}'.format(idx,m)) + pass + elif isinstance(m, nn.Linear): + if 'Linear' not in op_dict.keys(): + op_dict['Linear'] = 1 + else: + op_dict['Linear'] += 1 + if print_flag: + print('{}) {}'.format(idx,m)) + pass + elif isinstance(m, nn.Sequential): + if print_flag: + print('*******************{}) {}'.format(idx,m)) + for n in m: + if print_flag: + print('{}) {}'.format(idx,n)) + if 'Conv2d' not in op_dict.keys(): + op_dict['Conv2d'] = 1 + else: + op_dict['Conv2d'] += 1 + if 'BatchNorm2d' not in op_dict.keys(): + op_dict['BatchNorm2d'] = 1 + else: + op_dict['BatchNorm2d'] += 1 + if 'Linear' not in op_dict.keys(): + op_dict['Linear'] = 1 + else: + op_dict['Linear'] += 1 + if 'ReLU6' not in op_dict.keys(): + op_dict['ReLU6'] = 1 + else: + op_dict['ReLU6'] += 1 + 
pass + elif isinstance(m, nn.ReLU6): + if print_flag: + print('{}) {}'.format(idx,m)) + if 'ReLU6' not in op_dict.keys(): + op_dict['ReLU6'] = 1 + else: + op_dict['ReLU6'] += 1 + pass + elif isinstance(m, nn.Module): + if print_flag: + print('{}) {}'.format(idx,m)) + for n in m.modules(): + if isinstance(n, nn.Conv2d): + if print_flag: + print('{}) {}'.format(idx,n)) + if 'Conv2d' not in op_dict.keys(): + op_dict['Conv2d'] = 1 + else: + op_dict['Conv2d'] += 1 + if 'BatchNorm2d' not in op_dict.keys(): + op_dict['BatchNorm2d'] = 1 + else: + op_dict['BatchNorm2d'] += 1 + if 'Linear' not in op_dict.keys(): + op_dict['Linear'] = 1 + else: + op_dict['Linear'] += 1 + if 'ReLU6' not in op_dict.keys(): + op_dict['ReLU6'] = 1 + else: + op_dict['ReLU6'] += 1 + pass + pass + + else: + if print_flag: + print('{}) {}'.format(idx,m)) + pass + + # print('\n/********************** {} ********************/\n'.format(ops.network)) + for key in op_dict.keys(): + if print_flag: + print(' operation - {} : {}'.format(key,op_dict[key])) + +class DummyModule(nn.Module): + def __init__(self): + super(DummyModule, self).__init__() + + def forward(self, x): + return x + +def fuse(conv, bn): + # https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + with torch.no_grad(): + # init + if isinstance(conv, nn.Conv2d): + fusedconv = torch.nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + bias=True) + elif isinstance(conv, nn.ConvTranspose2d): # not supprot nn.ConvTranspose2d + fusedconv = nn.ConvTranspose2d( + conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + output_padding=conv.output_padding, + bias=True) + else: + print("error") + exit() + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) + + # prepare spatial bias + if conv.bias is not None: + b_conv = conv.bias + #b_conv = conv.bias.mul(bn.weight.div(torch.sqrt(bn.running_var + bn.eps))) # maybe, you should this one ? 
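# A minimal numerical check of the Conv+BN fusion algebra that fuse() implements:
#   W_fused = diag(gamma / sqrt(var + eps)) @ W_conv
#   b_fused = (b_conv - mean) * gamma / sqrt(var + eps) + beta
# Note that in the general case the conv bias should be scaled as well (the commented-out line
# above); adding b_conv unscaled is only exact when the conv has no bias, which is the usual
# layout in front of a BatchNorm. The sketch below is illustrative and not part of the patch.
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True).eval()
bn = nn.BatchNorm2d(8).eval()
fused = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True).eval()
with torch.no_grad():
    # give BN non-trivial affine parameters and running statistics so the check is meaningful
    bn.weight.uniform_(0.5, 1.5)
    bn.bias.uniform_(-0.5, 0.5)
    bn.running_mean.uniform_(-1.0, 1.0)
    bn.running_var.uniform_(0.5, 2.0)
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.copy_(conv.weight * scale.reshape(-1, 1, 1, 1))
    fused.bias.copy_((conv.bias - bn.running_mean) * scale + bn.bias)
x = torch.randn(1, 3, 16, 16)
print((bn(conv(x)) - fused(x)).abs().max())   # expected ~1e-6 (float32 round-off)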
+ else: + b_conv = torch.zeros(conv.weight.size(0)) + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(b_conv + b_bn) + + return fusedconv + +# idxx = 0 +def fuse_module(m): + # global idxx + children = list(m.named_children()) + c = None + cn = None + + for name, child in children: + # idxx += 1 + # print('-------------->>',idxx) + # if idxx%10==0: + # continue + # print("name {}, child {}".format(name, child)) + if isinstance(child, nn.BatchNorm2d) and c is not None: + bc = fuse(c, child) + m._modules[cn] = bc + # print('DummyModule() : ',DummyModule()) + m._modules[name] = DummyModule() + c = None + elif isinstance(child, nn.Conv2d): + c = child + cn = name + else: + fuse_module(child) + +def test_net(ops,m): + + use_cuda = torch.cuda.is_available() + use_cpu = False + if ops.force_cpu or use_cuda == False: + p = torch.randn([1, 3, 256, 256]) + device = torch.device("cpu") + use_cpu = True + else: + p = torch.randn([1, 3, 256, 256]).cuda() + device = torch.device("cuda:0") + + count = 50 + time_org = [] + m_o = m.to(device) + get_model_op(m_o) + # print(m) + for i in range(count): + s1 = time.time() + if use_cpu: + o_output = m_o(p) + else: + o_output = m_o(p).cpu() + s2 = time.time() + time_org.append(s2 - s1) + print("Original time: ", s2 - s1) + print('------------------------------------>>>>') + + fuse_module(m.to(torch.device("cpu"))) + + # print(m) + + m_f = m.to(device) + get_model_op(m_f) + + time_fuse = [] + for i in range(count): + s1 = time.time() + if use_cpu: + f_output = m_f(p) + else: + f_output = m_f(p).cpu() + s2 = time.time() + time_fuse.append(s2 - s1) + print("Fused time: ", s2 - s1) + + print("-" * 50) + print("org time:", np.mean(time_org)) + print("fuse time:", np.mean(time_fuse)) + for o in o_output: + print("org size:", o.size()) + for o in f_output: + print("fuse size:", o.size()) + for i in range(len(o_output)): + assert o_output[i].size()==f_output[i].size() + print("output[{}] max abs diff: {}".format(i, (o_output[i] - f_output[i]).abs().max().item())) + print("output[{}] MSE diff: {}".format(i, nn.MSELoss()(o_output[i], f_output[i]).item())) + + +def acc_model(ops,m): + # print('\n-------------------------------->>> before acc model') + get_model_op(m) + fuse_module(m) + # print('\n-------------------------------->>> after acc model') + get_model_op(m) + + return m diff --git a/components/hand_detect/utils/__init__.py b/components/hand_detect/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/components/hand_detect/utils/common_utils.py b/components/hand_detect/utils/common_utils.py new file mode 100644 index 0000000..6e8f105 --- /dev/null +++ b/components/hand_detect/utils/common_utils.py @@ -0,0 +1,656 @@ +#-*-coding:utf-8-*- +# date:2020-04-11 +# Author: Eric.Lee + +import os +import shutil +import cv2 +import numpy as np +import json +import torch +from dp_models.faceboxes.config import cfg +from dp_models.faceboxes.layers.functions.prior_box import PriorBox +from dp_models.faceboxes.utils.box_utils import decode +from dp_models.faceboxes.headpose.pose import * +import torch.nn.functional as F + +def mkdir_(path, flag_rm=False): + if os.path.exists(path): + if flag_rm == True: + shutil.rmtree(path) + os.mkdir(path) + print('remove {} done ~ '.format(path)) + else: + os.mkdir(path) + +def plot_box(bbox, img, color=None, label=None, line_thickness=None): + tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1 + color = color or [random.randint(0, 255) for _ in 
range(3)] + c1, c2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl)# 目标的bbox + if label: + tf = max(tl - 2, 1) + t_size = cv2.getTextSize(label, 0, fontScale=tl / 4, thickness=tf)[0] # label size + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 # 字体的bbox + cv2.rectangle(img, c1, c2, color, -1) # label 矩形填充 + # 文本绘制 + cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 4, [225, 255, 255],thickness=tf, lineType=cv2.LINE_AA) + +class JSON_Encoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + else: + return super(JSON_Encoder, self).default(obj) + +def draw_landmarks(img,output,r_bboxes,draw_circle): + img_width = img.shape[1] + img_height = img.shape[0] + dict_landmarks = {} + global_dict_landmarks = {} # 全局坐标系坐标 + faceswap_list = [] + + face_pts = [] + + for i in range(int(output.shape[0]/2)): + x = output[i*2+0]*float(img_width) + y = output[i*2+1]*float(img_height) + + face_pts .append([x+r_bboxes[0],y+r_bboxes[1]]) + + if i ==33 or i == 46 or i == 96 or i == 97 or i == 54 or i == 76 or i == 82: + faceswap_list.append((x+r_bboxes[0],y+r_bboxes[1])) + # cv2.circle(img, (int(x),int(y)), 8, (0,255,255),-1) + # + if 41>= i >=33: + if 'left_eyebrow' not in dict_landmarks.keys(): + dict_landmarks['left_eyebrow'] = [] + global_dict_landmarks['left_eyebrow'] = [] + dict_landmarks['left_eyebrow'].append([int(x),int(y),(0,255,0)]) + global_dict_landmarks['left_eyebrow'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + + + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,255,0),-1) + elif 50>= i >=42: + if 'right_eyebrow' not in dict_landmarks.keys(): + dict_landmarks['right_eyebrow'] = [] + global_dict_landmarks['right_eyebrow'] = [] + dict_landmarks['right_eyebrow'].append([int(x),int(y),(0,255,0)]) + global_dict_landmarks['right_eyebrow'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,255,0),-1) + elif 67>= i >=60: + if 'left_eye' not in dict_landmarks.keys(): + dict_landmarks['left_eye'] = [] + global_dict_landmarks['left_eye'] = [] + dict_landmarks['left_eye'].append([int(x),int(y),(255,55,255)]) + global_dict_landmarks['left_eye'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,0,255),-1) + elif 75>= i >=68: + if 'right_eye' not in dict_landmarks.keys(): + dict_landmarks['right_eye'] = [] + global_dict_landmarks['right_eye'] = [] + dict_landmarks['right_eye'].append([int(x),int(y),(255,55,255)]) + global_dict_landmarks['right_eye'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,0,255),-1) + elif 97>= i >=96: + if 'eye_center' not in dict_landmarks.keys(): + global_dict_landmarks['eye_center'] = [] + global_dict_landmarks['eye_center'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + + cv2.circle(img, (int(x),int(y)), 2, (0,0,255),-1) + elif 54>= i >=51: + if 'bridge_nose' not in dict_landmarks.keys(): + dict_landmarks['bridge_nose'] = [] + global_dict_landmarks['bridge_nose'] = [] + dict_landmarks['bridge_nose'].append([int(x),int(y),(0,170,255)]) + global_dict_landmarks['bridge_nose'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,170,255),-1) + elif 32>= i >=0: + if 'basin' not in dict_landmarks.keys(): + 
dict_landmarks['basin'] = [] + global_dict_landmarks['basin'] = [] + dict_landmarks['basin'].append([int(x),int(y),(255,30,30)]) + global_dict_landmarks['basin'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,30,30),-1) + elif 59>= i >=55: + if 'wing_nose' not in dict_landmarks.keys(): + dict_landmarks['wing_nose'] = [] + global_dict_landmarks['wing_nose'] = [] + dict_landmarks['wing_nose'].append([int(x),int(y),(0,255,255)]) + global_dict_landmarks['wing_nose'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,255,255),-1) + elif 87>= i >=76: + if 'out_lip' not in dict_landmarks.keys(): + dict_landmarks['out_lip'] = [] + global_dict_landmarks['out_lip'] = [] + dict_landmarks['out_lip'].append([int(x),int(y),(255,255,0)]) + global_dict_landmarks['out_lip'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,255,0),-1) + elif 95>= i >=88: + if 'in_lip' not in dict_landmarks.keys(): + dict_landmarks['in_lip'] = [] + global_dict_landmarks['in_lip'] = [] + dict_landmarks['in_lip'].append([int(x),int(y),(50,220,255)]) + global_dict_landmarks['in_lip'].append([int(x+r_bboxes[0]),int(y+r_bboxes[1])]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (50,220,255),-1) + # else: + # if draw_circle: + # cv2.circle(img, (int(x),int(y)), 2, (255,0,255),-1) + + faceswap_list_e = [] + + for i in range(5): + faceswap_list_e.append(faceswap_list[i][0]) + for i in range(5): + faceswap_list_e.append(faceswap_list[i][1]) + + + return dict_landmarks,faceswap_list_e,global_dict_landmarks,face_pts + +def draw_contour(image,dict,r_bbox,face_pts): + x0 = r_bbox[0]# 全图偏置 + y0 = r_bbox[1] + + #------------------------------------------ + face_ola_pts = [] + face_ola_pts.append(face_pts[33]) + face_ola_pts.append(face_pts[38]) + face_ola_pts.append(face_pts[50]) + face_ola_pts.append(face_pts[46]) + + face_ola_pts.append(face_pts[60]) + face_ola_pts.append(face_pts[64]) + face_ola_pts.append(face_pts[68]) + face_ola_pts.append(face_pts[72]) + + face_ola_pts.append(face_pts[51]) + face_ola_pts.append(face_pts[55]) + face_ola_pts.append(face_pts[59]) + + face_ola_pts.append(face_pts[53]) + face_ola_pts.append(face_pts[57]) + + pts_num = len(face_ola_pts) + reprojectdst, euler_angle = get_head_pose(np.array(face_ola_pts).reshape((pts_num,2)),image,vis = False) + pitch, yaw, roll = euler_angle + + for key in dict.keys(): + # print(key) + _,_,color = dict[key][0] + + if 'left_eye' == key: + eye_x = np.mean([dict[key][i][0]+x0 for i in range(len(dict[key]))]) + eye_y = np.mean([dict[key][i][1]+y0 for i in range(len(dict[key]))]) + cv2.circle(image, (int(eye_x),int(eye_y)), 3, (255,255,55),-1) + if 'right_eye' == key: + eye_x = np.mean([dict[key][i][0]+x0 for i in range(len(dict[key]))]) + eye_y = np.mean([dict[key][i][1]+y0 for i in range(len(dict[key]))]) + cv2.circle(image, (int(eye_x),int(eye_y)), 3, (255,215,25),-1) + + if 'basin' == key or 'wing_nose' == key: + pts = np.array([[dict[key][i][0]+x0,dict[key][i][1]+y0] for i in range(len(dict[key]))],np.int32) + # print(pts) + cv2.polylines(image,[pts],False,color,thickness = 2) + + else: + points_array = np.zeros((1,len(dict[key]),2),dtype = np.int32) + for i in range(len(dict[key])): + x,y,_ = dict[key][i] + points_array[0,i,0] = x+x0 + points_array[0,i,1] = y+y0 + + # cv2.fillPoly(image, points_array, color) + cv2.drawContours(image,points_array,-1,color,thickness=2) + return (pitch, yaw, 
roll) + +import random +rgbs = [] +for j in range(100): + rgb = (random.randint(0,255),random.randint(0,255),random.randint(0,255)) + rgbs.append(rgb) + +def draw_global_contour(image,dict): + + + x0,y0 = 0,0 + idx = 0 + for key in dict.keys(): + idx += 1 + # print(key) + # _,_ = dict[key][0] + + if 'left_eye' == key: + eye_x = np.mean([dict[key][i][0]+x0 for i in range(len(dict[key]))]) + eye_y = np.mean([dict[key][i][1]+y0 for i in range(len(dict[key]))]) + cv2.circle(image, (int(eye_x),int(eye_y)), 3, (255,255,55),-1) + if 'right_eye' == key: + eye_x = np.mean([dict[key][i][0]+x0 for i in range(len(dict[key]))]) + eye_y = np.mean([dict[key][i][1]+y0 for i in range(len(dict[key]))]) + cv2.circle(image, (int(eye_x),int(eye_y)), 3, (255,215,25),-1) + + if 'basin' == key or 'wing_nose' == key: + pts = np.array([[dict[key][i][0]+x0,dict[key][i][1]+y0] for i in range(len(dict[key]))],np.int32) + # print(pts) + cv2.polylines(image,[pts],False,rgbs[idx],thickness = 2) + + else: + points_array = np.zeros((1,len(dict[key]),2),dtype = np.int32) + for i in range(len(dict[key])): + x,y = dict[key][i] + points_array[0,i,0] = x+x0 + points_array[0,i,1] = y+y0 + + # cv2.fillPoly(image, points_array, color) + cv2.drawContours(image,points_array,-1,rgbs[idx],thickness=2) + +def refine_face_bbox(bbox,img_shape): + height,width,_ = img_shape + + x1,y1,x2,y2 = bbox + + expand_w = (x2-x1) + expand_h = (y2-y1) + + x1 -= expand_w*0.06 + y1 += expand_h*0.15 + x2 += expand_w*0.06 + y2 += expand_h*0.03 + + x1,y1,x2,y2 = int(x1),int(y1),int(x2),int(y2) + + x1 = int(max(0,x1)) + y1 = int(max(0,y1)) + x2 = int(min(x2,width-1)) + y2 = int(min(y2,height-1)) + + return (x1,y1,x2,y2) +def py_cpu_nms(dets, thresh): + """Pure Python NMS baseline.""" + x1 = dets[:, 0] + y1 = dets[:, 1] + x2 = dets[:, 2] + y2 = dets[:, 3] + scores = dets[:, 4] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= thresh)[0] + order = order[inds + 1] + + return keep + +def check_keys(model, pretrained_state_dict): + ckpt_keys = set(pretrained_state_dict.keys()) + model_keys = set(model.state_dict().keys()) + used_pretrained_keys = model_keys & ckpt_keys + unused_pretrained_keys = ckpt_keys - model_keys + missing_keys = model_keys - ckpt_keys + # print('Missing keys:{}'.format(len(missing_keys))) + # print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys))) + # print('Used keys:{}'.format(len(used_pretrained_keys))) + assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint' + return True + +def remove_prefix(state_dict, prefix): + ''' Old style model is stored with all names of parameters sharing common prefix 'module.' 
''' + # print('remove prefix \'{}\''.format(prefix)) + f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x + return {f(key): value for key, value in state_dict.items()} + + +def load_model(model, pretrained_path, load_to_cpu): + # print('Loading pretrained model from {}'.format(pretrained_path)) + if load_to_cpu: + pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage) + else: + device = torch.cuda.current_device() + pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device)) + if "state_dict" in pretrained_dict.keys(): + pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.') + else: + pretrained_dict = remove_prefix(pretrained_dict, 'module.') + check_keys(model, pretrained_dict) + model.load_state_dict(pretrained_dict, strict=False) + return model + + +def detect_faces(ops,detect_model,img_raw,device): + resize = 1 + img = np.float32(img_raw) + if resize != 1: + img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR) + im_height, im_width, _ = img.shape + scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]) + img -= (104, 117, 123) + img = img.transpose(2, 0, 1) + img = torch.from_numpy(img).unsqueeze(0) + img = img.to(device) + scale = scale.to(device) + + + loc, conf = detect_model(img) # forward pass + + priorbox = PriorBox(cfg, image_size=(im_height, im_width)) + priors = priorbox.forward() + priors = priors.to(device) + prior_data = priors.data + boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance']) + boxes = boxes * scale / resize + boxes = boxes.cpu().numpy() + scores = conf.squeeze(0).data.cpu().numpy()[:, 1] + + # ignore low scores + inds = np.where(scores > ops.confidence_threshold)[0] + boxes = boxes[inds] + scores = scores[inds] + + # keep top-K before NMS + order = scores.argsort()[::-1][:ops.top_k] + boxes = boxes[order] + scores = scores[order] + + # do NMS + dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False) + #keep = py_cpu_nms(dets, ops.nms_threshold) + # keep = nms(dets, ops.nms_threshold,force_cpu=True) + keep = py_cpu_nms(dets, ops.nms_threshold) + dets = dets[keep, :] + + # keep top-K faster NMS + dets = dets[:ops.keep_top_k, :] + + return dets + + + + +def get_faces_batch_landmarks(ops,landmarks_model,express_model,dets,img_raw,use_cuda,draw_bbox = True): + # 绘制图像 + image_batch = None + r_bboxes = [] + imgs_crop = [] + for b in dets: + + text = "{:.4f}".format(b[4]) + b = list(map(int, b)) + + r_bbox = refine_face_bbox((b[0],b[1],b[2],b[3]),img_raw.shape) + r_bboxes.append(r_bbox) + img_crop = img_raw[r_bbox[1]:r_bbox[3],r_bbox[0]:r_bbox[2]] + imgs_crop.append(img_crop) + img_ = cv2.resize(img_crop, (256,256), interpolation = cv2.INTER_LINEAR) # INTER_LINEAR INTER_CUBIC + + img_ = img_.astype(np.float32) + img_ = (img_-128.)/256. 
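# A minimal usage example for py_cpu_nms() defined earlier in this file; the detections below
# are made-up [x1, y1, x2, y2, score] rows and assume py_cpu_nms is in scope:
import numpy as np

dets_demo = np.array([[10, 10, 50, 50, 0.90],       # kept (highest score)
                      [12, 12, 48, 48, 0.80],       # suppressed: IoU with box 0 is ~0.81 > 0.45
                      [100, 100, 140, 140, 0.70]],  # kept: no overlap with box 0
                     dtype=np.float32)
print(py_cpu_nms(dets_demo, thresh=0.45))           # expected: [0, 2]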
+ + img_ = img_.transpose(2, 0, 1) + img_ = np.expand_dims(img_,0) + + if image_batch is None: + image_batch = img_ + else: + image_batch = np.concatenate((image_batch,img_),axis=0) + for b in dets: + + text = "{:.4f}".format(b[4]) + b = list(map(int, b)) + if draw_bbox: + cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2) + cx = b[0] + cy = b[1] - 3 + if draw_bbox: + cv2.putText(img_raw, text, (cx, cy),cv2.FONT_HERSHEY_DUPLEX, 0.6, (155, 155, 255),3) + cv2.putText(img_raw, text, (cx, cy),cv2.FONT_HERSHEY_DUPLEX, 0.6, (155, 10, 10),1) + + # 填充最大 关键点 批次数据 + # if len(dets) < 5: + # im_mask = np.zeros([1,3,ops.landmarks_img_size[0],ops.landmarks_img_size[1]], dtype = np.float32) + # for i in range(ops.max_batch_size-len(dets)): + # if image_batch is None: + # image_batch = im_mask + # else: + # image_batch = np.concatenate((image_batch,im_mask),axis=0) + + image_batch = torch.from_numpy(image_batch).float() + + if use_cuda: + image_batch = image_batch.cuda() # (bs, 3, h, w) + #----------------- express + pre_e = express_model(image_batch.float()) + + outputs_e = F.softmax(pre_e,dim = 1) + + # print("outputs_e size : ",outputs_e.size()) + + outputs_e = outputs_e.cpu().detach().numpy() + outputs_e = np.array(outputs_e) + # + max_index_e = np.argmax(outputs_e,axis = 1) + # print("max_index_e shape :",max_index_e.shape) + # print("max_index_e:",max_index_e) + # print("outputs_e .shape:",outputs_e.shape) + express_dict = { + 0:"001.anger", + 1:"002.disgust", + 2:"003.fear", + 3:"004.happy", + 4:"005.normal", + 5:"006.sad", + 6:"007.surprised", + } + express_list = [] + for kk in range(max_index_e.shape[0]): + max_index_ = max_index_e[kk] + score_ = outputs_e[kk][max_index_] + express_list.append((max_index_,express_dict[max_index_],score_)) + # print("max_index : {}, score : {:.3f}, express : {}".format(max_index_,score_,express_dict[max_index_])) + # score_e = outputs_e[max_index_e] + # print("score_e : ",score_e) + #----------------- landmarks + pre_ = landmarks_model(image_batch.float()) + + # print(pre_.size()) + output = pre_.cpu().detach().numpy() + # print('output shape : ',output.shape) + # n_array = np.zeros([ops.landmarks_img_size[0],ops.landmarks_img_size[1],3], dtype = np.float) + faceswap_landmarks = [] + output_dict_ = [] + for i in range(len(dets)): + + dict_landmarks,list_e,global_dict_landmarks,face_pts = draw_landmarks(imgs_crop[i],output[i],r_bboxes[i],draw_circle = False) + faceswap_landmarks.append(list_e) + pitch, yaw, roll = draw_contour(img_raw,dict_landmarks,r_bboxes[i],face_pts) + + output_dict_.append({ + "xyxy":(r_bboxes[i][0],r_bboxes[i][1],r_bboxes[i][2],r_bboxes[i][3]), + "score":str(dets[i][4]), + "landmarks":global_dict_landmarks, + "euler_angle":(int(pitch[0]), int(yaw[0]), int(roll[0])), + "express":(float(express_list[i][0]),float(express_list[i][2])), + }) + + + # print('dets :',dets) + #----------------------------------------------------------------------------------- + for i in range(len(dets)): + bbox = dets[i] + min_x = int(bbox[0]) + min_y = int(bbox[1]) + max_x = int(bbox[2]) + max_y = int(bbox[3]) + cv2.rectangle(img_raw, (min_x, min_y), (max_x, max_y), (255, 0, 255), thickness=4) + for k in range(5): + x = int(faceswap_landmarks[i][k+0]) + y = int(faceswap_landmarks[i][k+5]) + # cv2.circle(img_raw,(x,y),5+k*2,(0,0,255),-1) + if draw_bbox: + cv2.circle(img_raw,(x,y),2,(0,0,255),-1) + if draw_bbox: + + cv2.putText(img_raw, "express:{},{:.2f}".format(express_list[i][1],express_list[i][2]), (min_x, min_y-20),cv2.FONT_HERSHEY_DUPLEX, 0.6, 
(155, 155, 255),3) + cv2.putText(img_raw, "express:{},{:.2f}".format(express_list[i][1],express_list[i][2]), (min_x, min_y-20),cv2.FONT_HERSHEY_DUPLEX, 0.6, (155, 10, 10),1) + if draw_bbox: + cv2.putText(img_raw, 'face:'+str(len(dets)), (3,35),cv2.FONT_HERSHEY_DUPLEX, 1.45, (55, 255, 255),5) + cv2.putText(img_raw, 'face:'+str(len(dets)), (3,35),cv2.FONT_HERSHEY_DUPLEX, 1.45, (135, 135, 5),2) + + return output_dict_ +def get_faces_batch_landmarks_plfd(ops,landmarks_model,express_model,dets,img_raw,use_cuda,draw_bbox = True): + # 绘制图像 + image_batch = None + r_bboxes = [] + imgs_crop = [] + for b in dets: + + text = "{:.4f}".format(b[4]) + b = list(map(int, b)) + + r_bbox = refine_face_bbox((b[0],b[1],b[2],b[3]),img_raw.shape) + r_bboxes.append(r_bbox) + img_crop = img_raw[r_bbox[1]:r_bbox[3],r_bbox[0]:r_bbox[2]] + imgs_crop.append(img_crop) + img_ = cv2.resize(img_crop, (112,112), interpolation = cv2.INTER_LINEAR) # INTER_LINEAR INTER_CUBIC + + img_ = img_.astype(np.float32) + img_ = img_/256. + + img_ = img_.transpose(2, 0, 1) + img_ = np.expand_dims(img_,0) + + if image_batch is None: + image_batch = img_ + else: + image_batch = np.concatenate((image_batch,img_),axis=0) + for b in dets: + + text = "{:.4f}".format(b[4]) + b = list(map(int, b)) + if draw_bbox: + cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2) + cx = b[0] + cy = b[1] - 3 + if draw_bbox: + cv2.putText(img_raw, text, (cx, cy),cv2.FONT_HERSHEY_DUPLEX, 0.6, (155, 155, 255),3) + cv2.putText(img_raw, text, (cx, cy),cv2.FONT_HERSHEY_DUPLEX, 0.6, (155, 10, 10),1) + + # 填充最大 关键点 批次数据 + # if len(dets) < 5: + # im_mask = np.zeros([1,3,ops.landmarks_img_size[0],ops.landmarks_img_size[1]], dtype = np.float32) + # for i in range(ops.max_batch_size-len(dets)): + # if image_batch is None: + # image_batch = im_mask + # else: + # image_batch = np.concatenate((image_batch,im_mask),axis=0) + + image_batch = torch.from_numpy(image_batch).float() + + if use_cuda: + image_batch = image_batch.cuda() # (bs, 3, h, w) + + #----------------- express + pre_e = express_model(image_batch.float()) + + outputs_e = F.softmax(pre_e,dim = 1) + + # print("outputs_e size : ",outputs_e.size()) + + outputs_e = outputs_e.cpu().detach().numpy() + outputs_e = np.array(outputs_e) + # + max_index_e = np.argmax(outputs_e,axis = 1) + # print("max_index_e shape :",max_index_e.shape) + # print("max_index_e:",max_index_e) + # print("outputs_e .shape:",outputs_e.shape) + express_dict = { + 0:"001.anger", + 1:"002.disgust", + 2:"003.fear", + 3:"004.happy", + 4:"005.normal", + 5:"006.sad", + 6:"007.surprised", + } + express_list = [] + for kk in range(max_index_e.shape[0]): + max_index_ = max_index_e[kk] + score_ = outputs_e[kk][max_index_] + express_list.append((max_index_,express_dict[max_index_],score_)) + # print("max_index : {}, score : {:.3f}, express : {}".format(max_index_,score_,express_dict[max_index_])) + # score_e = outputs_e[max_index_e] + # print("score_e : ",score_e) + #----------------------------------------- + _,pre_ = landmarks_model(image_batch.float()) + # print("pre_ : ",pre_) + # print(pre_.size()) + output = pre_.cpu().detach().numpy() + # print('output shape : ',output.shape) + # n_array = np.zeros([ops.landmarks_img_size[0],ops.landmarks_img_size[1],3], dtype = np.float) + faceswap_landmarks = [] + output_dict_ = [] + for i in range(len(dets)): + + dict_landmarks,list_e,global_dict_landmarks,face_pts = draw_landmarks(imgs_crop[i],output[i],r_bboxes[i],draw_circle = False) + faceswap_landmarks.append(list_e) + pitch, yaw, roll = 
draw_contour(img_raw,dict_landmarks,r_bboxes[i],face_pts) + + output_dict_.append({ + "xyxy":(r_bboxes[i][0],r_bboxes[i][1],r_bboxes[i][2],r_bboxes[i][3]), + "score":str(dets[i][4]), + "landmarks":global_dict_landmarks, + "euler_angle":(int(pitch[0]), int(yaw[0]), int(roll[0])), + "express":(float(express_list[i][0]),float(express_list[i][2])), + }) + + + # print('dets :',dets) + #----------------------------------------------------------------------------------- + for i in range(len(dets)): + bbox = dets[i] + min_x = int(bbox[0]) + min_y = int(bbox[1]) + max_x = int(bbox[2]) + max_y = int(bbox[3]) + cv2.rectangle(img_raw, (min_x, min_y), (max_x, max_y), (255, 0, 255), thickness=2) + for k in range(5): + x = int(faceswap_landmarks[i][k+0]) + y = int(faceswap_landmarks[i][k+5]) + # cv2.circle(img_raw,(x,y),5+k*2,(0,0,255),-1) + if draw_bbox: + cv2.circle(img_raw,(x,y),2,(0,0,255),-1) + if draw_bbox: + + cv2.putText(img_raw, "express:{},{:.2f}".format(express_list[i][1],express_list[i][2]), (min_x, min_y-20),cv2.FONT_HERSHEY_DUPLEX, 0.6, (155, 155, 255),3) + cv2.putText(img_raw, "express:{},{:.2f}".format(express_list[i][1],express_list[i][2]), (min_x, min_y-20),cv2.FONT_HERSHEY_DUPLEX, 0.6, (155, 10, 10),1) + + if draw_bbox: + cv2.putText(img_raw, 'face:'+str(len(dets)), (3,35),cv2.FONT_HERSHEY_DUPLEX, 1.45, (55, 255, 255),5) + cv2.putText(img_raw, 'face:'+str(len(dets)), (3,35),cv2.FONT_HERSHEY_DUPLEX, 1.45, (135, 135, 5),2) + + return output_dict_ diff --git a/components/hand_detect/utils/datasets.py b/components/hand_detect/utils/datasets.py new file mode 100644 index 0000000..02ebd47 --- /dev/null +++ b/components/hand_detect/utils/datasets.py @@ -0,0 +1,395 @@ +import glob +import math +import os +import random +import shutil +from pathlib import Path +from PIL import Image +from tqdm import tqdm +import cv2 +import numpy as np +import torch +from torch.utils.data import Dataset +from torch.utils.data import DataLoader + +def xyxy2xywh(x): + # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h] + y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 + y[:, 2] = x[:, 2] - x[:, 0] + y[:, 3] = x[:, 3] - x[:, 1] + return y + + +def xywh2xyxy(x): + # Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2] + y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 + y[:, 1] = x[:, 1] - x[:, 3] / 2 + y[:, 2] = x[:, 0] + x[:, 2] / 2 + y[:, 3] = x[:, 1] + x[:, 3] / 2 + return y + + +class LoadImages: # for inference + def __init__(self, path, img_size=416): + self.height = img_size + img_formats = ['.jpg', '.jpeg', '.png', '.tif'] + vid_formats = ['.mov', '.avi', '.mp4'] + + files = [] + if os.path.isdir(path): + files = sorted(glob.glob('%s/*.*' % path)) + elif os.path.isfile(path): + files = [path] + + images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats] + videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats] + nI, nV = len(images), len(videos) + + self.files = images + videos + self.nF = nI + nV # number of files + self.video_flag = [False] * nI + [True] * nV + self.mode = 'images' + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nF > 0, 'No images or videos found in ' + path + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nF: + raise StopIteration + path = 
self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nF: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'File Not Found ' + path + print('image %g/%g %s: ' % (self.count, self.nF, path), end='') + + # Padded resize + img, _, _, _ = letterbox(img0, height=self.height) + + # Normalize RGB + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB + img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + + # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nF # number of files + + +class LoadWebcam: # for inference + def __init__(self, img_size=416): + self.cam = cv2.VideoCapture(0) + self.height = img_size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == 27: # esc to quit + cv2.destroyAllWindows() + raise StopIteration + + # Read image + ret_val, img0 = self.cam.read() + assert ret_val, 'Webcam Error' + img_path = 'webcam_%g.jpg' % self.count + img0 = cv2.flip(img0, 1) # flip left-right + + # Padded resize + img, _, _, _ = letterbox(img0, height=self.height) + + # Normalize RGB + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB + img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + + return img_path, img, img0, self.cam + + def __len__(self): + return 0 + + +class LoadImagesAndLabels(Dataset): # for training/testing + def __init__(self, path, batch_size, img_size=416, augment=True, multi_scale=False): + print('LoadImagesAndLabels init : ',path) + with open(path, 'r') as file: + img_files = file.read().splitlines() + img_files = list(filter(lambda x: len(x) > 0, img_files)) + np.random.shuffle(img_files) # shuffle img_list + print("shuffle image...") + self.img_files = img_files + assert len(self.img_files) > 0, 'No images found in %s' % path + self.img_size = img_size + self.batch_size = batch_size + self.multi_scale = multi_scale + self.augment = augment + self.scale_index = 0 + if self.multi_scale: + self.img_size = img_size # initiate with maximum multi_scale size, in case of out of memory + print("Multi scale images training, init img_size", self.img_size) + else: + print("Fixed scale images, img_size", self.img_size) + self.label_files = [ + x.replace('images', 'labels').replace("JPEGImages", 'labels').replace('.bmp', '.txt').replace('.jpg', '.txt').replace('.png', '.txt') + for x in self.img_files] + + # print('self.img_files : ',self.img_files[1]) + # print('self.label_files : ',self.label_files[1]) + + def __len__(self): + return len(self.img_files) + + def __getitem__(self, index): + + # if self.multi_scale and (index % self.batch_size == 0) and index != 0: + if self.multi_scale and (self.scale_index % self.batch_size == 0)and self.scale_index != 0: + self.img_size = 
random.choice(range(11, 18)) * 32 + # print("++++++ change img_size, index:", self.img_size, index) + if self.multi_scale: + self.scale_index += 1 + if self.scale_index >= (100*self.batch_size): + self.scale_index = 0 + + + img_path = self.img_files[index] + label_path = self.label_files[index] + + img = cv2.imread(img_path) # BGR + assert img is not None, 'File Not Found ' + img_path + + augment_hsv = random.random() < 0.5 # hsv_aug prob = 0.5 + if self.augment and augment_hsv: + # SV augmentation by 50% + fraction = 0.50 # must be < 1.0 + img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + S = img_hsv[:, :, 1].astype(np.float32) + V = img_hsv[:, :, 2].astype(np.float32) + + a = (random.random() * 2 - 1) * fraction + 1 # a in [-0,5, 1.5] + S *= a + if a > 1: + np.clip(S, None, 255, out=S) + + a = (random.random() * 2 - 1) * fraction + 1 + V *= a + if a > 1: + np.clip(V, None, 255, out=V) + + img_hsv[:, :, 1] = S # .astype(np.uint8) + img_hsv[:, :, 2] = V # .astype(np.uint8) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) + + h, w, _ = img.shape + img, ratio, padw, padh = letterbox(img, height=self.img_size, augment=self.augment) + + # Load labels + labels = [] + if os.path.isfile(label_path): + with open(label_path, 'r') as file: + lines = file.read().splitlines() + + x = np.array([x.split() for x in lines], dtype=np.float32) + if x.size > 0: + # Normalized xywh to pixel xyxy format + labels = x.copy() + labels[:, 1] = ratio * w * (x[:, 1] - x[:, 3] / 2) + padw + labels[:, 2] = ratio * h * (x[:, 2] - x[:, 4] / 2) + padh + labels[:, 3] = ratio * w * (x[:, 1] + x[:, 3] / 2) + padw + labels[:, 4] = ratio * h * (x[:, 2] + x[:, 4] / 2) + padh + + # Augment image and labels + if self.augment: + img, labels = random_affine(img, labels, degrees=(-10, 10), translate=(0.10, 0.10), scale=(0.9, 1.1)) + + nL = len(labels) # number of labels + if nL: + # convert xyxy to xywh + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) / self.img_size # 转化 格式 ,且 归一化 + + if self.augment: + # random left-right flip + lr_flip = True + if lr_flip and random.random() > 0.5: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + # random up-down flip + ud_flip = False + if ud_flip and random.random() > 0.5: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + labels_out = torch.zeros((nL, 6))# 加了 一个 batch size + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Normalize + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + + return torch.from_numpy(img), labels_out, img_path, (h, w) + + @staticmethod + def collate_fn(batch): + img, label, path, hw = list(zip(*batch)) # transposed + for i, l in enumerate(label): + l[:, 0] = i # 获取 物体的 归属于 图片 的 index + return torch.stack(img, 0), torch.cat(label, 0), path, hw + + +def letterbox(img, height=416, augment=False, color=(127.5, 127.5, 127.5)): + # Resize a rectangular image to a padded square + shape = img.shape[:2] # shape = [height, width] + ratio = float(height) / max(shape) # ratio = old / new + new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) + dw = (height - new_shape[0]) / 2 # width padding + dh = (height - new_shape[1]) / 2 # height padding + top, bottom = round(dh - 0.1), round(dh + 0.1) + left, right = round(dw - 0.1), round(dw + 0.1) + # resize img + if augment: + interpolation = np.random.choice([None, cv2.INTER_NEAREST, cv2.INTER_LINEAR, + None, cv2.INTER_NEAREST, cv2.INTER_LINEAR, + 
cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]) + if interpolation is None: + img = cv2.resize(img, new_shape) + else: + img = cv2.resize(img, new_shape, interpolation=interpolation) + else: + img = cv2.resize(img, new_shape, interpolation=cv2.INTER_NEAREST) + # print("resize time:",time.time()-s1) + + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded square + return img, ratio, dw, dh + + +def random_affine(img, targets=(), degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2), + borderValue=(127.5, 127.5, 127.5)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4 + + if targets is None: + targets = [] + border = 0 # width of added border (optional) + height = max(img.shape[0], img.shape[1]) + border * 2 + + # Rotation and Scale + R = np.eye(3) + a = random.random() * (degrees[1] - degrees[0]) + degrees[0] + # a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations + s = random.random() * (scale[1] - scale[0]) + scale[0] + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s) + + # Translation + T = np.eye(3) + T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels) + T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg) + + M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!! 
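# A minimal usage sketch for letterbox() defined above, plus the label mapping used in
# LoadImagesAndLabels.__getitem__ (normalized xywh on the original image -> pixel xyxy in the
# padded square). Image size and label values are illustrative; letterbox is assumed in scope.
import numpy as np

img_demo = np.zeros((480, 640, 3), dtype=np.uint8)           # H x W x C dummy frame
padded, ratio, dw, dh = letterbox(img_demo, height=416, augment=False)
print(padded.shape, ratio, dw, dh)                           # (416, 416, 3) 0.65 0.0 52.0

cls_id, xc, yc, bw, bh = 0, 0.5, 0.5, 0.25, 0.25             # one YOLO-format label
h0, w0 = img_demo.shape[:2]
x1 = ratio * w0 * (xc - bw / 2) + dw                         # -> 156.0
y1 = ratio * h0 * (yc - bh / 2) + dh                         # -> 169.0
x2 = ratio * w0 * (xc + bw / 2) + dw                         # -> 260.0
y2 = ratio * h0 * (yc + bh / 2) + dh                         # -> 247.0
print([x1, y1, x2, y2])                                      # pixel xyxy in the 416x416 image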
+ imw = cv2.warpPerspective(img, M, dsize=(height, height), flags=cv2.INTER_LINEAR, + borderValue=borderValue) # BGR order borderValue + + # Return warped points also + if len(targets) > 0: + n = targets.shape[0] + points = targets[:, 1:5].copy() + area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1]) + + # warp points + xy = np.ones((n * 4, 3)) + xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = (xy @ M.T)[:, :2].reshape(n, 8) + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # apply angle-based reduction of bounding boxes + radians = a * math.pi / 180 + reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 + x = (xy[:, 2] + xy[:, 0]) / 2 + y = (xy[:, 3] + xy[:, 1]) / 2 + w = (xy[:, 2] - xy[:, 0]) * reduction + h = (xy[:, 3] - xy[:, 1]) * reduction + xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T + + # reject warped points outside of image + np.clip(xy, 0, height, out=xy) + w = xy[:, 2] - xy[:, 0] + h = xy[:, 3] - xy[:, 1] + area = w * h + ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) + i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10) + + targets = targets[i] + targets[:, 1:5] = xy[i] + + return imw, targets + + +def convert_images2bmp(): + # cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s + for path in ['../coco/images/val2014/', '../coco/images/train2014/']: + folder = os.sep + Path(path).name + output = path.replace(folder, folder + 'bmp') + if os.path.exists(output): + shutil.rmtree(output) # delete output folder + os.makedirs(output) # make new output folder + + for f in tqdm(glob.glob('%s*.jpg' % path)): + save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp') + cv2.imwrite(save_name, cv2.imread(f)) + + for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']: + with open(label_path, 'r') as file: + lines = file.read() + lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace( + '/Users/glennjocher/PycharmProjects/', '../') + with open(label_path.replace('5k', '5k_bmp'), 'w') as file: + file.write(lines) diff --git a/components/hand_detect/utils/torch_utils.py b/components/hand_detect/utils/torch_utils.py new file mode 100644 index 0000000..2c7352e --- /dev/null +++ b/components/hand_detect/utils/torch_utils.py @@ -0,0 +1,24 @@ +import torch + +def init_seeds(seed=0): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + +def select_device(force_cpu=False): + if force_cpu: + cuda = False + device = torch.device('cpu') + else: + cuda = torch.cuda.is_available() + device = torch.device('cuda:0' if cuda else 'cpu') + + if torch.cuda.device_count() > 1: + device = torch.device('cuda' if cuda else 'cpu') + # print('Found %g GPUs' % torch.cuda.device_count()) + # print('Multi-GPU Issue: https://github.com/ultralytics/yolov3/issues/21') + # torch.cuda.set_device(0) # OPTIONAL: Set your GPU if multiple available + # print('Using ', torch.cuda.device_count(), ' GPUs') + + # print('Using %s %s\n' % (device.type, torch.cuda.get_device_properties(0) if cuda else '')) + return device diff --git a/components/hand_detect/utils/utils.py b/components/hand_detect/utils/utils.py new file mode 100644 index 0000000..fd529c5 --- /dev/null +++ b/components/hand_detect/utils/utils.py @@ -0,0 +1,438 @@ +import glob +import random +import time +from collections import defaultdict + 
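# A worked example of the box-format conversion implemented twice in this patch
# (xyxy2xywh / xywh2xyxy in datasets.py above and again just below); the numbers are
# illustrative and the snippet uses plain numpy so it stands on its own:
import numpy as np

box_xyxy = np.array([[100., 150., 300., 350.]])
box_xywh = np.stack([(box_xyxy[:, 0] + box_xyxy[:, 2]) / 2,   # xc = (x1 + x2) / 2 -> 200
                     (box_xyxy[:, 1] + box_xyxy[:, 3]) / 2,   # yc = (y1 + y2) / 2 -> 250
                     box_xyxy[:, 2] - box_xyxy[:, 0],         # w  = x2 - x1       -> 200
                     box_xyxy[:, 3] - box_xyxy[:, 1]], 1)     # h  = y2 - y1       -> 200
box_back = np.stack([box_xywh[:, 0] - box_xywh[:, 2] / 2,     # x1 = xc - w/2
                     box_xywh[:, 1] - box_xywh[:, 3] / 2,     # y1 = yc - h/2
                     box_xywh[:, 0] + box_xywh[:, 2] / 2,     # x2 = xc + w/2
                     box_xywh[:, 1] + box_xywh[:, 3] / 2], 1) # y2 = yc + h/2
assert np.allclose(box_back, box_xyxy)                        # the round trip is exact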
+import cv2 +import numpy as np +import torch +import torch.nn as nn +from dp_models.light_pose.modules.keypoints import BODY_PARTS_KPT_IDS, BODY_PARTS_PAF_IDS + +# Set printoptions +torch.set_printoptions(linewidth=1320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 + +# Prevent OpenCV from multithreading (to use PyTorch DataLoader) +cv2.setNumThreads(0) + +def float3(x): # format floats to 3 decimals + return float(format(x, '.3f')) + +def init_seeds(seed=0): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + else: + torch.manual_seed(seed) + torch.manual_seed_all(seed) + + +def load_classes(path): + # Loads class labels at 'path' + fp = open(path, 'r') + names = fp.read().split('\n') + return list(filter(None, names)) # filter removes empty strings (such as last line) + + +def model_info(model): + # Plots a line-by-line description of a PyTorch model + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + print('\n%5s %60s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + for i, (name, p) in enumerate(model.named_parameters()): + # name = name.replace('module_list.', '') + print('%5g %60s %9s %12g %20s %10.3g %10.3g' % ( + i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + print('Model Summary: %g layers, %g parameters, %g gradients' % (i + 1, n_p, n_g)) + + + + + +def weights_init_normal(m): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + torch.nn.init.normal_(m.weight.data, 0.0, 0.03) + elif classname.find('BatchNorm2d') != -1: + torch.nn.init.normal_(m.weight.data, 1.0, 0.03) + torch.nn.init.constant_(m.bias.data, 0.0) + + +def xyxy2xywh(x): + # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h] + y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 + y[:, 2] = x[:, 2] - x[:, 0] + y[:, 3] = x[:, 3] - x[:, 1] + return y + + +def xywh2xyxy(x): + # Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2] + y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 + y[:, 1] = x[:, 1] - x[:, 3] / 2 + y[:, 2] = x[:, 0] + x[:, 2] / 2 + y[:, 3] = x[:, 1] + x[:, 3] / 2 + return y + +def scale_coords(img_size, coords, img0_shape):# image size 转为 原图尺寸 + # Rescale x1, y1, x2, y2 from 416 to image size + # print('coords : ',coords) + # print('img0_shape : ',img0_shape) + gain = float(img_size) / max(img0_shape) # gain = old / new + # print('gain : ',gain) + pad_x = (img_size - img0_shape[1] * gain) / 2 # width padding + pad_y = (img_size - img0_shape[0] * gain) / 2 # height padding + # print('pad_xpad_y : ',pad_x,pad_y) + coords[:, [0, 2]] -= pad_x + coords[:, [1, 3]] -= pad_y + coords[:, :4] /= gain + coords[:, :4] = torch.clamp(coords[:, :4], min=0)# 夹紧区间最小值不为负数 + return coords + + +def ap_per_class(tp, conf, pred_cls, target_cls): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (list). + conf: Objectness value from 0-1 (list). + pred_cls: Predicted object classes (list). 
+ target_cls: True object classes (list). + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + + # Create Precision-Recall curve and compute AP for each class + ap, p, r = [], [], [] + for c in unique_classes: + i = pred_cls == c + n_gt = (target_cls == c).sum() # Number of ground truth objects + n_p = i.sum() # Number of predicted objects + + if n_p == 0 and n_gt == 0: + continue + elif n_p == 0 or n_gt == 0: + ap.append(0) + r.append(0) + p.append(0) + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum() + tpc = (tp[i]).cumsum() + + # Recall + recall_curve = tpc / (n_gt + 1e-16) + r.append(recall_curve[-1]) + + # Precision + precision_curve = tpc / (tpc + fpc) + p.append(precision_curve[-1]) + + # AP from recall-precision curve + ap.append(compute_ap(recall_curve, precision_curve)) + + # Plot + # plt.plot(recall_curve, precision_curve) + + # Compute F1 score (harmonic mean of precision and recall) + p, r, ap = np.array(p), np.array(r), np.array(ap) + f1 = 2 * p * r / (p + r + 1e-16) + + return p, r, ap, f1, unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rbgirshick/py-faster-rcnn. + # Arguments + recall: The recall curve (list). + precision: The precision curve (list). + # Returns + The average precision as computed in py-faster-rcnn. + """ + # correct AP calculation + # first append sentinel values at the end + + mrec = np.concatenate(([0.], recall, [1.])) + mpre = np.concatenate(([0.], precision, [0.])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def bbox_iou(box1, box2, x1y1x2y2=True): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.t() + + # Get the coordinates of bounding boxes + if x1y1x2y2: + # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: + # x, y, w, h = box1 + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter_area = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + union_area = ((b1_x2 - b1_x1) * (b1_y2 - b1_y1) + 1e-16) + \ + (b2_x2 - b2_x1) * (b2_y2 - b2_y1) - inter_area + + return inter_area / union_area # iou + + +def wh_iou(box1, box2): + + box2 = box2.t() + + # w, h = box1 + w1, h1 = box1[0], box1[1] + w2, h2 = box2[0], box2[1] + + # Intersection area + inter_area = torch.min(w1, w2) * torch.min(h1, h2) + + # Union Area + union_area = (w1 * h1 + 1e-16) + w2 * h2 - inter_area + + return inter_area / union_area # iou + + +def compute_loss(p, targets): # predictions, targets + FT = torch.cuda.FloatTensor if p[0].is_cuda else torch.FloatTensor + lxy, lwh, lcls, lconf = FT([0]), FT([0]), FT([0]), FT([0]) # losses 初始化 为 0 + txy, twh, tcls, indices = targets + MSE = nn.MSELoss() + CE = nn.CrossEntropyLoss() + BCE = nn.BCEWithLogitsLoss()# 多标签分类时 使用 如 [1,1,0], + + # Compute losses + for i, pi0 in enumerate(p): # layer i predictions, i + b, a, gj, gi = indices[i] # image_idx, anchor_idx, gridx, gridy + + # print(i,') b, a, gj, gi : ') + # print('b', b) + # print('a', a) + # print('gj', gj) + # print('gi', gi) + + tconf = torch.zeros_like(pi0[..., 0]) # conf + + # print('tconf: ',tconf.size()) + # Compute losses + k = 1 # nT / bs + if len(b) > 0: + pi = pi0[b, a, gj, gi] # predictions closest to anchors + tconf[b, a, gj, gi] = 1 # conf + + lxy += (k * 8) * MSE(torch.sigmoid(pi[..., 0:2]), txy[i]) # xy loss + lwh += (k * 4) * MSE(pi[..., 2:4], twh[i]) # wh loss + lcls += (k * 1) * CE(pi[..., 5:], tcls[i]) # class_conf loss + + lconf += (k * 64) * BCE(pi0[..., 4], tconf) # obj_conf loss + loss = lxy + lwh + lconf + lcls + + # Add to dictionary + d = defaultdict(float) + losses = [loss.item(), lxy.item(), lwh.item(), lconf.item(), lcls.item()] + for name, x in zip(['total', 'xy', 'wh', 'conf', 'cls'], losses): + d[name] = x + + return loss, d + + +def build_targets(model, targets): + # targets = [image, class, x, y, w, h] + if isinstance(model, nn.parallel.DistributedDataParallel): + model = model.module + + txy, twh, tcls, indices = [], [], [], [] + for i, layer in enumerate(get_yolo_layers(model)):# 遍历 3 个 yolo layer + # print(i,'layer ',model.module_list[layer]) + layer = model.module_list[layer][0] + + # iou of targets-anchors + gwh = targets[:, 4:6] * layer.nG # 以 grid 为单位的 wh + iou = [wh_iou(x, gwh) for x in layer.anchor_vec] + iou, a = torch.stack(iou, 0).max(0) # best iou and anchor + + # reject below threshold ious (OPTIONAL, increases P, lowers R) + reject = True + if reject: + j = iou > 0.10 + t, a, gwh = targets[j], a[j], gwh[j] + else: + t = targets + + # Indices + b, c = t[:, :2].long().t() # target image, class + gxy = t[:, 2:4] * layer.nG + gi, gj = gxy.long().t() # grid_i, grid_j + indices.append((b, a, gj, gi)) # img_index , anchor_index , grid_x , grid_y + + # print('b, a, gj, gi : ') + # print('b', b) + # print('a', a) + # 
print('gj', gj) + # print('gi', gi) + # print('class c',c) + + # XY coordinates + txy.append(gxy - gxy.floor())#转化为grid相对坐标 + + # Width and height + twh.append(torch.log(gwh / layer.anchor_vec[a])) # yolo method 对数 + # twh.append(torch.sqrt(gwh / layer.anchor_vec[a]) / 2) # power method + + # Class + tcls.append(c) + # try: + # print('c.max,layer.nC: ',c.max().item() ,layer.nC) + # except: + # pass + if c.shape[0]: + assert c.max().item() <= layer.nC, 'Target classes exceed model classes' + + return txy, twh, tcls, indices + + +# @profile +def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4): + """ + Removes detections with lower object confidence score than 'conf_thres' + Non-Maximum Suppression to further filter detections. + Returns detections with shape: + (x1, y1, x2, y2, object_conf, class_conf, class) + """ + + min_wh = 2 # (pixels) minimum box width and height + + output = [None] * len(prediction) + for image_i, pred in enumerate(prediction): + # Experiment: Prior class size rejection + # x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3] + # a = w * h # area + # ar = w / (h + 1e-16) # aspect ratio + # n = len(w) + # log_w, log_h, log_a, log_ar = torch.log(w), torch.log(h), torch.log(a), torch.log(ar) + # shape_likelihood = np.zeros((n, 60), dtype=np.float32) + # x = np.concatenate((log_w.reshape(-1, 1), log_h.reshape(-1, 1)), 1) + # from scipy.stats import multivariate_normal + # for c in range(60): + # shape_likelihood[:, c] = + # multivariate_normal.pdf(x, mean=mat['class_mu'][c, :2], cov=mat['class_cov'][c, :2, :2]) + + # Filter out confidence scores below threshold + class_conf, class_pred = pred[:, 5:].max(1) # max class_conf, index + pred[:, 4] *= class_conf # finall conf = obj_conf * class_conf + + i = (pred[:, 4] > conf_thres) & (pred[:, 2] > min_wh) & (pred[:, 3] > min_wh) + # s2=time.time() + pred2 = pred[i] + # print("++++++pred2 = pred[i]",time.time()-s2, pred2) + + # If none are remaining => process next image + if len(pred2) == 0: + continue + + # Select predicted classes + class_conf = class_conf[i] + class_pred = class_pred[i].unsqueeze(1).float() + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + pred2[:, :4] = xywh2xyxy(pred2[:, :4]) + # pred[:, 4] *= class_conf # improves mAP from 0.549 to 0.551 + + # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred) + pred2 = torch.cat((pred2[:, :5], class_conf.unsqueeze(1), class_pred), 1) + + # Get detections sorted by decreasing confidence scores + pred2 = pred2[(-pred2[:, 4]).argsort()] + + det_max = [] + nms_style = 'MERGE' # 'OR' (default), 'AND', 'MERGE' (experimental) + for c in pred2[:, -1].unique(): + dc = pred2[pred2[:, -1] == c] # select class c + dc = dc[:min(len(dc), 100)] # limit to first 100 boxes + + # Non-maximum suppression + if nms_style == 'OR': # default + # METHOD1 + # ind = list(range(len(dc))) + # while len(ind): + # j = ind[0] + # det_max.append(dc[j:j + 1]) # save highest conf detection + # reject = (bbox_iou(dc[j], dc[ind]) > nms_thres).nonzero() + # [ind.pop(i) for i in reversed(reject)] + + # METHOD2 + while dc.shape[0]: + det_max.append(dc[:1]) # save highest conf detection + if len(dc) == 1: # Stop if we're at the last detection + break + iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes + dc = dc[1:][iou < nms_thres] # remove ious > threshold + + elif nms_style == 'AND': # requires overlap, single boxes erased + while len(dc) > 1: + iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes + if iou.max() > 0.5: + det_max.append(dc[:1]) + dc 
= dc[1:][iou < nms_thres] # remove ious > threshold + + elif nms_style == 'MERGE': # weighted mixture box + while len(dc): + i = bbox_iou(dc[0], dc) > nms_thres # iou with other boxes + weights = dc[i, 4:5] + dc[0, :4] = (weights * dc[i, :4]).sum(0) / weights.sum() + det_max.append(dc[:1]) + dc = dc[i == 0] + + if len(det_max): + det_max = torch.cat(det_max) # concatenate + output[image_i] = det_max[(-det_max[:, 4]).argsort()] # sort + return output + + +def get_yolo_layers(model): + yolo_layer_index = [] + for index, l in enumerate(model.module_list): + try: + a = l[0].img_size and l[0].nG # only yolo layer need img_size and nG + # print("---"*50) + # print(l, index) + yolo_layer_index.append(index) + except: + pass + assert len(yolo_layer_index) > 0, "can not find yolo layer" + return yolo_layer_index diff --git a/components/hand_detect/yolo_v3_hand.py b/components/hand_detect/yolo_v3_hand.py new file mode 100644 index 0000000..0002440 --- /dev/null +++ b/components/hand_detect/yolo_v3_hand.py @@ -0,0 +1,312 @@ +#-*-coding:utf-8-*- +# date:2021-03-09 +# Author: Eric.Lee +# function: yolo v3 hand detect + +import os +import cv2 +import numpy as np +import time + +import torch + +from hand_detect.yolov3 import Yolov3, Yolov3Tiny +from hand_detect.utils.torch_utils import select_device +from hand_detect.acc_model import acc_model + +import torch.backends.cudnn as cudnn +import torch.nn.functional as F + + +import random + +def show_model_param(model): + params = list(model.parameters()) + k = 0 + for i in params: + l = 1 + for j in i.size(): + l *= j + print("该层的结构: {}, 参数和: {}".format(str(list(i.size())), str(l))) + k = k + l + print("----------------------") + print("总参数数量和: " + str(k)) + +def process_data(img, img_size=416):# 图像预处理 + img, _, _, _ = letterbox(img, height=img_size) + # Normalize RG25 + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB + img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + return img + +def plot_one_box(x, img, color=None, label=None, line_thickness=None): + # Plots one bounding box on image img + tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1 # line thickness + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl) + if label: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(img, c1, c2, color, -1) # filled + cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [255, 55,90], thickness=tf, lineType=cv2.LINE_AA) + +def bbox_iou(box1, box2, x1y1x2y2=True): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.t() + + # Get the coordinates of bounding boxes + if x1y1x2y2: + # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: + # x, y, w, h = box1 + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter_area = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + union_area = ((b1_x2 - b1_x1) * (b1_y2 - b1_y1) + 1e-16) + \ + (b2_x2 - b2_x1) * (b2_y2 - b2_y1) - inter_area + + return inter_area / union_area # iou + +def xywh2xyxy(x): + # Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2] + y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 + y[:, 1] = x[:, 1] - x[:, 3] / 2 + y[:, 2] = x[:, 0] + x[:, 2] / 2 + y[:, 3] = x[:, 1] + x[:, 3] / 2 + return y + + +def scale_coords(img_size, coords, img0_shape):# image size 转为 原图尺寸 + # Rescale x1, y1, x2, y2 from 416 to image size + # print('coords : ',coords) + # print('img0_shape : ',img0_shape) + gain = float(img_size) / max(img0_shape) # gain = old / new + # print('gain : ',gain) + pad_x = (img_size - img0_shape[1] * gain) / 2 # width padding + pad_y = (img_size - img0_shape[0] * gain) / 2 # height padding + # print('pad_xpad_y : ',pad_x,pad_y) + coords[:, [0, 2]] -= pad_x + coords[:, [1, 3]] -= pad_y + coords[:, :4] /= gain + coords[:, :4] = torch.clamp(coords[:, :4], min=0)# 夹紧区间最小值不为负数 + return coords + +def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4): + """ + Removes detections with lower object confidence score than 'conf_thres' + Non-Maximum Suppression to further filter detections. 
+ Returns detections with shape: + (x1, y1, x2, y2, object_conf, class_conf, class) + """ + + min_wh = 2 # (pixels) minimum box width and height + + output = [None] * len(prediction) + for image_i, pred in enumerate(prediction): + # Experiment: Prior class size rejection + # x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3] + # a = w * h # area + # ar = w / (h + 1e-16) # aspect ratio + # n = len(w) + # log_w, log_h, log_a, log_ar = torch.log(w), torch.log(h), torch.log(a), torch.log(ar) + # shape_likelihood = np.zeros((n, 60), dtype=np.float32) + # x = np.concatenate((log_w.reshape(-1, 1), log_h.reshape(-1, 1)), 1) + # from scipy.stats import multivariate_normal + # for c in range(60): + # shape_likelihood[:, c] = + # multivariate_normal.pdf(x, mean=mat['class_mu'][c, :2], cov=mat['class_cov'][c, :2, :2]) + + # Filter out confidence scores below threshold + class_conf, class_pred = pred[:, 5:].max(1) # max class_conf, index + pred[:, 4] *= class_conf # finall conf = obj_conf * class_conf + + i = (pred[:, 4] > conf_thres) & (pred[:, 2] > min_wh) & (pred[:, 3] > min_wh) + # s2=time.time() + pred2 = pred[i] + # print("++++++pred2 = pred[i]",time.time()-s2, pred2) + + # If none are remaining => process next image + if len(pred2) == 0: + continue + + # Select predicted classes + class_conf = class_conf[i] + class_pred = class_pred[i].unsqueeze(1).float() + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + pred2[:, :4] = xywh2xyxy(pred2[:, :4]) + # pred[:, 4] *= class_conf # improves mAP from 0.549 to 0.551 + + # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred) + pred2 = torch.cat((pred2[:, :5], class_conf.unsqueeze(1), class_pred), 1) + + # Get detections sorted by decreasing confidence scores + pred2 = pred2[(-pred2[:, 4]).argsort()] + + det_max = [] + nms_style = 'MERGE' # 'OR' (default), 'AND', 'MERGE' (experimental) + for c in pred2[:, -1].unique(): + dc = pred2[pred2[:, -1] == c] # select class c + dc = dc[:min(len(dc), 100)] # limit to first 100 boxes + + # Non-maximum suppression + if nms_style == 'OR': # default + # METHOD1 + # ind = list(range(len(dc))) + # while len(ind): + # j = ind[0] + # det_max.append(dc[j:j + 1]) # save highest conf detection + # reject = (bbox_iou(dc[j], dc[ind]) > nms_thres).nonzero() + # [ind.pop(i) for i in reversed(reject)] + + # METHOD2 + while dc.shape[0]: + det_max.append(dc[:1]) # save highest conf detection + if len(dc) == 1: # Stop if we're at the last detection + break + iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes + dc = dc[1:][iou < nms_thres] # remove ious > threshold + + elif nms_style == 'AND': # requires overlap, single boxes erased + while len(dc) > 1: + iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes + if iou.max() > 0.5: + det_max.append(dc[:1]) + dc = dc[1:][iou < nms_thres] # remove ious > threshold + + elif nms_style == 'MERGE': # weighted mixture box + while len(dc): + i = bbox_iou(dc[0], dc) > nms_thres # iou with other boxes + weights = dc[i, 4:5] + dc[0, :4] = (weights * dc[i, :4]).sum(0) / weights.sum() + det_max.append(dc[:1]) + dc = dc[i == 0] + + if len(det_max): + det_max = torch.cat(det_max) # concatenate + output[image_i] = det_max[(-det_max[:, 4]).argsort()] # sort + return output + +def letterbox(img, height=416, augment=False, color=(127.5, 127.5, 127.5)): + # Resize a rectangular image to a padded square + shape = img.shape[:2] # shape = [height, width] + ratio = float(height) / max(shape) # ratio = old / new + new_shape = (round(shape[1] * ratio), round(shape[0] * 
ratio)) + dw = (height - new_shape[0]) / 2 # width padding + dh = (height - new_shape[1]) / 2 # height padding + top, bottom = round(dh - 0.1), round(dh + 0.1) + left, right = round(dw - 0.1), round(dw + 0.1) + # resize img + if augment: + interpolation = np.random.choice([None, cv2.INTER_NEAREST, cv2.INTER_LINEAR, + None, cv2.INTER_NEAREST, cv2.INTER_LINEAR, + cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]) + if interpolation is None: + img = cv2.resize(img, new_shape) + else: + img = cv2.resize(img, new_shape, interpolation=interpolation) + else: + img = cv2.resize(img, new_shape, interpolation=cv2.INTER_NEAREST) + # print("resize time:",time.time()-s1) + + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded square + return img, ratio, dw, dh +#--------------------------------------------------------- +# model_path = './coco_model/yolov3_coco.pt' # 检测模型路径 +# root_path = './test_images/'# 测试文件夹 +# model_arch = 'yolov3' # 模型类型 +# voc_config = 'cfg/voc.data' # 模型相关配置文件 +# img_size = 416 # 图像尺寸 +# conf_thres = 0.35# 检测置信度 +# nms_thres = 0.5 # nms 阈值 +class yolo_v3_hand_model(object): + def __init__(self, + model_path = './components/hand_detect/weights/latest_416-2021-02-19.pt', + model_arch = 'yolov3', + img_size=416, + conf_thres=0.16, + nms_thres=0.4,): + print("yolo v3 hand_model loading : {}".format(model_path)) + self.use_cuda = torch.cuda.is_available() + self.device = torch.device("cuda:0" if self.use_cuda else "cpu") + self.img_size = img_size + self.classes = ["Hand"] + self.num_classes = len(self.classes) + self.conf_thres = conf_thres + self.nms_thres = nms_thres + #----------------------------------------------------------------------- + weights = model_path + if "-tiny" in model_arch: + a_scalse = 416./img_size + anchors=[(10, 14), (23, 27), (37, 58), (81, 82), (135, 169), (344, 319)] + anchors_new = [ (int(anchors[j][0]/a_scalse),int(anchors[j][1]/a_scalse)) for j in range(len(anchors)) ] + + model = Yolov3Tiny(self.num_classes,anchors = anchors_new) + else: + a_scalse = 416./img_size + anchors=[(10,13), (16,30), (33,23), (30,61), (62,45), (59,119), (116,90), (156,198), (373,326)] + anchors_new = [ (int(anchors[j][0]/a_scalse),int(anchors[j][1]/a_scalse)) for j in range(len(anchors)) ] + model = Yolov3(self.num_classes,anchors = anchors_new) + #----------------------------------------------------------------------- + + self.model = model + # show_model_param(self.model)# 显示模型参数 + + # print('num_classes : ',self.num_classes) + + self.device = select_device() # 运行硬件选择 + self.use_cuda = torch.cuda.is_available() + # Load weights + if os.access(weights,os.F_OK):# 判断模型文件是否存在 + self.model.load_state_dict(torch.load(weights, map_location=lambda storage, loc: storage)['model']) + else: + print('------- >>> error : model not exists') + return False + # + self.model.eval()#模型设置为 eval + acc_model('',self.model) + self.model = self.model.to(self.device) + + def predict(self, img_,vis): + with torch.no_grad(): + t = time.time() + img = process_data(img_, self.img_size) + t1 = time.time() + img = torch.from_numpy(img).unsqueeze(0).to(self.device) + + pred, _ = self.model(img)#图片检测 + + t2 = time.time() + detections = non_max_suppression(pred, self.conf_thres, self.nms_thres)[0] # nms + t3 = time.time() + # print("t3 time:", t3) + + if (detections is None) or len(detections) == 0: + return [] + # Rescale boxes from 416 to true image size + detections[:, :4] = scale_coords(self.img_size, detections[:, :4], img_.shape).round() + # 绘制检测结果 
:detect reslut + dets_for_landmarks = [] + colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) for v in range(1, 10 + 1)][::-1] + + output_dict_ = [] + for *xyxy, conf, cls_conf, cls in detections: + label = '%s %.2f' % (self.classes[0], conf) + x1,y1,x2,y2 = xyxy + output_dict_.append((float(x1),float(y1),float(x2),float(y2),float(conf.item()))) + if vis: + plot_one_box(xyxy, img_, label=label, color=(0,175,255), line_thickness = 2) + return output_dict_ diff --git a/components/hand_detect/yolov3.py b/components/hand_detect/yolov3.py new file mode 100644 index 0000000..519ae7e --- /dev/null +++ b/components/hand_detect/yolov3.py @@ -0,0 +1,505 @@ +import os +import numpy as np +from collections import OrderedDict + +import torch +import torch.nn.functional as F +import torch.nn as nn + + +# reference: +# https://github.com/ultralytics/yolov3/blob/master/models.py +# https://github.com/TencentYoutuResearch/ObjectDetection-OneStageDet/blob/master/yolo/vedanet/network/backbone/brick/darknet53.py +# network structure https://blog.csdn.net/u010397980/article/details/85058630 + +flag_yolo_structure = False # True 查看 相关的网络 log + +class Conv2dBatchLeaky(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride, leaky_slope=0.1): + super(Conv2dBatchLeaky, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.stride = stride + if isinstance(kernel_size, (list, tuple)): + self.padding = [int(ii/2) for ii in kernel_size] + if flag_yolo_structure: + print('------------------->>>> Conv2dBatchLeaky isinstance') + else: + self.padding = int(kernel_size/2) + + self.leaky_slope = leaky_slope + # Layer + # LeakyReLU : y = max(0, x) + leaky_slope*min(0,x) + self.layers = nn.Sequential( + nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, bias=False), + nn.BatchNorm2d(self.out_channels), + nn.LeakyReLU(self.leaky_slope, inplace=True) + ) + + def forward(self, x): + x = self.layers(x) + return x + +class ResBlockSum(nn.Module): + def __init__(self, nchannels): + super().__init__() + self.block = nn.Sequential( + Conv2dBatchLeaky(nchannels, int(nchannels/2), 1, 1), + Conv2dBatchLeaky(int(nchannels/2), nchannels, 3, 1) + ) + + def forward(self, x): + return x + self.block(x) + +class HeadBody(nn.Module): + def __init__(self, in_channels, out_channels): + super(HeadBody, self).__init__() + + self.layer = nn.Sequential( + Conv2dBatchLeaky(in_channels, out_channels, 1, 1), + Conv2dBatchLeaky(out_channels, out_channels*2, 3, 1), + Conv2dBatchLeaky(out_channels*2, out_channels, 1, 1), + Conv2dBatchLeaky(out_channels, out_channels*2, 3, 1), + Conv2dBatchLeaky(out_channels*2, out_channels, 1, 1) + ) + + def forward(self, x): + x = self.layer(x) + return x + +class Upsample(nn.Module): + # Custom Upsample layer (nn.Upsample gives deprecated warning message) + + def __init__(self, scale_factor=1, mode='nearest'): + super(Upsample, self).__init__() + self.scale_factor = scale_factor + self.mode = mode + + def forward(self, x): + return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode) + +# default anchors=[(10,13), (16,30), (33,23), (30,61), (62,45), (59,119), (116,90), (156,198), (373,326)] +class YOLOLayer(nn.Module): + def __init__(self, anchors, nC): + super(YOLOLayer, self).__init__() + + self.anchors = torch.FloatTensor(anchors) + self.nA = len(anchors) # number of anchors (3) + self.nC = nC # number of classes + self.img_size = 0 + if flag_yolo_structure: + 
print('init YOLOLayer ------ >>> ') + print('anchors : ',self.anchors) + print('nA : ',self.nA) + print('nC : ',self.nC) + print('img_size : ',self.img_size) + + def forward(self, p, img_size, var=None):# p : feature map + bs, nG = p.shape[0], p.shape[-1] # batch_size , grid + if flag_yolo_structure: + print('bs, nG --->>> ',bs, nG) + if self.img_size != img_size: + create_grids(self, img_size, nG, p.device) + + # p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85) # (bs, anchors, grid, grid, xywh + confidence + classes) + p = p.view(bs, self.nA, self.nC + 5, nG, nG).permute(0, 1, 3, 4, 2).contiguous() # prediction + + if self.training: + return p + else: # inference + io = p.clone() # inference output + io[..., 0:2] = torch.sigmoid(io[..., 0:2]) + self.grid_xy # xy + io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh # wh yolo method + io[..., 4:] = torch.sigmoid(io[..., 4:]) # p_conf, p_cls + io[..., :4] *= self.stride + if self.nC == 1: + io[..., 5] = 1 # single-class model + # flatten prediction, reshape from [bs, nA, nG, nG, nC] to [bs, nA * nG * nG, nC] + return io.view(bs, -1, 5 + self.nC), p + +def create_grids(self, img_size, nG, device='cpu'): + # self.nA : len(anchors) # number of anchors (3) + # self.nC : nC # number of classes + # nG : feature map grid 13*13 26*26 52*52 + self.img_size = img_size + self.stride = img_size / nG + if flag_yolo_structure: + print('create_grids stride : ',self.stride) + + # build xy offsets + grid_x = torch.arange(nG).repeat((nG, 1)).view((1, 1, nG, nG)).float() + grid_y = grid_x.permute(0, 1, 3, 2) + self.grid_xy = torch.stack((grid_x, grid_y), 4).to(device) + if flag_yolo_structure: + print('grid_x : ',grid_x.size(),grid_x) + print('grid_y : ',grid_y.size(),grid_y) + print('grid_xy : ',self.grid_xy.size(),self.grid_xy) + + # build wh gains + self.anchor_vec = self.anchors.to(device) / self.stride # 基于 stride 的归一化 + # print('self.anchor_vecself.anchor_vecself.anchor_vec:',self.anchor_vec) + self.anchor_wh = self.anchor_vec.view(1, self.nA, 1, 1, 2).to(device) + self.nG = torch.FloatTensor([nG]).to(device) + + +def get_yolo_layer_index(module_list): + yolo_layer_index = [] + for index, l in enumerate(module_list): + try: + a = l[0].img_size and l[0].nG # only yolo layer need img_size and nG + yolo_layer_index.append(index) + except: + pass + assert len(yolo_layer_index) > 0, "can not find yolo layer" + return yolo_layer_index + + +# ----------------------yolov3------------------------ + +class Yolov3(nn.Module): + def __init__(self, num_classes=80, anchors=[(10,13), (16,30), (33,23), (30,61), (62,45), (59,119), (116,90), (156,198), (373,326)]): + super().__init__() + anchor_mask1 = [i for i in range(2 * len(anchors) // 3, len(anchors), 1)] # [6, 7, 8] + anchor_mask2 = [i for i in range(len(anchors) // 3, 2 * len(anchors) // 3, 1)] # [3, 4, 5] + anchor_mask3 = [i for i in range(0, len(anchors) // 3, 1)] # [0, 1, 2] + if flag_yolo_structure: + print('anchor_mask1 : ',anchor_mask1) # 大物体 anchor + print('anchor_mask2 : ',anchor_mask2) # 中物体 anchor + print('anchor_mask3 : ',anchor_mask3) # 小物体 anchor + + # Network + # OrderedDict 是 dict 的子类,其最大特征是,它可以“维护”添加 key-value 对的顺序 + layer_list = [] + + ''' + ****** Conv2dBatchLeaky ***** + op : Conv2d,BatchNorm2d,LeakyReLU + inputs : in_channels, out_channels, kernel_size, stride, leaky_slope + ''' + + ''' + ****** ResBlockSum ****** + op : Conv2dBatchLeaky * 2 + x + inputs : nchannels + ''' + # list 0 + layer_list.append(OrderedDict([ + ('0_stage1_conv', Conv2dBatchLeaky(3, 32, 3, 1, 1)), # 416 x 416 x 32 # 
Convolutional + + ("0_stage2_conv", Conv2dBatchLeaky(32, 64, 3, 2)), # 208 x 208 x 64 # Convolutional + ("0_stage2_ressum1", ResBlockSum(64)), # Convolutional*2 + Resiudal + + ("0_stage3_conv", Conv2dBatchLeaky(64, 128, 3, 2)), # 104 x 104 128 # Convolutional + ("0_stage3_ressum1", ResBlockSum(128)), + ("0_stage3_ressum2", ResBlockSum(128)), # (Convolutional*2 + Resiudal)**2 + + ("0_stage4_conv", Conv2dBatchLeaky(128, 256, 3, 2)), # 52 x 52 x 256 # Convolutional + ("0_stage4_ressum1", ResBlockSum(256)), + ("0_stage4_ressum2", ResBlockSum(256)), + ("0_stage4_ressum3", ResBlockSum(256)), + ("0_stage4_ressum4", ResBlockSum(256)), + ("0_stage4_ressum5", ResBlockSum(256)), + ("0_stage4_ressum6", ResBlockSum(256)), + ("0_stage4_ressum7", ResBlockSum(256)), + ("0_stage4_ressum8", ResBlockSum(256)), # 52 x 52 x 256 output_feature_0 (Convolutional*2 + Resiudal)**8 + ])) + # list 1 + layer_list.append(OrderedDict([ + ("1_stage5_conv", Conv2dBatchLeaky(256, 512, 3, 2)), # 26 x 26 x 512 # Convolutional + ("1_stage5_ressum1", ResBlockSum(512)), + ("1_stage5_ressum2", ResBlockSum(512)), + ("1_stage5_ressum3", ResBlockSum(512)), + ("1_stage5_ressum4", ResBlockSum(512)), + ("1_stage5_ressum5", ResBlockSum(512)), + ("1_stage5_ressum6", ResBlockSum(512)), + ("1_stage5_ressum7", ResBlockSum(512)), + ("1_stage5_ressum8", ResBlockSum(512)), # 26 x 26 x 512 output_feature_1 # (Convolutional*2 + Resiudal)**8 + ])) + + ''' + ****** HeadBody ****** + op : Conv2dBatchLeaky * 5 + inputs : in_channels, out_channels + ''' + # list 2 + layer_list.append(OrderedDict([ + ("2_stage6_conv", Conv2dBatchLeaky(512, 1024, 3, 2)), # 13 x 13 x 1024 # Convolutional + ("2_stage6_ressum1", ResBlockSum(1024)), + ("2_stage6_ressum2", ResBlockSum(1024)), + ("2_stage6_ressum3", ResBlockSum(1024)), + ("2_stage6_ressum4", ResBlockSum(1024)), # 13 x 13 x 1024 output_feature_2 # (Convolutional*2 + Resiudal)**4 + ("2_headbody1", HeadBody(in_channels=1024, out_channels=512)), # 13 x 13 x 512 # Convalutional Set = Conv2dBatchLeaky * 5 + ])) + # list 3 + layer_list.append(OrderedDict([ + ("3_conv_1", Conv2dBatchLeaky(in_channels=512, out_channels=1024, kernel_size=3, stride=1)), + ("3_conv_2", nn.Conv2d(in_channels=1024, out_channels=len(anchor_mask1) * (num_classes + 5), kernel_size=1, stride=1, padding=0, bias=True)), + ])) # predict one + # list 4 + layer_list.append(OrderedDict([ + ("4_yolo", YOLOLayer([anchors[i] for i in anchor_mask1], num_classes)) + ])) # 3*((x, y, w, h, confidence) + classes ) + + # list 5 + layer_list.append(OrderedDict([ + ("5_conv", Conv2dBatchLeaky(512, 256, 1, 1)), + ("5_upsample", Upsample(scale_factor=2)), + ])) + # list 6 + layer_list.append(OrderedDict([ + ("6_head_body2", HeadBody(in_channels=768, out_channels=256)) # Convalutional Set = Conv2dBatchLeaky * 5 + ])) + # list 7 + layer_list.append(OrderedDict([ + ("7_conv_1", Conv2dBatchLeaky(in_channels=256, out_channels=512, kernel_size=3, stride=1)), + ("7_conv_2", nn.Conv2d(in_channels=512, out_channels=len(anchor_mask2) * (num_classes + 5), kernel_size=1, stride=1, padding=0, bias=True)), + ])) # predict two + # list 8 + layer_list.append(OrderedDict([ + ("8_yolo", YOLOLayer([anchors[i] for i in anchor_mask2], num_classes)) + ])) # 3*((x, y, w, h, confidence) + classes ) + # list 9 + layer_list.append(OrderedDict([ + ("9_conv", Conv2dBatchLeaky(256, 128, 1, 1)), + ("9_upsample", Upsample(scale_factor=2)), + ])) + # list 10 + layer_list.append(OrderedDict([ + ("10_head_body3", HeadBody(in_channels=384, out_channels=128)) # Convalutional Set = 
Conv2dBatchLeaky * 5 + ])) + # list 11 + layer_list.append(OrderedDict([ + ("11_conv_1", Conv2dBatchLeaky(in_channels=128, out_channels=256, kernel_size=3, stride=1)), + ("11_conv_2", nn.Conv2d(in_channels=256, out_channels=len(anchor_mask3) * (num_classes + 5), kernel_size=1, stride=1, padding=0, bias=True)), + ])) # predict three + # list 12 + layer_list.append(OrderedDict([ + ("12_yolo", YOLOLayer([anchors[i] for i in anchor_mask3], num_classes)) + ])) # 3*((x, y, w, h, confidence) + classes ) + # nn.ModuleList类似于pytho中的list类型,只是将一系列层装入列表,并没有实现forward()方法,因此也不会有网络模型产生的副作用 + self.module_list = nn.ModuleList([nn.Sequential(i) for i in layer_list]) + self.yolo_layer_index = get_yolo_layer_index(self.module_list) + if flag_yolo_structure: + print('yolo_layer : ',len(layer_list),'\n') + print(self.module_list[4]) + print(self.module_list[8]) + print(self.module_list[12]) + + # print('self.module_list -------->>> ',self.module_list) + # print('self.yolo_layer_index -------->>> ',self.yolo_layer_index) + + def forward(self, x): + img_size = x.shape[-1] + if flag_yolo_structure: + print('forward img_size : ',img_size,x.shape) + output = [] + + x = self.module_list[0](x) + x_route1 = x + x = self.module_list[1](x) + x_route2 = x + x = self.module_list[2](x) + + yolo_head = self.module_list[3](x) + if flag_yolo_structure: + print('mask1 yolo_head : ',yolo_head.size()) + yolo_head_out_13x13 = self.module_list[4][0](yolo_head, img_size) + output.append(yolo_head_out_13x13) + + x = self.module_list[5](x) + x = torch.cat([x, x_route2], 1) + x = self.module_list[6](x) + + yolo_head = self.module_list[7](x) + if flag_yolo_structure: + print('mask2 yolo_head : ',yolo_head.size()) + yolo_head_out_26x26 = self.module_list[8][0](yolo_head, img_size) + output.append(yolo_head_out_26x26) + + x = self.module_list[9](x) + x = torch.cat([x, x_route1], 1) + x = self.module_list[10](x) + + yolo_head = self.module_list[11](x) + if flag_yolo_structure: + print('mask3 yolo_head : ',yolo_head.size()) + yolo_head_out_52x52 = self.module_list[12][0](yolo_head, img_size) + output.append(yolo_head_out_52x52) + + if self.training: + return output + else: + io, p = list(zip(*output)) # inference output, training output + return torch.cat(io, 1), p + + +# ----------------------yolov3 tiny------------------------ + +class EmptyLayer(nn.Module): + """Placeholder for 'route' and 'shortcut' layers""" + def __init__(self): + super(EmptyLayer, self).__init__() + + def forward(self, x): + return x + + +class Yolov3Tiny(nn.Module): + def __init__(self, num_classes=80, anchors=[(10, 14), (23, 27), (37, 58), (81, 82), (135, 169), (344, 319)]): + super(Yolov3Tiny, self).__init__() + + anchor_mask1 = [i for i in range(len(anchors) // 2, len(anchors), 1)] # [3, 4, 5] + anchor_mask2 = [i for i in range(0, len(anchors) // 2, 1)] # [0, 1, 2] + + layer_list = [] + layer_list.append(OrderedDict([ + # layer 0 + ("conv_0", nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_0", nn.BatchNorm2d(16)), + ("leaky_0", nn.LeakyReLU(0.1)), + # layer 1 + ("maxpool_1", nn.MaxPool2d(kernel_size=2, stride=2, padding=0)), + # layer 2 + ("conv_2", nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_2", nn.BatchNorm2d(32)), + ("leaky_2", nn.LeakyReLU(0.1)), + # layer 3 + ("maxpool_3", nn.MaxPool2d(kernel_size=2, stride=2, padding=0)), + # layer 4 + ("conv_4", nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, 
bias=False)), + ("batch_norm_4", nn.BatchNorm2d(64)), + ("leaky_4", nn.LeakyReLU(0.1)), + # layer 5 + ("maxpool_5", nn.MaxPool2d(kernel_size=2, stride=2, padding=0)), + # layer 6 + ("conv_6", nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_6", nn.BatchNorm2d(128)), + ("leaky_6", nn.LeakyReLU(0.1)), + # layer 7 + ("maxpool_7", nn.MaxPool2d(kernel_size=2, stride=2, padding=0)), + # layer 8 + ("conv_8", nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_8", nn.BatchNorm2d(256)), + ("leaky_8", nn.LeakyReLU(0.1)), + ])) + + layer_list.append(OrderedDict([ + # layer 9 + ("maxpool_9", nn.MaxPool2d(kernel_size=2, stride=2, padding=0)), + # layer 10 + ("conv_10", nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_10", nn.BatchNorm2d(512)), + ("leaky_10", nn.LeakyReLU(0.1)), + # layer 11 + ('_debug_padding_11', nn.ZeroPad2d((0, 1, 0, 1))), + ("maxpool_11", nn.MaxPool2d(kernel_size=2, stride=1, padding=0)), + # layer 12 + ("conv_12", nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_12", nn.BatchNorm2d(1024)), + ("leaky_12", nn.LeakyReLU(0.1)), + # layer 13 + ("conv_13", nn.Conv2d(in_channels=1024, out_channels=256, kernel_size=1, stride=1, padding=0, bias=False)), + ("batch_norm_13", nn.BatchNorm2d(256)), + ("leaky_13", nn.LeakyReLU(0.1)), + ])) + + layer_list.append(OrderedDict([ + # layer 14 + ("conv_14", nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_14", nn.BatchNorm2d(512)), + ("leaky_14", nn.LeakyReLU(0.1)), + # layer 15 + ("conv_15", + nn.Conv2d(in_channels=512, out_channels=len(anchor_mask1) * (num_classes + 5), kernel_size=1, stride=1, padding=0, bias=True)), + ])) + + # layer 16 + anchor_tmp1 = [anchors[i] for i in anchor_mask1] + layer_list.append(OrderedDict([("yolo_16", YOLOLayer(anchor_tmp1, num_classes))])) + + # layer 17 + layer_list.append(OrderedDict([("route_17", EmptyLayer())])) + + layer_list.append(OrderedDict([ + # layer 18 + ("conv_18", nn.Conv2d(in_channels=256, out_channels=128, kernel_size=1, stride=1, padding=0, bias=False)), + ("batch_norm_18", nn.BatchNorm2d(128)), + ("leaky_18", nn.LeakyReLU(0.1)), + # layer 19 + ("upsample_19", Upsample(scale_factor=2)), + ])) + + # layer 20 + layer_list.append(OrderedDict([('route_20', EmptyLayer())])) + + layer_list.append(OrderedDict([ + # layer 21 + ("conv_21", nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)), + ("batch_norm_21", nn.BatchNorm2d(256)), + ("leaky_21", nn.LeakyReLU(0.1)), + # layer 22 + ("conv_22", + nn.Conv2d(in_channels=256, out_channels=len(anchor_mask2) * (num_classes + 5), kernel_size=1, stride=1, padding=0, bias=True)), + ])) + + # layer 23 + anchor_tmp2 = [anchors[i] for i in anchor_mask2] + layer_list.append(OrderedDict([("yolo_23", YOLOLayer(anchor_tmp2, num_classes))])) + + self.module_list = nn.ModuleList([nn.Sequential(layer) for layer in layer_list]) + self.yolo_layer_index = get_yolo_layer_index(self.module_list) + + def forward(self, x): + img_size = x.shape[-1] + output = [] + + x = self.module_list[0](x) # layer0 to layer8 + x_route8 = x + x = self.module_list[1](x) # layer9 to layer13 + x_route13 = x + x = self.module_list[2](x) # layer14, layer15 + x = self.module_list[3][0](x, img_size) # yolo_16 + output.append(x) + x = self.module_list[5](x_route13) # 
layer18, layer19 + x = torch.cat([x, x_route8], 1) # route + x = self.module_list[7](x) # layer21, layer22 + x = self.module_list[8][0](x, img_size) # yolo_23 + output.append(x) + + if self.training: + return output + else: + io, p = list(zip(*output)) # inference output, training output + return torch.cat(io, 1), p + + +if __name__ == "__main__": + dummy_input = torch.Tensor(5, 3, 416, 416) + model = Yolov3(num_classes=80) + params = list(model.parameters()) + k = 0 + for i in params: + l = 1 + for j in i.size(): + l *= j + # print("该层的结构: {}, 参数和: {}".format(str(list(i.size())), str(l))) + k = k + l + print("----------------------") + print("总参数数量和: " + str(k)) + print("-----------yolo layer") + for index in model.yolo_layer_index: + print(model.module_list[index]) + + print("-----------train") + model.train() + for res in model(dummy_input): + print("res:", np.shape(res)) + + print("-----------eval") + model.eval() + inference_out, train_out = model(dummy_input) + print("inference_out:", np.shape(inference_out)) + for o in train_out: + print("train_out:", np.shape(o)) diff --git a/components/hand_keypoints/handpose_x.py b/components/hand_keypoints/handpose_x.py new file mode 100644 index 0000000..2df2eb5 --- /dev/null +++ b/components/hand_keypoints/handpose_x.py @@ -0,0 +1,136 @@ +#-*-coding:utf-8-*- +# date:2021-03-09 +# Author: Eric.Lee +# function: handpose_x 21 keypoints 2D + +import os +import torch +import cv2 +import numpy as np +import json + +import torch +import torch.nn as nn + +import time +import math +from datetime import datetime + +from hand_keypoints.models.resnet import resnet18,resnet34,resnet50,resnet101 +from hand_keypoints.models.squeezenet import squeezenet1_1,squeezenet1_0 + +from hand_keypoints.models.resnet import resnet18,resnet34,resnet50,resnet101 +from hand_keypoints.models.squeezenet import squeezenet1_1,squeezenet1_0 +from hand_keypoints.models.shufflenetv2 import ShuffleNetV2 +from hand_keypoints.models.shufflenet import ShuffleNet +from hand_keypoints.models.mobilenetv2 import MobileNetV2 +from torchvision.models import shufflenet_v2_x1_5 ,shufflenet_v2_x1_0 , shufflenet_v2_x2_0 +from hand_keypoints.models.rexnetv1 import ReXNetV1 + + +from hand_keypoints.utils.common_utils import * + +def draw_bd_handpose_c(img_,hand_,x,y,thick = 3): + # thick = 2 + colors = [(0,215,255),(255,115,55),(5,255,55),(25,15,255),(225,15,55)] + # + cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['1']['x']+x), int(hand_['1']['y']+y)), colors[0], thick) + cv2.line(img_, (int(hand_['1']['x']+x), int(hand_['1']['y']+y)),(int(hand_['2']['x']+x), int(hand_['2']['y']+y)), colors[0], thick) + cv2.line(img_, (int(hand_['2']['x']+x), int(hand_['2']['y']+y)),(int(hand_['3']['x']+x), int(hand_['3']['y']+y)), colors[0], thick) + cv2.line(img_, (int(hand_['3']['x']+x), int(hand_['3']['y']+y)),(int(hand_['4']['x']+x), int(hand_['4']['y']+y)), colors[0], thick) + + cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['5']['x']+x), int(hand_['5']['y']+y)), colors[1], thick) + cv2.line(img_, (int(hand_['5']['x']+x), int(hand_['5']['y']+y)),(int(hand_['6']['x']+x), int(hand_['6']['y']+y)), colors[1], thick) + cv2.line(img_, (int(hand_['6']['x']+x), int(hand_['6']['y']+y)),(int(hand_['7']['x']+x), int(hand_['7']['y']+y)), colors[1], thick) + cv2.line(img_, (int(hand_['7']['x']+x), int(hand_['7']['y']+y)),(int(hand_['8']['x']+x), int(hand_['8']['y']+y)), colors[1], thick) + + cv2.line(img_, (int(hand_['0']['x']+x), 
int(hand_['0']['y']+y)),(int(hand_['9']['x']+x), int(hand_['9']['y']+y)), colors[2], thick) + cv2.line(img_, (int(hand_['9']['x']+x), int(hand_['9']['y']+y)),(int(hand_['10']['x']+x), int(hand_['10']['y']+y)), colors[2], thick) + cv2.line(img_, (int(hand_['10']['x']+x), int(hand_['10']['y']+y)),(int(hand_['11']['x']+x), int(hand_['11']['y']+y)), colors[2], thick) + cv2.line(img_, (int(hand_['11']['x']+x), int(hand_['11']['y']+y)),(int(hand_['12']['x']+x), int(hand_['12']['y']+y)), colors[2], thick) + + cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['13']['x']+x), int(hand_['13']['y']+y)), colors[3], thick) + cv2.line(img_, (int(hand_['13']['x']+x), int(hand_['13']['y']+y)),(int(hand_['14']['x']+x), int(hand_['14']['y']+y)), colors[3], thick) + cv2.line(img_, (int(hand_['14']['x']+x), int(hand_['14']['y']+y)),(int(hand_['15']['x']+x), int(hand_['15']['y']+y)), colors[3], thick) + cv2.line(img_, (int(hand_['15']['x']+x), int(hand_['15']['y']+y)),(int(hand_['16']['x']+x), int(hand_['16']['y']+y)), colors[3], thick) + + cv2.line(img_, (int(hand_['0']['x']+x), int(hand_['0']['y']+y)),(int(hand_['17']['x']+x), int(hand_['17']['y']+y)), colors[4], thick) + cv2.line(img_, (int(hand_['17']['x']+x), int(hand_['17']['y']+y)),(int(hand_['18']['x']+x), int(hand_['18']['y']+y)), colors[4], thick) + cv2.line(img_, (int(hand_['18']['x']+x), int(hand_['18']['y']+y)),(int(hand_['19']['x']+x), int(hand_['19']['y']+y)), colors[4], thick) + cv2.line(img_, (int(hand_['19']['x']+x), int(hand_['19']['y']+y)),(int(hand_['20']['x']+x), int(hand_['20']['y']+y)), colors[4], thick) + +# +class handpose_x_model(object): + def __init__(self, + model_path = './components/hand_keypoints/weights/ReXNetV1-size-256-wingloss102-0.1063.pth', + img_size= 256, + num_classes = 42,# 手部关键点个数 * 2 : 21*2 + model_arch = "rexnetv1", + ): + # print("handpose_x loading : ",model_path) + self.use_cuda = torch.cuda.is_available() + self.device = torch.device("cuda:0" if self.use_cuda else "cpu") # 可选的设备类型及序号 + self.img_size = img_size + #----------------------------------------------------------------------- + + if model_arch == 'resnet_50': + model_ = resnet50(num_classes = num_classes,img_size = self.img_size) + elif model_arch == 'resnet_18': + model_ = resnet18(num_classes = num_classes,img_size = self.img_size) + elif model_arch == 'resnet_34': + model_ = resnet34(num_classes = num_classes,img_size = self.img_size) + elif model_arch == 'resnet_101': + model_ = resnet101(num_classes = num_classes,img_size = self.img_size) + elif model_arch == "squeezenet1_0": + model_ = squeezenet1_0(pretrained=True, num_classes=num_classes) + elif model_arch == "squeezenet1_1": + model_ = squeezenet1_1(pretrained=True, num_classes=num_classes) + elif model_arch == "shufflenetv2": + model_ = ShuffleNetV2(ratio=1., num_classes=num_classes) + elif model_arch == "shufflenet_v2_x1_5": + model_ = shufflenet_v2_x1_5(pretrained=False,num_classes=num_classes) + elif model_arch == "shufflenet_v2_x1_0": + model_ = shufflenet_v2_x1_0(pretrained=False,num_classes=num_classes) + elif model_arch == "shufflenet_v2_x2_0": + model_ = shufflenet_v2_x2_0(pretrained=False,num_classes=num_classes) + elif model_arch == "shufflenet": + model_ = ShuffleNet(num_blocks = [2,4,2], num_classes=num_classes, groups=3) + elif model_arch == "mobilenetv2": + model_ = MobileNetV2(num_classes=num_classes) + elif model_arch == "rexnetv1": + model_ = ReXNetV1(num_classes=num_classes) + else: + print(" no support the model") + 
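+            # NOTE: if model_arch matches none of the branches above, model_ is never assigned,
+            # so the model_.to(self.device) call just below raises a NameError; only the listed
+            # architectures are supported.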
#----------------------------------------------------------------------- + model_ = model_.to(self.device) + model_.eval() # 设置为前向推断模式 + + # 加载测试模型 + if os.access(model_path,os.F_OK):# checkpoint + chkpt = torch.load(model_path, map_location=self.device) + model_.load_state_dict(chkpt) + print('handpose_x model loading : {}'.format(model_path)) + + self.model_handpose = model_ + + def predict(self, img, vis = False): + with torch.no_grad(): + + if not((img.shape[0] == self.img_size) and (img.shape[1] == self.img_size)): + img = cv2.resize(img, (self.img_size,self.img_size), interpolation = cv2.INTER_CUBIC) + + img_ = img.astype(np.float32) + img_ = (img_-128.)/256. + + img_ = img_.transpose(2, 0, 1) + img_ = torch.from_numpy(img_) + img_ = img_.unsqueeze_(0) + + if self.use_cuda: + img_ = img_.cuda() # (bs, 3, h, w) + + pre_ = self.model_handpose(img_.float()) + output = pre_.cpu().detach().numpy() + output = np.squeeze(output) + + return output diff --git a/components/hand_keypoints/models/mobilenetv2.py b/components/hand_keypoints/models/mobilenetv2.py new file mode 100644 index 0000000..7cd6062 --- /dev/null +++ b/components/hand_keypoints/models/mobilenetv2.py @@ -0,0 +1,105 @@ +"""mobilenetv2 in pytorch + + + +[1] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen + + MobileNetV2: Inverted Residuals and Linear Bottlenecks + https://arxiv.org/abs/1801.04381 +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LinearBottleNeck(nn.Module): + + def __init__(self, in_channels, out_channels, stride, t=6, class_num=100): + super().__init__() + + self.residual = nn.Sequential( + nn.Conv2d(in_channels, in_channels * t, 1), + nn.BatchNorm2d(in_channels * t), + nn.ReLU6(inplace=True), + + nn.Conv2d(in_channels * t, in_channels * t, 3, stride=stride, padding=1, groups=in_channels * t), + nn.BatchNorm2d(in_channels * t), + nn.ReLU6(inplace=True), + + nn.Conv2d(in_channels * t, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + + self.stride = stride + self.in_channels = in_channels + self.out_channels = out_channels + + def forward(self, x): + + residual = self.residual(x) + + if self.stride == 1 and self.in_channels == self.out_channels: + residual += x + + return residual + +class MobileNetV2(nn.Module): + + def __init__(self, num_classes=100,dropout_factor = 1.0): + super().__init__() + + self.pre = nn.Sequential( + nn.Conv2d(3, 32, 1, padding=1), + nn.BatchNorm2d(32), + nn.ReLU6(inplace=True) + ) + + self.stage1 = LinearBottleNeck(32, 16, 1, 1) + self.stage2 = self._make_stage(2, 16, 24, 2, 6) + self.stage3 = self._make_stage(3, 24, 32, 2, 6) + self.stage4 = self._make_stage(4, 32, 64, 2, 6) + self.stage5 = self._make_stage(3, 64, 96, 1, 6) + self.stage6 = self._make_stage(3, 96, 160, 1, 6) + self.stage7 = LinearBottleNeck(160, 320, 1, 6) + + self.conv1 = nn.Sequential( + nn.Conv2d(320, 1280, 1), + nn.BatchNorm2d(1280), + nn.ReLU6(inplace=True) + ) + + self.conv2 = nn.Conv2d(1280, num_classes, 1) + + self.dropout = nn.Dropout(dropout_factor) + + def forward(self, x): + x = self.pre(x) + x = self.stage1(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.stage4(x) + x = self.stage5(x) + x = self.stage6(x) + x = self.stage7(x) + x = self.conv1(x) + x = F.adaptive_avg_pool2d(x, 1) + x = self.dropout(x) + x = self.conv2(x) + x = x.view(x.size(0), -1) + + return x + + def _make_stage(self, repeat, in_channels, out_channels, stride, t): + + layers = [] + layers.append(LinearBottleNeck(in_channels, out_channels, stride, t)) + + 
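+        # Only the first bottleneck of a stage uses the requested stride (and may downsample);
+        # the loop below appends the remaining repeat - 1 blocks with stride 1 and unchanged width.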
while repeat - 1: + layers.append(LinearBottleNeck(out_channels, out_channels, 1, t)) + repeat -= 1 + + return nn.Sequential(*layers) + +def mobilenetv2(): + return MobileNetV2() diff --git a/components/hand_keypoints/models/my_model.py b/components/hand_keypoints/models/my_model.py new file mode 100644 index 0000000..d161357 --- /dev/null +++ b/components/hand_keypoints/models/my_model.py @@ -0,0 +1,67 @@ +#-*-coding:utf-8-*- +# date:2020-08-08 +# Author: X.L.Eric +# function: my model + +import torch +import torch.nn as nn +import torch.nn.functional as F +class MY_Net(nn.Module): + def __init__(self,num_classes):# op 初始化 + super(MY_Net, self).__init__() + self.cov = nn.Conv2d(3, 32, 3) + self.relu = nn.ReLU(inplace=True) + layers1 = [] + # Conv2d : in_channels, out_channels, kernel_size, stride, padding + layers1.append(nn.Conv2d(in_channels = 32, out_channels = 64, kernel_size = 3,stride=1,padding = 0)) + layers1.append(nn.BatchNorm2d(64,affine=True)) + layers1.append(nn.ReLU(inplace=True)) + layers1.append(nn.AvgPool2d(kernel_size=3, stride=2, padding=1)) + self.layers1 = nn.Sequential(*layers1) + layers2 = [] + layers2.append(nn.Conv2d(64, 128, 3)) + layers2.append(nn.BatchNorm2d(128,affine=True)) + layers2.append(nn.ReLU(inplace=True)) + layers2.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + self.layers2 = nn.Sequential(*layers2) + layers3 = [] + layers3.append(nn.Conv2d(128, 256, 3,stride=2)) + layers3.append(nn.BatchNorm2d(256,affine=True)) + layers3.append(nn.ReLU(inplace=True)) + layers3.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + self.layers3 = nn.Sequential(*layers3) + layers4 = [] + layers4.append(nn.Conv2d(256, 512, 3,stride=2)) + layers4.append(nn.BatchNorm2d(512,affine=True)) + layers4.append(nn.ReLU(inplace=True)) + layers4.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + layers4.append(nn.Conv2d(512, 512, 1,stride=1)) + self.layers4 = nn.Sequential(*layers4) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1))# 自适应均值池化 + self.fc = nn.Linear(in_features = 512 , out_features = num_classes)# 全连接 fc + + def forward(self, x):# 模型前向推断 + x = self.cov(x) + x = self.relu(x) + x = self.layers1(x) + x = self.layers2(x) + x = self.layers3(x) + x = self.layers4(x) + x = self.avgpool(x) + x = x.reshape(x.size(0), -1) + x = self.fc(x) + return x + +if __name__ == "__main__": + #输入批次图片(batchsize,channel,height,width):8 ,3*256*256 + dummy_input = torch.randn([8, 3, 256,256]) + model = MY_Net(num_classes = 100)# 分类数为 100 类 + print('model:\n',model)# 打印模型op + output = model(dummy_input)# 模型前向推断 + # 模型前向推断输出特征尺寸 + print('model inference feature size: ',output.size()) + print(output) + + output_ = F.softmax(output,dim = 1) + # + print(output_) diff --git a/components/hand_keypoints/models/resnet.py b/components/hand_keypoints/models/resnet.py new file mode 100644 index 0000000..eda0241 --- /dev/null +++ b/components/hand_keypoints/models/resnet.py @@ -0,0 +1,263 @@ +import torch +import torch.nn as nn +import math +import torch.utils.model_zoo as model_zoo + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def 
conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, img_size=224,dropout_factor = 1.): + self.inplanes = 64 + self.dropout_factor = dropout_factor + super(ResNet, self).__init__() + # 26 + # 586 train_sequence + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + # see this issue: https://github.com/xxradon/PytorchToCaffe/issues/16 + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + + assert img_size % 32 == 0 + pool_kernel = int(img_size / 32) + self.avgpool = nn.AvgPool2d(pool_kernel, stride=1, ceil_mode=True) + + self.dropout = nn.Dropout(self.dropout_factor) + + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + + x = self.dropout(x) + + x = self.fc(x) + + return x + + +def load_model(model, pretrained_state_dict): + model_dict = model.state_dict() + pretrained_dict = {k: v for k, v in pretrained_state_dict.items() if + k in model_dict and model_dict[k].size() == pretrained_state_dict[k].size()} + model.load_state_dict(pretrained_dict, strict=False) + if len(pretrained_dict) == 0: + print("[INFO] No params were loaded ...") + else: + for k, v in pretrained_state_dict.items(): + if k in pretrained_dict: + print("==>> Load {} {}".format(k, v.size())) + else: + print("[INFO] Skip {} {}".format(k, v.size())) + return model + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + # model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) + print("Load pretrained model from {}".format(model_urls['resnet18'])) + pretrained_state_dict = model_zoo.load_url(model_urls['resnet18']) + model = load_model(model, pretrained_state_dict) + return model + + +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + if pretrained: + # model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) + print("Load pretrained model from {}".format(model_urls['resnet34'])) + pretrained_state_dict = model_zoo.load_url(model_urls['resnet34']) + model = load_model(model, pretrained_state_dict) + return model + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + # model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) + print("Load pretrained model from {}".format(model_urls['resnet50'])) + pretrained_state_dict = model_zoo.load_url(model_urls['resnet50']) + model = load_model(model, pretrained_state_dict) + return model + + +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + # model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) + print("Load pretrained model from {}".format(model_urls['resnet101'])) + pretrained_state_dict = model_zoo.load_url(model_urls['resnet101']) + model = load_model(model, pretrained_state_dict) + return model + + +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) + if pretrained: + # model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) + print("Load pretrained model from {}".format(model_urls['resnet152'])) + pretrained_state_dict = model_zoo.load_url(model_urls['resnet152']) + model = load_model(model, pretrained_state_dict) + return model + +if __name__ == "__main__": + input = torch.randn([32, 3, 256,256]) + model = resnet34(False, num_classes=2, img_size=256) + output = model(input) + print(output.size()) diff --git a/components/hand_keypoints/models/resnet_50.py b/components/hand_keypoints/models/resnet_50.py new file mode 100644 index 0000000..20783e4 --- /dev/null +++ b/components/hand_keypoints/models/resnet_50.py @@ -0,0 +1,194 @@ +import torch +import torch.nn as nn + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000,dropout_factor = 1., zero_init_residual=False, + 
groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=nn.BatchNorm2d): + super(ResNet, self).__init__() + if norm_layer is None: + print('BatchNorm2d') + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + + self.dropout = nn.Dropout(dropout_factor) + + self.fc = nn.Linear(512 * block.expansion, num_classes) + + + # ---------------------------------------------------------------------------------- + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
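+ # (this zero-init is applied only when zero_init_residual=True, handled just below)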
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + + x = self.layer2(x) + + x = self.layer3(x) + + x = self.layer4(x) + + x = self.avgpool(x) + + x = x.reshape(x.size(0), -1) + + x = self.dropout(x) + + x = self.fc(x) + + return x + +def _resnet(arch, block, layers, **kwargs): + model = ResNet(block, layers, **kwargs) + return model + +def resnet50(**kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" `_ + """ + print('Bottleneck:{}'.format(Bottleneck)) + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3],**kwargs) +if __name__ == "__main__": + dummy_input = torch.randn([32, 3, 128,128]) + num_classes = 100 + model = resnet50(num_classes = num_classes,dropout_factor=0.5) + + print(model) + output = model(dummy_input) + print(output.size()) diff --git a/components/hand_keypoints/models/rexnetv1.py b/components/hand_keypoints/models/rexnetv1.py new file mode 100644 index 0000000..664b9d8 --- /dev/null +++ b/components/hand_keypoints/models/rexnetv1.py @@ -0,0 +1,183 @@ +""" +ReXNet +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn as nn +from math import ceil + +# Memory-efficient Siwsh using torch.jit.script borrowed from the code in (https://twitter.com/jeremyphoward/status/1188251041835315200) +# Currently use memory-efficient Swish as default: +USE_MEMORY_EFFICIENT_SWISH = True + +if USE_MEMORY_EFFICIENT_SWISH: + @torch.jit.script + def swish_fwd(x): + return x.mul(torch.sigmoid(x)) + + + @torch.jit.script + def swish_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1. + x * (1. 
- x_sigmoid))) + + + class SwishJitImplementation(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_bwd(x, grad_output) + + + def swish(x, inplace=False): + return SwishJitImplementation.apply(x) + +else: + def swish(x, inplace=False): + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + + +class Swish(nn.Module): + def __init__(self, inplace=True): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + + +def ConvBNAct(out, in_channels, channels, kernel=1, stride=1, pad=0, + num_group=1, active=True, relu6=False): + out.append(nn.Conv2d(in_channels, channels, kernel, + stride, pad, groups=num_group, bias=False)) + out.append(nn.BatchNorm2d(channels)) + if active: + out.append(nn.ReLU6(inplace=True) if relu6 else nn.ReLU(inplace=True)) + + +def ConvBNSwish(out, in_channels, channels, kernel=1, stride=1, pad=0, num_group=1): + out.append(nn.Conv2d(in_channels, channels, kernel, + stride, pad, groups=num_group, bias=False)) + out.append(nn.BatchNorm2d(channels)) + out.append(Swish()) + + +class SE(nn.Module): + def __init__(self, in_channels, channels, se_ratio=12): + super(SE, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Conv2d(in_channels, channels // se_ratio, kernel_size=1, padding=0), + nn.BatchNorm2d(channels // se_ratio), + nn.ReLU(inplace=True), + nn.Conv2d(channels // se_ratio, channels, kernel_size=1, padding=0), + nn.Sigmoid() + ) + + def forward(self, x): + y = self.avg_pool(x) + y = self.fc(y) + return x * y + + +class LinearBottleneck(nn.Module): + def __init__(self, in_channels, channels, t, stride, use_se=True, se_ratio=12, + **kwargs): + super(LinearBottleneck, self).__init__(**kwargs) + self.use_shortcut = stride == 1 and in_channels <= channels + self.in_channels = in_channels + self.out_channels = channels + + out = [] + if t != 1: + dw_channels = in_channels * t + ConvBNSwish(out, in_channels=in_channels, channels=dw_channels) + else: + dw_channels = in_channels + + ConvBNAct(out, in_channels=dw_channels, channels=dw_channels, kernel=3, stride=stride, pad=1, + num_group=dw_channels, active=False) + + if use_se: + out.append(SE(dw_channels, dw_channels, se_ratio)) + + out.append(nn.ReLU6()) + ConvBNAct(out, in_channels=dw_channels, channels=channels, active=False, relu6=True) + self.out = nn.Sequential(*out) + + def forward(self, x): + out = self.out(x) + if self.use_shortcut: + out[:, 0:self.in_channels] += x + + return out + + +class ReXNetV1(nn.Module): + def __init__(self, input_ch=16, final_ch=180, width_mult=1.0, depth_mult=1.0, num_classes=1000, + use_se=True, + se_ratio=12, + dropout_factor=0.2, + bn_momentum=0.9): + super(ReXNetV1, self).__init__() + + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + use_ses = [False, False, True, True, True, True] + + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) + for idx, element in enumerate(strides)], []) + if use_se: + use_ses = sum([[element] * layers[idx] for idx, element in enumerate(use_ses)], []) + else: + use_ses = [False] * sum(layers[:]) + ts = [1] * layers[0] + [6] * sum(layers[1:]) + + self.depth = sum(layers[:]) * 3 + stem_channel = 32 / width_mult if width_mult < 1.0 else 32 + inplanes = input_ch / width_mult if width_mult < 1.0 else input_ch + + features = [] 
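+ # "features" collects the stem conv, the LinearBottleneck stack, a 1x1 ConvBNSwish up to pen_channels and a final AdaptiveAvgPool2d, then becomes self.features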
+ in_channels_group = [] + channels_group = [] + + # The following channel configuration is a simple instance to make each layer become an expand layer. + for i in range(self.depth // 3): + if i == 0: + in_channels_group.append(int(round(stem_channel * width_mult))) + channels_group.append(int(round(inplanes * width_mult))) + else: + in_channels_group.append(int(round(inplanes * width_mult))) + inplanes += final_ch / (self.depth // 3 * 1.0) + channels_group.append(int(round(inplanes * width_mult))) + + ConvBNSwish(features, 3, int(round(stem_channel * width_mult)), kernel=3, stride=2, pad=1) + + for block_idx, (in_c, c, t, s, se) in enumerate(zip(in_channels_group, channels_group, ts, strides, use_ses)): + features.append(LinearBottleneck(in_channels=in_c, + channels=c, + t=t, + stride=s, + use_se=se, se_ratio=se_ratio)) + + pen_channels = int(1280 * width_mult) + ConvBNSwish(features, c, pen_channels) + + features.append(nn.AdaptiveAvgPool2d(1)) + self.features = nn.Sequential(*features) + self.output = nn.Sequential( + nn.Dropout(dropout_factor), + nn.Conv2d(pen_channels, num_classes, 1, bias=True)) + + def forward(self, x): + x = self.features(x) + x = self.output(x).squeeze() + return x diff --git a/components/hand_keypoints/models/shufflenet.py b/components/hand_keypoints/models/shufflenet.py new file mode 100644 index 0000000..0cd8d2c --- /dev/null +++ b/components/hand_keypoints/models/shufflenet.py @@ -0,0 +1,254 @@ +"""shufflenet in pytorch + + + +[1] Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun. + + ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices + https://arxiv.org/abs/1707.01083v2 +""" + +from functools import partial + +import torch +import torch.nn as nn + + +class BasicConv2d(nn.Module): + + def __init__(self, input_channels, output_channels, kernel_size, **kwargs): + super().__init__() + self.conv = nn.Conv2d(input_channels, output_channels, kernel_size, **kwargs) + self.bn = nn.BatchNorm2d(output_channels) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + +class ChannelShuffle(nn.Module): + + def __init__(self, groups): + super().__init__() + self.groups = groups + + def forward(self, x): + batchsize, channels, height, width = x.data.size() + channels_per_group = int(channels / self.groups) + + #"""suppose a convolutional layer with g groups whose output has + #g x n channels; we first reshape the output channel dimension + #into (g, n)""" + x = x.view(batchsize, self.groups, channels_per_group, height, width) + + #"""transposing and then flattening it back as the input of next layer.""" + x = x.transpose(1, 2).contiguous() + x = x.view(batchsize, -1, height, width) + + return x + +class DepthwiseConv2d(nn.Module): + + def __init__(self, input_channels, output_channels, kernel_size, **kwargs): + super().__init__() + self.depthwise = nn.Sequential( + nn.Conv2d(input_channels, output_channels, kernel_size, **kwargs), + nn.BatchNorm2d(output_channels) + ) + + def forward(self, x): + return self.depthwise(x) + +class PointwiseConv2d(nn.Module): + def __init__(self, input_channels, output_channels, **kwargs): + super().__init__() + self.pointwise = nn.Sequential( + nn.Conv2d(input_channels, output_channels, 1, **kwargs), + nn.BatchNorm2d(output_channels) + ) + + def forward(self, x): + return self.pointwise(x) + +class ShuffleNetUnit(nn.Module): + + def __init__(self, input_channels, output_channels, stage, stride, groups): + super().__init__() + + #"""Similar 
to [9], we set the number of bottleneck channels to 1/4 + #of the output channels for each ShuffleNet unit.""" + self.bottlneck = nn.Sequential( + PointwiseConv2d( + input_channels, + int(output_channels / 4), + groups=groups + ), + nn.ReLU(inplace=True) + ) + + #"""Note that for Stage 2, we do not apply group convolution on the first pointwise + #layer because the number of input channels is relatively small.""" + if stage == 2: + self.bottlneck = nn.Sequential( + PointwiseConv2d( + input_channels, + int(output_channels / 4), + groups=groups + ), + nn.ReLU(inplace=True) + ) + + self.channel_shuffle = ChannelShuffle(groups) + + self.depthwise = DepthwiseConv2d( + int(output_channels / 4), + int(output_channels / 4), + 3, + groups=int(output_channels / 4), + stride=stride, + padding=1 + ) + + self.expand = PointwiseConv2d( + int(output_channels / 4), + output_channels, + groups=groups + ) + + self.relu = nn.ReLU(inplace=True) + self.fusion = self._add + self.shortcut = nn.Sequential() + + #"""As for the case where ShuffleNet is applied with stride, + #we simply make two modifications (see Fig 2 (c)): + #(i) add a 3 × 3 average pooling on the shortcut path; + #(ii) replace the element-wise addition with channel concatenation, + #which makes it easy to enlarge channel dimension with little extra + #computation cost. + if stride != 1 or input_channels != output_channels: + self.shortcut = nn.AvgPool2d(3, stride=2, padding=1) + + self.expand = PointwiseConv2d( + int(output_channels / 4), + output_channels - input_channels, + groups=groups + ) + + self.fusion = self._cat + + def _add(self, x, y): + return torch.add(x, y) + + def _cat(self, x, y): + return torch.cat([x, y], dim=1) + + def forward(self, x): + shortcut = self.shortcut(x) + + shuffled = self.bottlneck(x) + shuffled = self.channel_shuffle(shuffled) + shuffled = self.depthwise(shuffled) + shuffled = self.expand(shuffled) + + output = self.fusion(shortcut, shuffled) + output = self.relu(output) + + return output + +class ShuffleNet(nn.Module): + + def __init__(self, num_blocks = [2,4,2], num_classes=100, groups=3, dropout_factor = 1.0): + super().__init__() + + if groups == 1: + out_channels = [24, 144, 288, 567] + elif groups == 2: + out_channels = [24, 200, 400, 800] + elif groups == 3: + out_channels = [24, 240, 480, 960] + elif groups == 4: + out_channels = [24, 272, 544, 1088] + elif groups == 8: + out_channels = [24, 384, 768, 1536] + + self.conv1 = BasicConv2d(3, out_channels[0], 3, padding=1, stride=1) + self.input_channels = out_channels[0] + + self.stage2 = self._make_stage( + ShuffleNetUnit, + num_blocks[0], + out_channels[1], + stride=2, + stage=2, + groups=groups + ) + + self.stage3 = self._make_stage( + ShuffleNetUnit, + num_blocks[1], + out_channels[2], + stride=2, + stage=3, + groups=groups + ) + + self.stage4 = self._make_stage( + ShuffleNetUnit, + num_blocks[2], + out_channels[3], + stride=2, + stage=4, + groups=groups + ) + + self.avg = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(out_channels[3], num_classes) + self.dropout = nn.Dropout(dropout_factor) + + def forward(self, x): + x = self.conv1(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.stage4(x) + x = self.avg(x) + x = x.view(x.size(0), -1) + x = self.dropout(x) + x = self.fc(x) + + return x + + def _make_stage(self, block, num_blocks, output_channels, stride, stage, groups): + """make shufflenet stage + + Args: + block: block type, shuffle unit + out_channels: output depth channel number of this stage + num_blocks: how many blocks per stage + 
stride: the stride of the first block of this stage + stage: stage index + groups: group number of group convolution + Return: + return a shuffle net stage + """ + strides = [stride] + [1] * (num_blocks - 1) + + stage = [] + + for stride in strides: + stage.append( + block( + self.input_channels, + output_channels, + stride=stride, + stage=stage, + groups=groups + ) + ) + self.input_channels = output_channels + + return nn.Sequential(*stage) + +def shufflenet(): + return ShuffleNet([4, 8, 4]) diff --git a/components/hand_keypoints/models/shufflenetv2.py b/components/hand_keypoints/models/shufflenetv2.py new file mode 100644 index 0000000..a94f9a7 --- /dev/null +++ b/components/hand_keypoints/models/shufflenetv2.py @@ -0,0 +1,157 @@ +"""shufflenetv2 in pytorch + + + +[1] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, Jian Sun + + ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design + https://arxiv.org/abs/1807.11164 +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def channel_split(x, split): + """split a tensor into two pieces along channel dimension + Args: + x: input tensor + split:(int) channel size for each pieces + """ + assert x.size(1) == split * 2 + return torch.split(x, split, dim=1) + +def channel_shuffle(x, groups): + """channel shuffle operation + Args: + x: input tensor + groups: input branch number + """ + + batch_size, channels, height, width = x.size() + channels_per_group = int(channels // groups) + + x = x.view(batch_size, groups, channels_per_group, height, width) + x = x.transpose(1, 2).contiguous() + x = x.view(batch_size, -1, height, width) + + return x + +class ShuffleUnit(nn.Module): + + def __init__(self, in_channels, out_channels, stride): + super().__init__() + + self.stride = stride + self.in_channels = in_channels + self.out_channels = out_channels + + if stride != 1 or in_channels != out_channels: + self.residual = nn.Sequential( + nn.Conv2d(in_channels, in_channels, 1), + nn.BatchNorm2d(in_channels), + nn.ReLU(inplace=True), + nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels), + nn.BatchNorm2d(in_channels), + nn.Conv2d(in_channels, int(out_channels / 2), 1), + nn.BatchNorm2d(int(out_channels / 2)), + nn.ReLU(inplace=True) + ) + + self.shortcut = nn.Sequential( + nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels), + nn.BatchNorm2d(in_channels), + nn.Conv2d(in_channels, int(out_channels / 2), 1), + nn.BatchNorm2d(int(out_channels / 2)), + nn.ReLU(inplace=True) + ) + else: + self.shortcut = nn.Sequential() + + in_channels = int(in_channels / 2) + self.residual = nn.Sequential( + nn.Conv2d(in_channels, in_channels, 1), + nn.BatchNorm2d(in_channels), + nn.ReLU(inplace=True), + nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels), + nn.BatchNorm2d(in_channels), + nn.Conv2d(in_channels, in_channels, 1), + nn.BatchNorm2d(in_channels), + nn.ReLU(inplace=True) + ) + + + def forward(self, x): + + if self.stride == 1 and self.out_channels == self.in_channels: + shortcut, residual = channel_split(x, int(self.in_channels / 2)) + else: + shortcut = x + residual = x + + shortcut = self.shortcut(shortcut) + residual = self.residual(residual) + x = torch.cat([shortcut, residual], dim=1) + x = channel_shuffle(x, 2) + + return x + +class ShuffleNetV2(nn.Module): + + def __init__(self, ratio=1., num_classes=100, dropout_factor = 1.0): + super().__init__() + if ratio == 0.5: + out_channels = [48, 96, 192, 1024] + elif ratio == 1: + 
out_channels = [116, 232, 464, 1024] + elif ratio == 1.5: + out_channels = [176, 352, 704, 1024] + elif ratio == 2: + out_channels = [244, 488, 976, 2048] + else: + ValueError('unsupported ratio number') + + self.pre = nn.Sequential( + nn.Conv2d(3, 24, 3, padding=1), + nn.BatchNorm2d(24) + ) + + self.stage2 = self._make_stage(24, out_channels[0], 3) + self.stage3 = self._make_stage(out_channels[0], out_channels[1], 7) + self.stage4 = self._make_stage(out_channels[1], out_channels[2], 3) + self.conv5 = nn.Sequential( + nn.Conv2d(out_channels[2], out_channels[3], 1), + nn.BatchNorm2d(out_channels[3]), + nn.ReLU(inplace=True) + ) + + self.fc = nn.Linear(out_channels[3], num_classes) + + self.dropout = nn.Dropout(dropout_factor) + + def forward(self, x): + x = self.pre(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.stage4(x) + x = self.conv5(x) + x = F.adaptive_avg_pool2d(x, 1) + x = x.view(x.size(0), -1) + x = self.dropout(x) + x = self.fc(x) + + return x + + def _make_stage(self, in_channels, out_channels, repeat): + layers = [] + layers.append(ShuffleUnit(in_channels, out_channels, 2)) + + while repeat: + layers.append(ShuffleUnit(out_channels, out_channels, 1)) + repeat -= 1 + + return nn.Sequential(*layers) + +def shufflenetv2(): + return ShuffleNetV2() diff --git a/components/hand_keypoints/models/squeezenet.py b/components/hand_keypoints/models/squeezenet.py new file mode 100644 index 0000000..377fe4f --- /dev/null +++ b/components/hand_keypoints/models/squeezenet.py @@ -0,0 +1,153 @@ +import math +import numpy as np +import torch +import torch.nn as nn +import torch.nn.init as init +import torch.utils.model_zoo as model_zoo + + +__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1'] + + +model_urls = { + 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', + 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', +} + + +class Fire(nn.Module): + + def __init__(self, inplanes, squeeze_planes, + expand1x1_planes, expand3x3_planes): + super(Fire, self).__init__() + self.inplanes = inplanes + self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) + self.squeeze_activation = nn.ReLU(inplace=True) + self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, + kernel_size=1) + self.expand1x1_activation = nn.ReLU(inplace=True) + self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, + kernel_size=3, padding=1) + self.expand3x3_activation = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.squeeze_activation(self.squeeze(x)) + return torch.cat([ + self.expand1x1_activation(self.expand1x1(x)), + self.expand3x3_activation(self.expand3x3(x)) + ], 1) + + +class SqueezeNet(nn.Module): + + def __init__(self, version=1.0, num_classes=1000,dropout_factor = 1.): + super(SqueezeNet, self).__init__() + if version not in [1.0, 1.1]: + raise ValueError("Unsupported SqueezeNet version {version}:" + "1.0 or 1.1 expected".format(version=version)) + self.num_classes = num_classes + if version == 1.0: + self.features = nn.Sequential( + nn.Conv2d(3, 96, kernel_size=7, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(96, 16, 64, 64), + Fire(128, 16, 64, 64), + Fire(128, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(256, 32, 128, 128), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(512, 64, 256, 256), + ) + else: + self.features = 
nn.Sequential( + nn.Conv2d(3, 64, kernel_size=3, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(64, 16, 64, 64), + Fire(128, 16, 64, 64), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(128, 32, 128, 128), + Fire(256, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + Fire(512, 64, 256, 256), + ) + # Final convolution is initialized differently form the rest + final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) + self.classifier = nn.Sequential( + nn.Dropout(p=dropout_factor), + final_conv, + nn.ReLU(inplace=True), + nn.AdaptiveAvgPool2d(1) + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + if m is final_conv: + init.normal(m.weight.data, mean=0.0, std=0.01) + else: + init.kaiming_uniform(m.weight.data) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + x = self.features(x) + # print("features(x):", x.size()) + x = self.classifier(x) + # print("features(x):", x.size()) + return x.view(x.size(0), self.num_classes) + + +def squeezenet1_0(pretrained=False, **kwargs): + r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level + accuracy with 50x fewer parameters and <0.5MB model size" + `_ paper. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = SqueezeNet(version=1.0, **kwargs) + model_dict = model.state_dict() + if pretrained: + pretrained_state_dict = model_zoo.load_url(model_urls['squeezenet1_0']) + pretrained_dict = {k: v for k, v in pretrained_state_dict.items() if + k in model_dict and model_dict[k].size() == pretrained_state_dict[k].size()} + model.load_state_dict(pretrained_dict,strict=False) + return model + + +def squeezenet1_1(pretrained=False, **kwargs): + r"""SqueezeNet 1.1 model from the `official SqueezeNet repo + `_. + SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters + than SqueezeNet 1.0, without sacrificing accuracy. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = SqueezeNet(version=1.1, **kwargs) + model_dict = model.state_dict() + if pretrained: + pretrained_state_dict = model_zoo.load_url(model_urls['squeezenet1_0']) + pretrained_dict = {k: v for k, v in pretrained_state_dict.items() if + k in model_dict and model_dict[k].size() == pretrained_state_dict[k].size()} + model.load_state_dict(pretrained_dict,strict=False) + return model + + +if __name__ == "__main__": + from thop import profile + dummy = torch.from_numpy(np.random.random([16, 3, 256, 256]).astype(np.float32)) + model = squeezenet1_0(pretrained=True, num_classes=42,dropout_factor = 0.5) + print(model) + flops, params = profile(model, inputs=(dummy, )) + model.eval() + output = model(dummy) + print(output.size()) + print("flops: {}, params: {}".format(flops, params)) diff --git a/components/hand_keypoints/utils/common_utils.py b/components/hand_keypoints/utils/common_utils.py new file mode 100644 index 0000000..1e5b172 --- /dev/null +++ b/components/hand_keypoints/utils/common_utils.py @@ -0,0 +1,132 @@ +#-*-coding:utf-8-*- +# date:2020-04-11 +# Author: Eric.Lee +# function: common utils + +import os +import shutil +import cv2 +import numpy as np +import json + +def mkdir_(path, flag_rm=False): + if os.path.exists(path): + if flag_rm == True: + shutil.rmtree(path) + os.mkdir(path) + print('remove {} done ~ '.format(path)) + else: + os.mkdir(path) + +def plot_box(bbox, img, color=None, label=None, line_thickness=None): + tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1 + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl)# 目标的bbox + if label: + tf = max(tl - 2, 1) + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] # label size + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 # 字体的bbox + cv2.rectangle(img, c1, c2, color, -1) # label 矩形填充 + # 文本绘制 + cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 4, [225, 255, 255],thickness=tf, lineType=cv2.LINE_AA) + +class JSON_Encoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + else: + return super(JSON_Encoder, self).default(obj) + +def draw_landmarks(img,output,draw_circle): + img_width = img.shape[1] + img_height = img.shape[0] + dict_landmarks = {} + for i in range(int(output.shape[0]/2)): + x = output[i*2+0]*float(img_width) + y = output[i*2+1]*float(img_height) + if 41>= i >=33: + if 'left_eyebrow' not in dict_landmarks.keys(): + dict_landmarks['left_eyebrow'] = [] + dict_landmarks['left_eyebrow'].append([int(x),int(y),(0,255,0)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,255,0),-1) + elif 50>= i >=42: + if 'right_eyebrow' not in dict_landmarks.keys(): + dict_landmarks['right_eyebrow'] = [] + dict_landmarks['right_eyebrow'].append([int(x),int(y),(0,255,0)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,255,0),-1) + elif 67>= i >=60: + if 'left_eye' not in dict_landmarks.keys(): + dict_landmarks['left_eye'] = [] + dict_landmarks['left_eye'].append([int(x),int(y),(255,0,255)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,0,255),-1) + elif 75>= i >=68: + if 'right_eye' not in dict_landmarks.keys(): + dict_landmarks['right_eye'] = [] + 
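+ # (98-point face-landmark convention: indices 68~75 trace the right-eye contour, drawn in magenta)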
dict_landmarks['right_eye'].append([int(x),int(y),(255,0,255)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,0,255),-1) + elif 97>= i >=96: + cv2.circle(img, (int(x),int(y)), 2, (0,0,255),-1) + elif 54>= i >=51: + if 'bridge_nose' not in dict_landmarks.keys(): + dict_landmarks['bridge_nose'] = [] + dict_landmarks['bridge_nose'].append([int(x),int(y),(0,170,255)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,170,255),-1) + elif 32>= i >=0: + if 'basin' not in dict_landmarks.keys(): + dict_landmarks['basin'] = [] + dict_landmarks['basin'].append([int(x),int(y),(255,30,30)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,30,30),-1) + elif 59>= i >=55: + if 'wing_nose' not in dict_landmarks.keys(): + dict_landmarks['wing_nose'] = [] + dict_landmarks['wing_nose'].append([int(x),int(y),(0,255,255)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (0,255,255),-1) + elif 87>= i >=76: + if 'out_lip' not in dict_landmarks.keys(): + dict_landmarks['out_lip'] = [] + dict_landmarks['out_lip'].append([int(x),int(y),(255,255,0)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,255,0),-1) + elif 95>= i >=88: + if 'in_lip' not in dict_landmarks.keys(): + dict_landmarks['in_lip'] = [] + dict_landmarks['in_lip'].append([int(x),int(y),(50,220,255)]) + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (50,220,255),-1) + else: + if draw_circle: + cv2.circle(img, (int(x),int(y)), 2, (255,0,255),-1) + + return dict_landmarks + +def draw_contour(image,dict): + for key in dict.keys(): + # print(key) + _,_,color = dict[key][0] + + if 'basin' == key or 'wing_nose' == key: + pts = np.array([[dict[key][i][0],dict[key][i][1]] for i in range(len(dict[key]))],np.int32) + # print(pts) + cv2.polylines(image,[pts],False,color) + + else: + points_array = np.zeros((1,len(dict[key]),2),dtype = np.int32) + for i in range(len(dict[key])): + x,y,_ = dict[key][i] + points_array[0,i,0] = x + points_array[0,i,1] = y + + # cv2.fillPoly(image, points_array, color) + cv2.drawContours(image,points_array,-1,color,thickness=1) diff --git a/components/hand_keypoints/utils/model_utils.py b/components/hand_keypoints/utils/model_utils.py new file mode 100644 index 0000000..48cc3d3 --- /dev/null +++ b/components/hand_keypoints/utils/model_utils.py @@ -0,0 +1,61 @@ +#-*-coding:utf-8-*- +# date:2020-04-11 +# Author: Eric.Lee +# function: model utils + +import os +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import random + +def get_acc(output, label): + total = output.shape[0] + _, pred_label = output.max(1) + num_correct = (pred_label == label).sum().item() + return num_correct / float(total) + +def set_learning_rate(optimizer, lr): + for param_group in optimizer.param_groups: + param_group['lr'] = lr + +def set_seed(seed = 666): + np.random.seed(seed) + random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + cudnn.deterministic = True + +def split_trainval_datasets(ops): + print(' --------------->>> split_trainval_datasets ') + train_split_datasets = [] + train_split_datasets_label = [] + + val_split_datasets = [] + val_split_datasets_label = [] + for idx,doc in enumerate(sorted(os.listdir(ops.train_path), key=lambda x:int(x.split('.')[0]), reverse=False)): + # print(' %s label is %s \n'%(doc,idx)) + + data_list = os.listdir(ops.train_path+doc) + random.shuffle(data_list) + + cal_split_num = int(len(data_list)*ops.val_factor) + + for i,file in 
enumerate(data_list): + if '.jpg' in file: + if i < cal_split_num: + val_split_datasets.append(ops.train_path+doc + '/' + file) + val_split_datasets_label.append(idx) + else: + train_split_datasets.append(ops.train_path+doc + '/' + file) + train_split_datasets_label.append(idx) + + print(ops.train_path+doc + '/' + file,idx) + + print('\n') + print('train_split_datasets len {}'.format(len(train_split_datasets))) + print('val_split_datasets len {}'.format(len(val_split_datasets))) + + return train_split_datasets,train_split_datasets_label,val_split_datasets,val_split_datasets_label diff --git a/lib/hand_lib/cfg/handpose.cfg b/lib/hand_lib/cfg/handpose.cfg new file mode 100644 index 0000000..7ae9d41 --- /dev/null +++ b/lib/hand_lib/cfg/handpose.cfg @@ -0,0 +1,11 @@ +detect_model_path=./components/hand_detect/weights/latest_416-2021-02-19.pt +detect_model_arch=yolo_v3 +detect_conf_thres=0.5 +detect_nms_thres=0.45 + +handpose_x_model_path=./components/hand_keypoints/weights/ReXNetV1-size-256-wingloss102-0.1063.pth +handpose_x_model_arch=rexnetv1 + +camera_id = 0 +vis_gesture_lines = True +charge_cycle_step = 32 diff --git a/lib/hand_lib/cores/hand_pnp.py b/lib/hand_lib/cores/hand_pnp.py new file mode 100644 index 0000000..48a2450 --- /dev/null +++ b/lib/hand_lib/cores/hand_pnp.py @@ -0,0 +1,164 @@ +#-*-coding:utf-8-*- +''' + DpCas-Light +|||| ||||| |||| || ||||||| +|| || || || || || |||| || || +|| || || || || || || || || +|| || || || || ||====|| |||||| +|| || ||||| || || ||======|| || +|| || || || || || || || || +|||| || |||| || || ||||||| + +/--------------------- HandPose_X ---------------------/ +''' +# date:2019-12-10 +# Author: Eric.Lee +# function: handpose :rotation & translation + +import cv2 +import numpy as np +# 人脸外轮廓 +def get_face_outline(img_crop,face_crop_region,obj_crop_points,face_w,face_h): + face_mask = np.zeros((1,27,2),dtype = np.int32) + for m in range(obj_crop_points.shape[0]): + if m <=16: + x = int(face_crop_region[0]+obj_crop_points[m][0]*face_w) + y = int(face_crop_region[1]+obj_crop_points[m][1]*face_h) + # face_mask.append((x,y)) + face_mask[0,m,0]=x + face_mask[0,m,1]=y + + for k in range(16,26): + m = 42-k + x = int(face_crop_region[0]+obj_crop_points[m][0]*face_w) + y = int(face_crop_region[1]+obj_crop_points[m][1]*face_h) + # face_mask.append((x,y)) + face_mask[0,k+1,0]=x + face_mask[0,k+1,1]=y + # print(x,y) + return face_mask + +# 人脸公共模型三维坐标 +object_pts = np.float32([ + [0., 0.4,0.],#掌心 + [0., 5.,0.],#hand 根部 + # [-2, 2.5,0.],#thumb 第一指节 + # [-4, 0.5,0.],#thumb 第二指节 + [-2.7, -4.5, 0.],# index 根部 + [0., -5., 0.],# middle 根部 + [2.6, -4., 0.], # ring 根部 + [5.2, -3., 0.],# pink 根部 + ] + ) + +# object_pts = np.float32([[-2.5, -7.45, 0.5],# pink 根部 +# +# [-1.2, -7.45, 0.5], # ring 根部 +# +# +# [1.2, -7.5, 0.5],# middle 根部 +# +# [2.5, -7.45, 0.5],# index 根部 +# [4.2, -3.45, 0.5],# thumb 第二指节 +# [2.5, -2.0, 0.5],# thumb 根部 +# [0.00, -0.0,0.5],#hand 根部 +# ] +# ) + +# xyz 立体矩形框 +# reprojectsrc = np.float32([[3.0, 11.0, 2.0], +# [3.0, 11.0, -4.0], +# [3.0, -7.0, -4.0], +# [3.0, -7.0, 2.0], +# [-3.0, 11.0, 2.0], +# [-3.0, 11.0, -4.0], +# [-3.0, -7.0, -4.0], +# [-3.0, -7.0, 2.0]]) + +reprojectsrc = np.float32([[5.0, 8.0, 2.0], + [5.0, 8.0, -2.0], + [5.0, -8.0, -2.0], + [5.0, -8.0, 2.0], + [-5.0, 8.0, 2.0], + [-5.0, 8.0, -2.0], + [-5.0, -8.0, -2.0], + [-5.0, -8.0, 2.0]]) + +# reprojectsrc = np.float32([[6.0, 4.0, 2.0], +# [6.0, 4.0, -4.0], +# [6.0, -3.0, -4.0], +# [6.0, -3.0, 2.0], +# [-6.0, 4.0, 2.0], +# [-6.0, 4.0, -4.0], +# [-6.0, -3.0, -4.0], +# [-6.0, -3.0, 2.0]]) + +# 
reprojectsrc = np.float32([[6.0, 6.0, 6.0], +# [6.0, 6.0, -6.0], +# [6.0, -6.0, -6.0], +# [6.0, -6.0, 6.0], +# [-6.0, 6.0, 6.0], +# [-6.0, 6.0, -6.0], +# [-6.0, -6.0, -6.0], +# [-6.0, -6.0, 6.0]]) + +# 立体矩形框连线,连接组合 +line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0], + [4, 5], [5, 6], [6, 7], [7, 4], + [0, 4], [1, 5], [2, 6], [3, 7]] + + +def get_hand_pose(shape,img,vis = True): + h,w,_=img.shape + K = [w, 0.0, w//2, + 0.0, w, h//2, + 0.0, 0.0, 1.0] + # Assuming no lens distortion + D = [0, 0, 0.0, 0.0, 0] + + cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)# 相机矩阵 + # dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)#相机畸变矩阵,默认无畸变 + dist_coeffs = np.float32([0.0, 0.0, 0.0, 0.0, 0.0]) + # 选取的人脸关键点的二维图像坐标 + # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36], + # shape[39], shape[42], shape[45], + # shape[27],shape[31], shape[35],shape[30],shape[33]]) + + image_pts = np.float32([shape[0], shape[1], shape[2], shape[3], shape[4], shape[5] + ] + ) + + # PNP 计算图像二维和三维实际关系,获得旋转和偏移矩阵 + _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs) + # _, rotation_vec, translation_vec = cv2.solvePnPRansac(object_pts, image_pts, cam_matrix, dist_coeffs) + + + # print("translation_vec:",translation_vec) + #print('translation_vec : {}'.format(translation_vec)) + + # 映射矩形框 + reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,dist_coeffs) + + reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2))) + + # calc euler angle + rotation_mat, _ = cv2.Rodrigues(rotation_vec)#旋转向量转为旋转矩阵 + pose_mat = cv2.hconcat((rotation_mat, translation_vec))# 拼接操作 旋转 + 偏移 + _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)#欧拉角估计 + + if vis: + for i,line_pair in enumerate(line_pairs):# 显示立体矩形框 + x1 = int(reprojectdst[line_pair[0]][0]) + y1 = int(reprojectdst[line_pair[0]][1]) + + x2 = int(reprojectdst[line_pair[1]][0]) + y2 = int(reprojectdst[line_pair[1]][1]) + + if line_pair[0] in [0,3,4,7] and line_pair[1] in [0,3,4,7]: + cv2.line(img,(x1,y1),(x2,y2),(255,0,0),2) + elif line_pair[0] in [1,2,5,6] and line_pair[1] in [1,2,5,6]: + cv2.line(img,(x1,y1),(x2,y2),(250,150,0),2) + else: + cv2.line(img,(x1,y1),(x2,y2),(0,90,255),2) + + return reprojectdst, euler_angle,translation_vec diff --git a/lib/hand_lib/cores/handpose_fuction.py b/lib/hand_lib/cores/handpose_fuction.py new file mode 100644 index 0000000..275d1c5 --- /dev/null +++ b/lib/hand_lib/cores/handpose_fuction.py @@ -0,0 +1,317 @@ +#-*-coding:utf-8-*- +''' +DpCas-Light +|||| |||| |||| || |||| +|| || || || || || |||| || || +|| || || || || || || || || +|| || || || || ||====|| |||| +|| || |||| || || ||======|| || +|| || || || || || || || || +|||| || |||| || || |||| + +/------------------ HandPose_X ------------------/ +''' +# date:2021-03-09 +# Author: Eric.Lee +# function: pipline + +import cv2 +import numpy as np +from hand_keypoints.handpose_x import handpose_x_model,draw_bd_handpose_c +import math +from cores.tracking_utils import tracking_bbox +from cores.hand_pnp import get_hand_pose +import numpy as np +''' + 求解二维向量的角度 +''' +def vector_2d_angle(v1,v2): + v1_x=v1[0] + v1_y=v1[1] + v2_x=v2[0] + v2_y=v2[1] + try: + angle_=math.degrees(math.acos((v1_x*v2_x+v1_y*v2_y)/(((v1_x**2+v1_y**2)**0.5)*((v2_x**2+v2_y**2)**0.5)))) + except: + angle_ =65535. + if angle_ > 180.: + angle_ = 65535. 
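+ # 65535. is an "invalid angle" sentinel (acos failure or angle > 180 deg); h_gesture() below returns None whenever angle_list contains it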
+ return angle_ +''' + 获取对应手相关向量的二维角度 +''' +def hand_angle(hand_,x=0,y=0): + angle_list = [] + #---------------------------- thumb 大拇指角度 + angle_ = vector_2d_angle( + ((int(hand_['0']['x']+x)- int(hand_['2']['x']+x)),(int(hand_['0']['y']+y)-int(hand_['2']['y']+y))), + ((int(hand_['3']['x']+x)- int(hand_['4']['x']+x)),(int(hand_['3']['y']+y)- int(hand_['4']['y']+y))) + ) + angle_list.append(angle_) + #---------------------------- index 食指角度 + angle_ = vector_2d_angle( + ((int(hand_['0']['x']+x)-int(hand_['6']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['6']['y']+y))), + ((int(hand_['7']['x']+x)- int(hand_['8']['x']+x)),(int(hand_['7']['y']+y)- int(hand_['8']['y']+y))) + ) + angle_list.append(angle_) + #---------------------------- middle 中指角度 + angle_ = vector_2d_angle( + ((int(hand_['0']['x']+x)- int(hand_['10']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['10']['y']+y))), + ((int(hand_['11']['x']+x)- int(hand_['12']['x']+x)),(int(hand_['11']['y']+y)- int(hand_['12']['y']+y))) + ) + angle_list.append(angle_) + #---------------------------- ring 无名指角度 + angle_ = vector_2d_angle( + ((int(hand_['0']['x']+x)- int(hand_['14']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['14']['y']+y))), + ((int(hand_['15']['x']+x)- int(hand_['16']['x']+x)),(int(hand_['15']['y']+y)- int(hand_['16']['y']+y))) + ) + angle_list.append(angle_) + #---------------------------- pink 小拇指角度 + angle_ = vector_2d_angle( + ((int(hand_['0']['x']+x)- int(hand_['18']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['18']['y']+y))), + ((int(hand_['19']['x']+x)- int(hand_['20']['x']+x)),(int(hand_['19']['y']+y)- int(hand_['20']['y']+y))) + ) + angle_list.append(angle_) + + return angle_list +''' + # 二维约束的方法定义手势,由于受限于没有大量的静态手势数据集原因 + # fist five gun love one six three thumbup yeah + # finger id: thumb index middle ring pink +''' +def h_gesture(img,angle_list): + thr_angle = 65. + thr_angle_thumb = 53. + thr_angle_s = 49. + gesture_str = None + if 65535. 
not in angle_list: + if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "fist" + elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "gun" + elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]5) and (angle_list[1]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "one" + elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]thr_angle_thumb) and (angle_list[1]thr_angle): + gesture_str = "three" + elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "thumbUp" + elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "yeah" + + return gesture_str + +#------------------------------------- +''' + 手部跟踪算法:采用边界框的IOU方式 +''' +def hand_tracking(data,hands_dict,track_index): + if data is None: + hands_dict = {} + track_index = 0 + hands_dict,track_index = tracking_bbox(data,hands_dict,track_index) # 目标跟踪 + return hands_dict,track_index + +#------------------------------------- +''' + DpCas-Light + /------------------ HandPose_X ------------------/ + 1) 手的21关键点回归检测 + 2) 食指和大拇指的捏和放开判断,即点击(click)判断 +''' +def handpose_track_keypoints21_pipeline(img,hands_dict,hands_click_dict,track_index,algo_img = None,handpose_model = None,gesture_model = None, icon=None,vis = False,dst_thr = 35,angle_thr = 16.): + + hands_list = [] + + if algo_img is not None: + + for idx,id_ in enumerate(sorted(hands_dict.keys(), key=lambda x:x, reverse=False)): + + x_min,y_min,x_max,y_max,score,iou_,cnt_,ui_cnt = hands_dict[id_] + + cv2.putText(img, 'ID {}'.format(id_), (int(x_min+2),int(y_min+15)),cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 0, 0),5) + cv2.putText(img, 'ID {}'.format(id_), (int(x_min+2),int(y_min+15)),cv2.FONT_HERSHEY_COMPLEX, 0.45, (173,255,73)) + + + # x_min,y_min,x_max,y_max,score = bbox + w_ = max(abs(x_max-x_min),abs(y_max-y_min)) + if w_< 60: + continue + w_ = w_*1.26 + + x_mid = (x_max+x_min)/2 + y_mid = (y_max+y_min)/2 + + x1,y1,x2,y2 = int(x_mid-w_/2),int(y_mid-w_/2),int(x_mid+w_/2),int(y_mid+w_/2) + + x1 = np.clip(x1,0,img.shape[1]-1) + x2 = np.clip(x2,0,img.shape[1]-1) + + y1 = np.clip(y1,0,img.shape[0]-1) + y2 = np.clip(y2,0,img.shape[0]-1) + + bbox_ = x1,y1,x2,y2 + gesture_name = None + pts_ = handpose_model.predict(algo_img[y1:y2,x1:x2,:]) + + plam_list = [] + pts_hand = {} + for ptk in range(int(pts_.shape[0]/2)): + xh = (pts_[ptk*2+0]*float(x2-x1)) + yh = (pts_[ptk*2+1]*float(y2-y1)) + pts_hand[str(ptk)] = { + "x":xh, + "y":yh, + } + if ptk in [0,1,5,9,13,17]: + plam_list.append((xh+x1,yh+y1)) + if ptk == 0: #手掌根部 + hand_root_ = int(xh+x1),int(yh+y1) + if ptk == 4: # 大拇指 + thumb_ = int(xh+x1),int(yh+y1) + if ptk == 8: # 食指 + index_ = int(xh+x1),int(yh+y1) + if vis: + if ptk == 0:# 绘制腕关节点 + cv2.circle(img, (int(xh+x1),int(yh+y1)), 9, (250,60,255),-1) + cv2.circle(img, (int(xh+x1),int(yh+y1)), 5, (20,180,255),-1) + cv2.circle(img, (int(xh+x1),int(yh+y1)), 4, (255,50,60),-1) + cv2.circle(img, (int(xh+x1),int(yh+y1)), 3, (25,160,255),-1) + + # 计算食指和大拇指中心坐标 + choose_pt = (int((index_[0]+thumb_[0])/2),int((index_[1]+thumb_[1])/2)) + # 计算掌心 + plam_list = np.array(plam_list) + plam_center = (np.mean(plam_list[:,0]),np.mean(plam_list[:,1])) + + # 绘制掌心坐标圆 + if vis: + cv2.circle(img, 
(int(plam_center[0]),int(plam_center[1])), 12, (25,160,255),9) + cv2.circle(img, (int(plam_center[0]),int(plam_center[1])), 12, (255,190,30),2) + + # 计算食指大拇指的距离 + dst = np.sqrt(np.square(thumb_[0]-index_[0]) +np.square(thumb_[1]-index_[1])) + # 计算大拇指和手指相对手掌根部的角度: + angle_ = vector_2d_angle((thumb_[0]-hand_root_[0],thumb_[1]-hand_root_[1]),(index_[0]-hand_root_[0],index_[1]-hand_root_[1])) + # 判断手的点击click状态,即大拇指和食指是否捏合 + click_state = False + if dst0) and ((y2_-y1_)>0): + img_reco_crop = cv2.resize(algo_img[y1_:y2_,x1_:x2_,:], (130,130)) #待识别区域块 + print("------------------------>>> start object_recognize_model ") + + if img_reco_crop is not None: # 绘制识别区域在左下角 + h,w,_ = img.shape + img[(h-131):(h-1),(w-131):(w-1),:] = img_reco_crop + cv2.rectangle(img, (w-131,h-131), (w-1,h-1), (225,66,66), 5) + #----------------------------------------- + info_dict["double_en_pts"] = True + + cv2.rectangle(img, (x1_,y1_), (x2_,y2_), (225,255,62), 5) + cv2.rectangle(img, (x1_,y1_), (x2_,y2_), (100,180,255), 2) + cv2.putText(img, ' recognize{}'.format(""), (x1_,y1_),cv2.FONT_HERSHEY_COMPLEX, 0.65, (255, 0, 0),5) + cv2.putText(img, ' recognize{}'.format(""), (x1_,y1_),cv2.FONT_HERSHEY_COMPLEX, 0.65, (0,33,255),1) + + else: + info_dict["double_en_pts"] = False + return img_reco_crop + +''' + 判断各手的click状态是否稳定(点击稳定充电环),即click是否持续一定阈值 + 注意:charge_cycle_step 充电步长越大,触发时间越短 +''' +def judge_click_stabel(img,handpose_list,charge_cycle_step = 32): + flag_click_stable = True + for i in range(len(handpose_list)): + _,_,_,dict_ = handpose_list[i] + id_ = dict_["id"] + click_cnt_ = dict_["click_cnt"] + pt_ = dict_["choose_pt"] + if click_cnt_ > 0: + # print("double_en_pts --->>> id : {}, click_cnt : <{}> , pt : {}".format(id_,click_cnt_,pt_)) + # 绘制稳定充电环 + # 充电环时间控制 + charge_cycle_step = charge_cycle_step # 充电步长越大,触发时间越短 + fill_cnt = int(click_cnt_*charge_cycle_step) + if fill_cnt < 360: + cv2.ellipse(img,pt_,(16,16),0,0,fill_cnt,(255,255,0),2) + else: + cv2.ellipse(img,pt_,(16,16),0,0,fill_cnt,(0,150,255),4) + # 充电环未充满,置为 False + if fill_cnt<360: + flag_click_stable = False + else: + flag_click_stable = False + return flag_click_stable + +''' + 绘制手click 状态时的大拇指和食指中心坐标点轨迹 +''' +def draw_click_lines(img,gesture_lines_dict,vis = False): + # 绘制点击使能后的轨迹 + if vis : + for id_ in gesture_lines_dict.keys(): + if len(gesture_lines_dict[id_]["pts"]) >=2: + for i in range(len(gesture_lines_dict[id_]["pts"])-1): + pt1 = gesture_lines_dict[id_]["pts"][i] + pt2 = gesture_lines_dict[id_]["pts"][i+1] + cv2.line(img,pt1,pt2,gesture_lines_dict[id_]["line_color"],2,cv2.LINE_AA) diff --git a/lib/hand_lib/cores/tracking_utils.py b/lib/hand_lib/cores/tracking_utils.py new file mode 100644 index 0000000..641322f --- /dev/null +++ b/lib/hand_lib/cores/tracking_utils.py @@ -0,0 +1,89 @@ +#-*-coding:utf-8-*- +''' + DpCas-Light +|||| ||||| |||| || ||||||| +|| || || || || || |||| || || +|| || || || || || || || || +|| || || || || ||====|| |||||| +|| || ||||| || || ||======|| || +|| || || || || || || || || +|||| || |||| || || ||||||| + +/--------------------- HandPose_X ---------------------/ +''' +import copy +def compute_iou_tk(rec1, rec2): + """ + computing IoU + :param rec1: (y0, x0, y1, x1), which reflects + (top, left, bottom, right) + :param rec2: (y0, x0, y1, x1) + :return: scala value of IoU + """ + # computing area of each rectangles + + S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1]) + S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1]) + + # computing the sum_area + sum_area = S_rec1 + S_rec2 + + # find the each edge of intersect rectangle 
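+ # e.g. for rec1 = (0,0,10,10) and rec2 = (5,5,15,15) these edges give a 5x5 overlap, so IoU = 25 / (100 + 100 - 25) ≈ 0.143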
+ left_line = max(rec1[1], rec2[1]) + right_line = min(rec1[3], rec2[3]) + top_line = max(rec1[0], rec2[0]) + bottom_line = min(rec1[2], rec2[2]) + + # judge if there is an intersect + if left_line >= right_line or top_line >= bottom_line: + return 0. + else: + intersect = (right_line - left_line) * (bottom_line - top_line) + return (intersect / (sum_area - intersect)) * 1.0 + +def tracking_bbox(data,hand_dict,index,iou_thr = 0.5): + + track_index = index + reg_dict = {} + Flag_ = True if hand_dict else False + if Flag_ == False: + # print("------------------->>. False") + for bbox in data: + x_min,y_min,x_max,y_max,score = bbox + reg_dict[track_index] = (x_min,y_min,x_max,y_max,score,0.,1,1) + track_index += 1 + + if track_index >= 65535: + track_index = 0 + else: + # print("------------------->>. True ") + for bbox in data: + xa0,ya0,xa1,ya1,score = bbox + is_track = False + for k_ in hand_dict.keys(): + xb0,yb0,xb1,yb1,_,_,cnt_,bbox_stanbel_cnt = hand_dict[k_] + + iou_ = compute_iou_tk((ya0,xa0,ya1,xa1),(yb0,xb0,yb1,xb1)) + # print((ya0,xa0,ya1,xa1),(yb0,xb0,yb1,xb1)) + # print("iou : ",iou_) + if iou_ > iou_thr: # 跟踪成功目标 + UI_CNT = 1 + if iou_ > 0.888: + UI_CNT = bbox_stanbel_cnt + 1 + reg_dict[k_] = (xa0,ya0,xa1,ya1,score,iou_,cnt_ + 1,UI_CNT) + is_track = True + # print("is_track : " ,cnt_ + 1) + if is_track == False: # 新目标 + reg_dict[track_index] = (xa0,ya0,xa1,ya1,score,0.,1,1) + track_index += 1 + if track_index >=65535: #索引越界归零 + track_index = 0 + + if track_index>=100: + track_index = 0 + + hand_dict = copy.deepcopy(reg_dict) + + # print("a:",hand_dict) + + return hand_dict,track_index diff --git a/lib/hand_lib/utils/utils.py b/lib/hand_lib/utils/utils.py new file mode 100644 index 0000000..76728a7 --- /dev/null +++ b/lib/hand_lib/utils/utils.py @@ -0,0 +1,15 @@ +import os + +"""Parses the data configuration file""" +def parse_data_cfg(path): + print('data_cfg : ',path) + options = dict() + with open(path, 'r') as fp: + lines = fp.readlines() + for line in lines: + line = line.strip() + if line == '' or line.startswith('#'): + continue + key, value = line.split('=') + options[key.strip()] = value.strip() + return options diff --git a/main.py b/main.py new file mode 100644 index 0000000..2c4d3cb --- /dev/null +++ b/main.py @@ -0,0 +1,41 @@ +#-*-coding:utf-8-*- +''' + DpCas-Light +|||| ||||| |||| || ||||||| +|| || || || || || |||| || || +|| || || || || || || || || +|| || || || || ||====|| |||||| +|| || ||||| || || ||======|| || +|| || || || || || || || || +|||| || |||| || || ||||||| + +/-------------------- HandPose_X --------------------/ +''' +# date:2020-10-19.7.23.24 +# Author: Eric.Lee +# function: main + +import os +import warnings +warnings.filterwarnings("ignore") +import sys +sys.path.append("./components/") # 添加模型组件路径 + +from applications.handpose_local_app import main_handpose_x #加载 handpose 应用 + +def demo_logo(): + print("\n/*********************************/") + print("/---------------------------------/\n") + print(" WELCOME : DpCas-Light ") + print(" << HandPose_X >> ") + print(" Copyright 2021 Eric.Lee2021 ") + print(" Apache License 2.0 ") + print("\n/---------------------------------/") + print("/*********************************/\n") + +if __name__ == '__main__': + demo_logo() + cfg_file = "./lib/hand_lib/cfg/handpose.cfg" + main_handpose_x(cfg_file)#加载 handpose 应用 + + print(" well done ~") diff --git a/materials/audio/sentences/IdentifyingObjectsWait.mp3 b/materials/audio/sentences/IdentifyingObjectsWait.mp3 new file mode 100644 index 
0000000000000000000000000000000000000000..8eec79fe82d75f35e293cbe1b9c51314c22bd09f GIT binary patch literal 17280 zcmdtpWmFu^x-jg)o!}0^-Q9z`TX1&^P9Ow#cXwxCaCZ+3?he5vNU$UXnJ+x+d-rT6Z?T{U;rKyGb^0sjvkkXIMoIl3vDE;|&W!iR?+be=)vDM;lx z%2wVzoh{7WkVAAKx0RP8+(T^78uA?Jq0HmCs{E{Q5GPia0`01V=2KX(N&sZDt&}aG z)vojnKl=?@(yVaR$_kz4hrZ)gub*$9TF*;w5?lT9MqACNj76K@>d#!{HC?^X1qkOU zjN#Wof*;;7eGZ?wg6u;ct$&@j(7kQGJo*J`7`w9DG%vFiq40Zt2CMl31gnA83w9&- zsbqP*$dtLM-O!@h9>SJ^f+XQVR0DFSp#$W*R31_7$cC>jCVopStGwKgfKJLX;!vvg zvk&wO^!#^ORgLrK-_x}vxg1V3j&XnHxU04$#M_l8M1Ut{TioM#8M(KF?HVe*H}jbo zLrmCtU;NBk(i4i0?4|Oe^|;XS^f*qQ^WOTLjPG&T`!%T?VhPu9w2@xq6v%oi&f9Wp z=_l-uX9{dJovo1Z(1vtSm&C43Z!d__h+KIo_+EVrV0Vq#W@W-sJk}WJp)22Ix6xhp zB-U+Cq&iv-Vf(U^kOO&XhV=R9n@Uk*5mGyIk6D_90UtKsH)aboN38pX- zs8;eB3=OzytTvgM3JBvb8oc{*AGCz-kuVXTdVWKk$DQ^gD1mE&=^Hv9*K(GtXZ;RF zSW@1Qlpv(ct))Eb_)NOjf32sfu8<@QU$WG8y!gbWS2Ekcu5ryA4cfeIowyP(cv?Zs#nI1<4*Qho>n_6Rl?j#kpgs31hd!VR8GVnKVka8xr)@3vBB zLvkyLZ-g}ZsadFm!G@jV#LB3;OOmO~$pgY?zRbz7C}qo#OneRCea%>ZyAhlHWiTRf z_PhJ?qZq5k9$U(U_bOm74U@&@xA(QdNG6s#sl(97!F4tUiU=x|Qj}#7=~aL!#n-?p zxA4LhGCtf8qfc)L1}t*v_rpUhh&pU=c9{Wqp#{#+d`40W_Sw0ZLdf;+dxc z0k1A$s&%|_o%z{waCqq8C=X;1gZ~{G@`CCYFh2vLLVI6nTRe{$qV39BnZ8DU3*Eqc_J< z?LM-Q!R3~h)Zn~7GQRO;biqiy8^KgT|LRvJ4)PAUi*1nP&~G_+> zjP0CZ(UOz8uilruVZD};d*e~r``PL3*fO>{YY^}}KD^8)UA>sHBDR#s^p{;*}gO7&z2kp0C*dn5Ff*m#a$b#md`jzEXzqG5g%URGes$*RPh=tu zYGmru9dD2Tf! zf=ZAGT#{nnK!sx`k-WOb=@#)EF}^PNVPklpwj3#pemJv6opkyVBF1n6|Hiv8NjGV@^?ut2(qD0r^ z0%!yi!i52V3sQ2FHsQYKQe!9c-vxMz!(aJ^Irf{dvhP0Zt_$v`e!w1enN!`Ln5Q~rtLotOSt-=cB!?RA8(O1qEQfrBjS|xw1fStr1T=Mem~C=S zxNA3lh6;n%<Yh;=!1wzAN@bL=~ zGM!(B!l`S8ZWF;`od|q%9Kc0Mv(#_--J0_1isc(zuJ_y4WEbm3gw;=R4b!mB#0+G2 z>@5Y-vC7=I1}i#+KdHCIks9(OBz9cb)InZ8LGIt|*7r5!JS8%2zQs|o#YI8mL`B8z z`fY+Ii4%z{=Z;3|V^(OH(;OU=E_dM6URkzNt$Vhu$K6E=dH!%{JM-lz2hG_C9Z@>F z%O-p-hy1f-dY2xz)r*D%ER>)H)=O8saGNo}N6Xqf^X$;q#}6;sL7%KG;WD2WQXBDk zq{o5Td3b%0m!cAOq}F=ru*;2b@Vi79^|aY0#7aW!>#OS{vpwUU@iHMEEJEW9m$hFd zHDXBCfWVTFJ%7VW1)U9jBOO(BcLG;tbUPEin3f;{b~cQu2aWkxNXiV7pPEsh3uvO) z!Xz;DJGxb;aYT2g|3gZ~i#N4M%{Nnb zI)(?XuIY%QPf{sxibxObzMItpDk*&S$Qt9|O3Fz0cJ z;3d|ZMu`O@`bCPe;6v+6z}ja>iPw87GK`q_5<)D5tQou8Ujd6bVk9dHUicsJ!$`4^ zQdyXY+FzJ4EuPsi-`(58Ztm$y4&9hFp}W3Si0g5f>P~!nq8hsG%ccsHWPe9g8CQ z;=#GJo&#r&AJB!D@rwmGXgB!6g$wk?uC=*QJojpuxC1n3O zpExvv5Yfd!o9t>uA(aK=hxfQNCk*)10 zev%`!4F43{=C(~=vt2_!|GhRm`I~`4^YY{tNqiN9LODJL`y8%2wH8SIz3EP!ah}qV zi~(na<1rQis9pD%ccuDAH0jP|%`;Ima>o(LkoNk+T(?5X;z1j_`HTG=9$3AyLF=@c z?yF4qkS@FaM`c5&+zDH+Zd|u7a`W`IHO>t`(<9l5@l3MBTBHPjBnK_atSS$ zAWlAD>_}Nkk4nG$#!ks1>ihNF9I~ZhfFzDu=;y?aheB1SYD)1CVfvO{X=**-=BsO- z`2g?8m_zqPpj?e6Y6sNH*gGwAG|zlnv$}ef^B&@THy`Jr%1f?U8Rc+-FWPZ`!*x4` zEDIO1B&bC%?ezF8t!Mi243Ivk~h0p%5tJt zw;*jk85ebWZK&}zL3W7*6nIKur3>DBTO^8UKbk-!pEHd)uoU~BH~JoqkGF5NAT#FD z`m7*&#&K4;p&{l!S`iVlBc{y2^x@Y?G0lNF$zPT0U9rA;J-Kh zEy(>#F%JIKb;ykUqt$RN!rRbYf6;4w*Pcq_Y9kr0pK}on{5DPhsBH4<9$Z-B+BRve z4Q6tb&$qm{;p8eo%7ior>YVJP<>uz5PF0??mM7JgaZy9r+fh|f9=laTR*llT=hja0 zr!>XF>e8}`l_hjoI=YRKg#B>#x)||;x=Q&vQCWDNqEVD_)7|+f^ z-qU)dt^0Py7{YZa-$Og;AB=`t_0Iu7h!+zKa3mU|wBQv_zd?6J1;Ae4HgTd|G5-&!Qp@8|C~5nhbT!#2W%6kz~A= zvS?jxEM(ELJDtXY=>q*fg8!o70Atx+V?%1vxp0X$Ob`h47E7R=kw#vEfe^L zk)@jl@`0r!EDMH#=1*0UxP@s-qH)PuD@4|qQI@Uwak%Mg>4=6AJnT#A9<-sGC_D^# zse`n@Gdg7p0zm_q?)ca(obY&8sRbjBH4!S+U}oXmHn;n+*j96Gw5M{-+=xaLd>+N` z&nymv;3M0LDw^+KSUy*M{)P3|JPM+8fM*KHn+06E(h5*b-|t*DE%Uzwm}oZ*>3OGd z9g$vnr)}2kyNh}gf0UuYPCl>tFmBhBb115Tc(--`1YayTVZDSGY&l$F{Z-EwF}9w^ zg#udtHfC5&%qfU);RSXd&TwdaztDVE-hMoe@p`TnCvu?9>K?j3w$vO7wF>>1dbL3} z=L~`T>q>Jj@CY`i1Qj1QWSw4@itK^$Gd5B>kK4~u!_-dp_3xia%R<5nJb)Q$+;M7# 
zqqL_&B44u8Z%TiVUTUY^e$5Ol2EUQWa3g$euXIIAJWyZuEHn&#AQuQ~@WO^|RGmKZ z4MFdd#3vOcC=1gv`zSB==DG%!Se?gf0}Vcrcq^uo!@eyzSAa%B5jS#H1S9E=#UYN#(QdpRpDl-7buJCgaO-AUSlwCJ{Uq`%59R@dX16(M*efy>bnH|UT5+9% z_w}EAeD>pzso#`Q7Csuxyi;QVxT8eg^PF3oVAG0IW4yY!DEC^oP1bCo({l1y@sn>u zi?Td%?8|dmg@5{&L?|V*)qbKFve1e#pwB8#C z`A=x$omoEG<;0iqWlz!;Bz%6ixBte@>B{z6U<3pJbm#&EkRngNI^J_J+7yZ1!dWVg zEV8lHM`>(NP<4@F?coqZ07Wqfz!oZ{`vr*aUKxCTMU= zc2{kbU<#HijHv&P?j)hAw-+_FGjkz%&UbqsCYH>@H}rss6q~I3>T;uxVFik=OGmoT zvSWcdp^#f4N_lFPYI{Gi)JA;tkVlz{1b1eS8_ATyum~OrE6d@IKR(dC->qO=3z#5Up*z?_{}!pwJnlZN(^>;j2@veU7vRMSQfW;xI`+QQDsdB> z7?-5aNyCjeeIZVShf^)8TCvj%sv7d&)W8)f=z1=~Nxn2-g6Bsptm%QH`<+_WYl#C( zfHZ0^Y>8M#b|0pT4=2VNC@;-`96S^J-SM0eN{}&+fO|&6z_XJ2o~46GN&MB7O|yFm zG@h51`rRFD-!OyE+%Dhgh0&}ckD1p`XP@{&eP15dbH^qigO;$q zn38jT8~y1{&q*+5rqRK1kx{qra*V_hD|Yf`z6RW?U);BHI@rAIro`k_S5mDBxzk8D z8Ru}{oGpU`G!95Eo7c?isGYjSdw%gho~!*I#raqOJ=Dd0;}|!Fx5J$5{;R8+v~hk} zbZ|I-Dw{{2+!Qk??@&BubY|Ryv5_H~9zzQgv+8kM&u{XpAuJLSxRPEdfT(jSu=XIP z;3BJPGEGTLZktg*tL_tjxX|`uwm3t4M#|D{d@sQg{(=Q(HS(Ar3AgTXuKNeKmU+3% zIYg`y`R{_PKJzZ^-9CX!Yv1(WIa>RM+oUye84N08^ps)G-wZkjnfomk-4sPzno zLtDsNY)P>C#pkw`iAR(INZ2gTXM>vA&}e@2UJT>qJ2Oljn(9JzrF4RyzMrh`Qf2GY zV&KiqThtThc&GQEJxkj1(eYzKLs!SLaeMIT<-=0RJaPu(MPf*{&)o`t7-uTs+q>y$ zioso%+WBBdRH^#xy{Yc+pBJg$Y%rIn^On}2w=BK7v>Ui&@~@;D>pYl|q@fUFmr^8u zyh~u5=Rm~eH>9Jep$9f+I)zLK~}UTV%6hM?uxsBy(3gW{*I7nQvp;BRDs#cNvkYly{l^5lro%F5BI9I zECj;kP)lIrS?mv@3s2y^RS`}FjGx>BVIU?(0p%Nr;b~%u-MjRj0Vaex`G$da?(Ji|JJYz11m%rJnTdR2ui^>#Bj^NxLJAU zp4u(*-?`jv%Hr=t*p&5~GDvVXJL7Oe2yR|oi*$E*X)p5%n^R%xwP0Io6L_x3vegN; zh)Xeiz{%Gnm<&17M9*;zOWW;0wZSY-4~IHhENU|W$T6Y>A_h6N`h$DHeXL3VK5mq?_J?ZZ*!xe2jQl>OX5DSj*?~qjQg#u~^J8+)6OPU-Hs> zxN5z+H8SX@6a<^^5}&huWyR~o;i8BViD2m6^{fdbUR^tk!|AX{F(0%$Qkyrd$ZfS$W@;wr`VF1QfHW)0DpY-L{Ox&9PIG-_QA!hM zhm#hqRr_Ix5KsNX-T?XasHXP!8rhGEbjj%i_@NS3;Be}CQK||mJRS7VkhbXR`tQ(h z&LYIrr0|fRNdYpsKLbh{nvG;j4YN|&wl%y~7^>((GS$6lN)u>aT@Zd|yiEV`%%>vS z7HGo6V@eQx(w8Kr&|4KM*l~Ndc@C#0k#s8`<81qm^^$bEj?))9BnXmvLioh{47)FR zFIJI8+xE}UQ>f&WIMAUbD8bNDvB*i1DRv`P$LXOl8V2~-fB%)?iN$wq%e0t9Wb|i=C6jwNGzHt*Yf@E zI^#dRWy*_IuhrNm`RPAggY7Tv{6<^_oK{sEra8Br?H`82!I$T#Ldtyeu7T8S zo9}Jyj|^^0Yd58tHJ$Q!U z{a=!RdsZ~9udZ%NnC(HsA>;t5oGPIRMX#tM40hG;5pywvnd5n7sR;5jO{y4nX*&*G zzlQUZt!Eg)S72tZDGF#b>(t>yQgvMV9?qE2OWcnwjzuj#8MSp?CJ@LA28508b2E-M zR^*7sK_1dj0OZfXMAz!CQVM+*MT~v-QEkG$^k~F}z)I?~T*cDe2E=K+lOgKyIY;&R zK5w=hM=#2IjUWYS#g-~>=ea~ho_PrUyEZu{m(v+#`VmpxDzK6s>{gdJU;;L=+K7|< zQrn6hH&!o+flnL-_v)IV?Ye2{9+VcvQjCfUSu;dWs78(`EuXY4?$h5U`P~fU-wdZb zop51Goy>l#T^mEkG=Z(*Q1&U^b7RmEy9`y9sxUCHN0WZ{_}$KxzqR%>{|fu6FC4X& z!m&Fe^0kp#Bq=4i)WAJU$OwUD?v3&gzQ(6_KScsqp4Yg@%66G6oOd8>2k%~(h*q2GLMM3}q%^56hL-{70$tl4Hs4 zr54zWm41b;Mc6Kh725e0|59%L>;2-Y!5cLm!g>iVY)c!6unDJ%P^eI{JRjVYJEzyr zl37gF!GRlbE0SSFgF_ixiub2C2IgXnE}xvM;vg;FYu4JDdlVR>Ta2dV@Ptn_^Q0q9 z*N%VoG8~@`%$HmQD&*NwZAk!?ffiHN7G=k|iitIX9egM>`^9E{-JLuq@uulmqHcSO zlquiRag%Z?1uDMZ8NgS~7z-MQ#gt!|sjjv;27|`QLn$t#!cDjw<+5s>W8p?o^+FO=Jn-1lD1D}oTAt`b` zGV)0GhUfd#0m*VZ6dy!BG9uPipJUYIRo&ikQcY_OZvWT;3sq8j9rNq?g|O!W=W(BE zNS#M^v04<3`W;c2>x@zH8|TP}BK+ zXVc`wRDaorNf8H6>*n7IN3<1HB@FQuC|NC@=ga1BUAurm@t*M$>qs$zz>;l>RArh0 zs4#urF&QcXE?J9InnYy!Hb&u|iRgYpuE8IT6%Arp**bN!&V>y!9$PuKu?=DcZQ4mZ zv@OJLMsOaJhD*-wD_pJRphy=+09C(iXf%lo)53(~Xin2Nw>}@I$sM@~4M7Dz=qR0B zZ7ZiZE2o9`NSg1R`U0FJs%)^VaHrO2@7!~w+?>3mmT=P*`)y2*m;rv$C$yYc@VSwa{Xf=IMT=`M zeD2YesA&uDfSi}Pazz*Hc#+?UunaI7(>?R`t1X6Yu2{`_fBy7WSC}>%F%Z6Q_Iqwf zgA+8S+vwtw9JJ4wH(2DBGgPSesWK`jbRbAs77(?H(C=Q4 zw)qu*ulGJ#ab1+%@tXM7w&Rux(cUfv|JYO;nCD*FlS$RW9?^#Kr0eo}b=5G${Lo@w zOWevS^!<8%gpl`MW$!KqOky`~*jZZ3;gvoGUu9NlEj@89-x2v#ZYP^;wWm4~4FR$v 
zf`9s7H6RTYEDF($Hb40Oa*leAAndUfWRehJe7g*VMV-VPF4Vm7-9g$D5qzX#J|#A- z!3b#!s7sn`mR#UsxwgV`u?b(#OI4}!4y z|4pQaW1vpo`fESHES-I)Pxk}@d$Qbis&?L76?9qv-k5JNpYQEA?PO1mW-p{D+|@gdsHZ3Sj=0azv-F0 z*Zx^^e4sXbrPfG5YkAEO{a>m8j3v?Q$@XJj$K|5#+D!HAhET4mAFR z$x4r9&WBpRrsXe;q3D7!ixgL55}PrOB8vsQt0lu2X8#dYVz$)lY0)d$P<;CnTLzSN zf@qcU&H4VE!?pyDAU6lDsEFU?4Se$DgvDBX-_iyjOEr4LBaS|eG1yuNhIJyv=*FMB zspnDZOi{>T--;GC?jdyBO_kRx=X9=bUju>&vknn1!^~yt<3#_#tWI6Vk6%z*{STM+ z|NMA$?J<2K@-?17&HHy>!#k!*A7^faMm0i(5!=-h-m2$nl=9!4(=X%<+Lclheov#M zbM%|CBubwvf!RdJntB7)*bLX=9Ct2VTL#R*jseElZDZizwYIvwo#?EH2&K=CRvrjP zbu4%Ed*=XgUtMWYIm~gKkuoMnPL&V}L3)EURz{?I{M$Uf3WvBjk0CPcf9wKSF15-% zq@1n9`o6l_$dh04dVRw4S)l~_B|~6?5nyOhkxSW~=VV8s~xgD}<2 zFe}-d%~sJcQ?hJhfh<_t+Mo3m2Z*4IS?rLYaowPx=9ly$&}f?RB>#*=7Gj&Fl4M$K zl!6}a*=5l@@i_Y!;NtMicW%=HKFmb~Rd`|`9*>){H)jD0We){hx_Si*Pxtv)v!172 zH!vZ)JbQeNJ60U9aj1%Xdw03a)*kYqf$+L-67WYtN1@hz2E3)e9`K3T^8G` zZM)bBJXDfEb|G4SbuH6}c$szY$z)}9sZ3lEh0`8i@dHZv#YhwH9*^oZyF4Q5C=z80 z$GUN;V8?vPC)|ANG!&-CnHAo~;U(!VA6^p%CCM2Q;3BpVi=>_X$f)$iSPmPqdCy2D zobG86Vjk46ylAsmv>(PD&?L~ULIgJe+LZQf7(7S#)m9lL?m{=5=pN}K#BxwWZRfd3 zxtgn{6wqbiVruDN;rE+WW^PmGr;oXU`F&Zgm6OPw&Iq&2y?|D{yd_R!b>TYD8_AH5 z=hxwYa;eHDko~c$6nyNTE%@q!Xj=A-C>|zKE ziR9800#2P9PRR;+J~8ID3Lo!3Y(Trlej~8@zA7yH6pvt#?TQyph0>Akns;>ViN_-* ziDE&?p-`a-;@gY1Cb+IS%^Jw-&^VkpX&9Fw&F8JfLB8U5xNk zXfB-E>M#Rxzpx_Jl zuGxHD(UCa^Q89`D6za5a{M6(p!I)RNrcLw&8?5kE9Fa1bsF;Z4_Ru-Y2$|PeZpDmz zRN(~>$VMr}G?(-wygZfn8tviT3-(Zkb`&(|%* z9XM`+n#tK>t1f2r7L<3~enarT<8+S3_ocL@tn*PX*~?T#LnywH%3QcS-J9{P(yQXp zRIOUF#+0HW0?5N^OurQ)mEV%g+clPE8l+Sv*WUkR{rL{!Kg#RB+H041EW^mjbC3Sm zaf;?vyya6SYIW0=^T{Ku>zQ?$jnnfW!YVo!@6C8c>>DfblU)Tf(<@8jMg=l>9Mh_}|7=OPNwBXNi1IwAa~m9PDfkUC^^+Hf9@jD+08 z4iC+u&qo>fYdHJRUOpN!y&E1(Io9}v-fslO%N@(Py}o8r(u-ZEoRRqVacX-tF8wFJ zEmhi7m$|P?O-;-GHQ={S()QiF^3WcQa6Pq0m(>bFPotT8x(DbqFmW<-29T4Q?moyf zTWU=L=h5U%l!N9$WYSz=YS-s#1v!gAyK4A%3+&?LQHM2zAF+Xhe7{v^zgn+PYr7ue z@%1r-C}AFKN*SU1+4oz5P$fv6H8TObNQ=OIyLfwzVFnueN;BHLg;*@RTGN4e-fsk|gg_yMQsiQ*bL32l_)Z`APqhWPHbRCY-`2q5)<1u)gJoGIdyBS+N?GVVCf|J6TLqc+={%buY zhVcd%*112~fFFhZ;iaeR`^^-@=*#v#T!w z0}N38X^F?uU3txc%=m8Ov0E!8M>L}gwR|~9U7`v)1gbT6M-I4ISq}=mO8aq;YzNlE z>QN*~pw45WoJz%@0q_t+FRxUrU)r8~_4lvS!UXnm z-Qp~KZ7<;4py0AT16_bN(hw!Do1KgAGiNZ39LVe@YK{a+=P}NClkLj8qLaDhu1(a7T`&SGLdiebJB2e@1>Z2aUrL%lekp;d;oXusA9O){& z>a>kU_k7@6qE2p;cV}rGk8yV2r#zA0@*Q0~tQvoQ2#xHvo;>$9923C@ogTVto7fYg znhS`B9*WTT(S1AazPE-5&AhsNxU%P`O;-D%h6*;@(%Uw@z zuB_B7Tlv}d@QE?_PYN)Sk*SZRSvm5SSP4L2s*e_;x!eFIA0kQPHC4(E*TaYwbH=8K zYsxrMPhB6AtHPp9YQRg&25!{WVuDknC`nl$7})R00a&7J7QAwII9L zFiS1vm`Cf>7K$R%8nY8XJU;pxPm8u!mnU6y?cnyHB8HtZ-W7B3m$waBQ?hno-Sob! 
z*`p36Qp$;}#WWq~_}Iw7azP8HbBRl%KtRcR<#f(*R~saz_<`=Bz?gP;WOG3O5HmZv zpnX`a$DZS^APk`3px0zt`Dd7E3X(UotzkKbtTLM9av9!^{;!$5raVj6L+oTY?a44G zS02X3cRVx|IPq%ab)iP_K+I_=G5&`Bh42C+R@Wse&@ z344KE@MLI(u&wY^^Z%Gb{{K0(S630WpiE--S{VI*$g};+sT8hZm(!2+jW?SUh)___ zu=5*TD}G($Lp=D={XY%_Cm!?c>SdSln{Z7p;_PFYTO`n+ax=HAOB#EQ1rX z0RZTD{fkCLuMNokT>T(s_>M%sJ8WJ=*by#1X{@O!MHFZRrtp?3G?f|0j1YhVMKm7$ z>Z&F!mie=8E(Q%n4@ajk;$y&MUMzP)x|7w?HFT)))B}3hL4C2x{^fee=VrUk zzuIQ{XMz6W@Zm-}U#C)IS7*j2Sqsm$yeOv{1_2W_lWF5fhewlh`{k1>$@tQ{>=D>v zNi46mLzz=^Yl6rj_O|(auMu3-=O|dNF)vIn5%gW3&Gb2Gig76o-OF)XT2sB{;pM_k zQ1Y8}$xoN0O_H69&A*3)KTMo3Z>B5I9^wXQIKOYOS5=)9aJ%D5okGi!((yIWadx{~ z5lXo7eCU34K_nGNy-afZpYF(%=(VKL}Q;3GmB66@tGI0_2^rsl4r z7}3urmJ&F#TRr%QUWM5(ws-3rg?T_l#Sj5UU$zBCWX*`;gPcuNZUzS|%cPH^(EJpRYP|YyLC>E(l zENH4NF2UKW29@3IcrcxzOfm>Z~aF{BNfbSVW53}Llf=;UcO?!J<BNbK-+a_%XC zG->NLUmXIIh1p+SZRCS8%hN;flLaG^Rd;^VMo4-*mI>s)5c<)S8Q#y$27COOK1QaP zYULW>6cwA}VLB>OO3V`~{u_6Rk)}2h+Hr^m^|ee%YzUls zTjq9M&BVI&GaBvqu9(P zGY)OLys=_jx}^Hb%&;Ku$WqIi%E+a%c0BKRC@Gp+ps>Vn5r2`GKgXPytpzRugfD}U zsOHd4F;io1f>)nGJ@$DyP^F2;x{+`fjtTFMkFm)rpvp>?>yD)c0$I^kt(ZIFf$fF^ zf@z}@u^7c0Cu1!BWz7A1oBe0yS63_fr|t1xUn&2;-Gtb_;kIRzJ|>ts&heQ&RiVst z;fv<4rV4fL$oee1#Atcjm>N583dc4HDaNtJG3Gd|B{+FA+O!#%VDp)8+|eF$=tRLI zP^Ms3{Ef0O#uA@Qaw3jOG*@yOTm(Y~qTiLc`NeA2NB%P=8d`BxTB7!y#KKZHPHoEY}$JxeQT(XGyDB+2a~a&*sj0-q=rMlOdx#A?q;LaE#uHueT6zA zk4~u!HDv;)aZ&``HE2qK{#+eL;hSA(RU2PUlTC4urfr%KBjrNA$tP~{4{(>gn>##v zpg9pZ(5I9+>)hCd!&If4npV0*?~4lYssC+1gm8A_Nonk(cE9#RfB@@Gz>h{b|I$w< zX7%_SH7ybSz-v!5rI}pDM187s)s2tow>265Cj3QJ%u7&=D+wMNf}y4nu2X6_#Z4&f z!>HtN3hYv=HtL7MZ71!6MVbQ*(qlqxL0KJVcUT7P*plYcm2zzc8SnDUIVaDVCu%h# z)rufTHd&PJ^zUiRjF@(QyMtYN{Z{&p!k|SOU2Fek2^J~;rSOyJKzb6q2~&<*vgNK% z_;KN}=K!rKyil6pE!-%POvqsSQn@tRYK{7)IU1^8g30VNEl>X{CG_nF%TO z;c4M#Ua}DOJ^BVDCIHljPm48|jdjj*;hdBtb(1uzAR)BVJd(39*Rr@{Xb=pg6{fI{ zxiiPc%wFJj%NUJ>ER_XNP)73j^L&UT{NJ|M6>HbF+|8cM^S_N7|5OPlE_V|^Wl z2;J3Qlo0wJmJ6v%3+xy7p>c zHhSFrhwE?e|9@9}ZLca)0q0udPgv0Z-OW&R)UD%A$#N(Zd<4LBKA5tz@jVgdqWY75 z(v_OmF=YTS%R-D+{?SvGf7VK7Uz?U31i=Q5xwQU0roSnE84zn{}L z62r_T7dCm4`W&QtFvlB38c>kl!&c@|JoW0TCT*C`{u4Kh(^>S>C7r2^?yl_UO+R_r z9*xVV;&YXD@b!2d4Z4UTl5T3m=*m1k0f3knpS;@R(`cKauf^MC>!Cg{ZW7Y{p6pz&VPCWeMr5$@#?x@ zMfoAeJt!;Ulr*A_ca4JBh~*9IdfKlNukGd~S0#rclqVpYv;;iG^$pB&Tsq&r$SN8U z`dDAMHijkF@P3OJezrs}w^XHglq`sVJ8NZq;pJ!yySegasov5zjtz1BMh`JFL6icO z6!CZ=m4I#+xG=0AFhmjOK?R4F#sB$%%2l*L)D3!OE~B?2;~rYP$TVIMx^(EMsR6J0G0be#mU@(F-riq9+hxp;dr8XrgcggzXMpoe6 zkl@m*YnFDw+N#$lLT$2Ke;^7maE?JBOJ z#p-LhhOfK~IY(lvb-A%$1fx+$k1D9O8j64TVU$Gb@}?@^AwS3xo0#F3Wv$)I#^bfu zw8;4q$g#Bb~`YXG9k%MUIO!EK&Or%Z%l~84E>CaAE3( z!?vJ4+j5m@=*b6$TR^r(6y;-3j4p)DYBH1xm8yt9MfQG7Vipi$k5^C^^S*vfs;+J}kFR~U^q#G@~FaL~}~Pok;HNjgzV zL=@~^IZ=P9nnnGHT5bs}@l;VM7g;r8-3Os0mD{yj&hu!3m3ixiHLFdlvKgpx=R2Um znqpkGqhXTf`Vtw_oPK|~)RoW)7`Kg$syVRAp=-YC>P(ku^$Wh29&aEID?&kI7(A=t zUu0^Ea|2<)bbAH!NO(CMrW8aE6P?bLq^+(&Jlw>0&O+xKJYUxQ#Bo?*fzFhJI#^K!T)am& z^L-zlDv)A9+>xc(*;IT?k)3ReU23_Cf`kgI(Wmi@nf!m!*hO}DD1$u=q0$QDS-TAy zV_;9p%q@y&tv{>5@x5N->|Sj=$NkhATXN`BD7<)cYQ z6$T-;yK=A<^5LjP$~F+=j=Mh0e(x~nO;fkaVI3TDT5tJy+7=Z)=4w>EwOk-h70NEY z*2n+_qeMdBQvM~G^v`~5sAyyoX&`(pJ>tQu%bxxlqh9wAd>+@w0xlV9xoPJwo1vYO z?875f(Ar`b#82D0c-vsno(~G~)3q$e`$MD3B5NLZUjT z1|JkeHHH+F6LBZM4elD{DnC{ov#lc7T`ZJVly=eeIJGS({jmcsO05HG;JGI`r6=q# zPSWREIWU1tqPjs^^a@A<8}sWN$m({7k_Jv@$wKpHLmx3@gNogQ!c`a56EN=a@>9KB z#s$*1z#s*((D}7yq}ukoA;0;Prj6$&Q{79aBvtQpe%mmr$>!AkK?Dh1_4$f@hV3+RPGXPbnW2P8)0;y&f8Ga-$DBL`Ket zK-n&W^1HC~eY~qPc2(nu%~Tvr2w8e~J4y*~g2eOp$J2;_=;(vKvpl|c$}Er%z>@&T za^zFo{CMkX4e-JHy}+l7&0{w1&Gmmg)qgj>{_Xm!tNHDzf>W=#62;%@|4$K({;`0W*BO5v`hvQZ=&&slJ~2c=`kb(YPtMYZyzq 
z7SD%tLe4cSDm~^(%jG}y&5c8}eXpmb_5}3Y9-LNoYGX^Fa2cgWK|(^F$`ea45kZ=G qR1mO=QCo|sLt9FL2pFP6qa_9W-@4k#7eraeCJgS^AmGr0q4UD%+F38Qzll>OkvPd zskeUkz8pZODou~M)!yOW|F~u0lRFTGFVT1aN0AJPf_1KMOT>ATkNv}EdAs-3i9$gd5#I_{8u0`+LO>=5aUFj%@rrEGupRu{xaqQaU z^68_0m8K{M9e5;h(kGUlE;HKD&Yhe1RTDSzdr69$~wXa#ydR zR^GFM{*o+>&Zwg|85GSQBU@yu7_mx%f!@+pM4oVOOoswd81a8*22G$WMMY*3-`}>T zGuo+;Y>xS?X8js+_1;fU?MT^@tw+Jg;*s&FLeh?w?+rW9^r;q`v3*)9F;>-PN{g7F zli|Xz)asO{uiHP&Ty~nk>S84Xo;stn=VhR4h!n}1S-(2l=8?^Y`4pZrG>JgCaF=y3 zW-yp7p~fr__~or5{sF^eXI@7`&U=P{>peo^|OnFRgbJB zbTuDf&=6gQGz#v-qeiq#W24XIP>=_Vqrz!YaW~^CbIV!V2}iREu{rlF>Bz*hWOC6b z7oLGdrzQ<8-@O2dGqP<2;GT(oa9iQhDzGC7?6qxTc8iKn$Y-oSWflWj7OR`G z7c0N%&j+=g(2PZ6uQ&SRSAXO$U#pQO{{vZATkh4cHv7Z z^$jU>A!wk9Sy;L^eVYspng=Quf;>voXR%Y8Dbdh`dXRaxEFEqGS>(iaqT;OYKuZ{W zH!VUlr|XxBaTK}RABgEiOz})i@%j$4q15EnH>cOt$8aEjGBzHoM`= z!>HY7mjb;xutR8I`iZ7lMo3=allxIsy={%4=wxbhD+*V)pSE$^z5hjR`NU?(56Ekl zY+9Hye`y`Dox9uqR$C&?4X{qn=0Hi$MY7%6YvNKm?72^u#}bkxdlk)`kz6&7KDl3Q zce0)F5D`N$i&M$`bzkYYq{+2Jh)Chn=6vFJ{=PpR+bWg2kUy4gbLl|{&|)?HH35c0 zSJ6)#E}7n>;B=b4?RB#DAokiupHH;BZ|Dxg&NN|y)$&InxFX$Jj10&R>0q1iaG7vr zP*2yFjnh-iYJPUvv%z_L0e4b!s1q)(9|iF{BWpgg8^hoexOTeM$VwbIV&8wP5majH zOdHbyV?hMQ>;!Z056 znIZ_O!D;~v%hJ=C%M_vJuQn%HVt>0$pJRKOOs_lBkQ6#OM!Hzv!PbfkKNub)(Hro( z@JRXMV{f$bcAy8CK|XQ#+2uy=yx$x&Xo3f@IIJ3Ah1XqeC+-YR zu`2|M(rBF@NLW8sHHW!P7Es_L1Wmky-fgdXTZ?Vmzl$AH^hv2^mV(8yQQXGd++SGH z&sAZ66O|*wt_Oz;)Sh;5EF$g+8n-5QfPv03ma#3^j1S8R6$fxl7pFzl~v=fixc3G zwMBhL7UN17RZj{Y%k0_JM{}NADl{)`JdB8cW1m-Mmxx7}=Qs#Swa|t)wh##dfzed< zWRBuBCVt8d)ebzXjnJ(|8WQWMP;P70B& z9DE$6C@*(v(foO2JWr_Nz~J~F5Xj#z;4iXHeJHVn>0&~brX~BZFf=e>WhB+g{K+V+ z&#ra4mGcC{wb{>cEqXh_DEU51auP#EB7LT3a&t%Mq^TsL#FufeckLQlwgxbk4jx0} zr|=2o<=78S;F68fB~^@#9hNQ_L4cqiT0t<;g;Tx1m!V#CSjx8Ba1MGFtQ`^qFLj+x zF)>~WkO=G#+zTiYAkz>@#lg~@4Vl6`?3T&IFwLCGv4xz zI>=(xSn|qKqDEsHA_6>BF%l&FMF0FYMHV3kDQ=@CBS)aWfDis5H1S$!(VECX`Kv_Z z+nd`i!39-TFnNCofxQsbS=|B3`}&z2c5m@eIa34G1op3I)j=5!sh*kB)vzSi^_zP; zJh@NAJwVlQ+2nWujPnHg@J!2$<@`vF$2ygx)lI>KKv!gHtTM*bF{;=wIcyXgvF&gK ze@Rdt43$-ld)`%}D$(bD~p2WFY zzQU?FhMC@HO<-p+2E%>amT90;jkS4cHOnQRqh@a6zJP?@-(=sK;ag$`RPPGN3|oJf1lPbbHkiLWl~l}I_9`>$&AFO~Z2+F|p>AL^E0 z*!go1*51QIGw(rUxW4Sel(Jl3FPy1eY+Vr#599UBg?bxOfCxY~KopQ486(a(7tn+H zL_~*!loDGwwq59~QHpJR)N#Vce`R&Tf*pI_y%zIoE{dUK@&wvW`c*381>jif-Akjp zYI8>lTyLZoFaWYdbk^XsIL5P$PZlsGI9QDO3rIskoO@UY60)4X;A`$||D_>@A5gM+ zFEjpC-rBX3UTI;K>p{FA5*s5eBkkEW_-bCRk?cp*Vpfny<+S=&&sLpmf9O$HPE?d0 z?vY%Cv(05J&%(nIR!WKUg`~Noy)P}<0rcnC@1JJ6k(L&ynMV6iW_y^?%uWlVrP${Z zV-=s~7&dvQ_b;L7J=brbxXxQqQ4zGTy)wkvif})i*>qC^(tYWJ5sksPHY7vvVGM8T z;o9P?w9~irw@-o)fu=oQTO}pRIZ5@lBZUGH{&4-Z|NYBRIj6zeqn_;epYp}{ma^1$ zVfNv(F)!w|YN)RD((4hQ<_HOttT)Fl(7yJ1B-flTKwa|>JZ5sC!f{cvAsjgo&h7A)WG6a~$0hxFu9CW$J;(LNfr;ld` z)ShC{W&d)d45-Y>PJdvtz<$PSSQDc0%NVXYM2Uz-4U^u&Fy4NZ{0*~lBPVuT!k7P! 
z>o4GE*8**S&8p!5;?rM(f443~s!Gg9c96WtUV7v8!kVluuo-N27S97*QO!3EE2v`4 zY4DLYs)|qq*FKaNYeR40B}Rwx9?T-i1>4V3Tx>a`ud0*tD(+I*G|bq{T3iVgA^x!U%Z9u2k;wK0rP+J!bX+i`O_{KzvJtQJ$t@&@9mv;n~#!3OxVV z-7mMSY?Y?GBvo&Ipy_CSL_DI&14pyfG)|Ds2e0LTsF90d8ySgw<#10tC{GXxADX}& z3!iA2{4l-ygD&8Zih_Vt)qGv@*elzB@k79DF-2Q01d#>?cEL%-`jf&v1wqPkq(DP^ z=PHkW>ni^F$B{MaMWky{qmf=v4i5zKdgl5)B^Nr0^F!tAdw#5HFf#?<;tE zyDn>D8(uk;rgvdj%9mS4IB3DvoOS{R!nkM;Xt(BQ5n%}I6EGN-4H((!<`(#b2p!1T z>QP*KAgr(rOSu_wPSsCR<0R${hJf8WczoNCR~PQWYVmUEmdrA(Dxv z1XML(kQ~ovvQ!vmLnIOV=$AyFnSx0jB<{&5nUfUa)%Q8hT(+B?(?g0giPtmSoU;ch z&AE8xke*!+>UQ|Pe#?ekls4~5R^jZ*8q)n}t}2D`%t^6EeTqhIzKP=v5w}+m){b-K zV8xnRX|?uQMRR!X(-tT8XpA`I(Zi*Gmn}FNN8C1f@+m6>f5v}FWhBQo;@exE(Y%VR zM+~dYJ_iQ}Z+aMdV}de@2w-c*NwPAfgOH-s-r-_^7!P1b+l zh-_p2va<`@;#3zxp)c>&^ak0{;0j|etwM>UOw@?SN=G-36Ie66DGehmk8&!6uBbwX zXln#hNtdZfTXH`3>~iOS!=0V`!IcaVozpd1`V~aSe$oj@oBGi0b2>8RCrtBuOdI1I^;DueNP(dAWWua~{3XNRiT)!49Q+bhst=L%?8Yv~%L^hG&Sp!9|O`K*05tWBD0{W1` z(hlE&@edX6-|jx~V#yC96aMKuN{f|&zQJz|fmcAu*T(9&3(DvFHM#g!LwRTwY=yG- z$eh)W6v^UZfkY&Ll8XIg)sli-2ksNLRT>C0q+q^ap^PYx-3UrkC~^Dj*E>5mj1cbd?8+t!65BC+7uGE*>J+?!rkx+4 z$j^nIj%^W8%KDNzW^!yA-wXp6p6f1fNqd-=L-H&9u=8W^}L?(tqKy6?&6Mz~cdtj~j=R&5V%`n8C{KzH=n z!1|aG#7qo$p3jys<7pe%bw+ZB`t&b>n+y;Ma9>eM0UKYup&S03j0J_ zQrPCO@Q~&8Np1O;g5ZGY;DMNmPC(N72pAzfSCrZuezQlto^n4p-gTGlBOI9CYOSoF zD@iBtvh&wqc@QFrlltCWyJOtB)Y~iD)JI5Av|s^Mn%@(&qZ0DE(DP))Qk6e9@2dxI z8Ez>ytO{u8)zMwq;k7N3ebK?!U*3xDzK2#~tMSY6_me-bK;O5awmkHIIX8KB#ZyE# zbU%%SVzK_^Z1`{2`63_;%LH!Q`X9H@rO%+DHdFRoRDqKV+LaA5qH}>5T|8<*1EU%9 z!ONg8f|-J4tai&T6CXaWz283~jT$#2Ef3>d6fAY>7p8Hx9^ZAr&UvZ!l;`cko(j>V zA0P^T@gi1_m;mBDc1&$y45?Z}5b-e<C21O{p^A&qv1y= ztU9&5J5xX9P)M0IZeJFlcYfbo!f9fA42DDnnkoTMj!fG$0QMy?$Rsqh_<{wSSo0HOsZKeYMwMHIF*tyR1K1v z=53dJDfhL_;a6z0g%%G}&qLvd*!lepwq?Tp_ zS*d^$!hq#-+;JSvp$Q6=hgc^S_u~@lnk$!OnK0D;1u-M;Ku7-Mm5==c6tcKGSxh0E(OBl#@Ay-B@ zPP1M__qk!~w6M>u@`ShL&4Nd*-NjoDXwSz*vMpDZt?kLHtkn=bF1{-!RjG}Njw3B- zt!GisvrC7vGHgd*Hk!vFb!pqY+L$aY z6t<3C691^NBm~Di?|j5{H<}<>QXi#-m))@uNE}Nn0&w6*B&F&Z(!F*d*$r42Ad1rs{vj%nXPyRiaQqRr=P$`S0Niq-$B;0#CVQ1Wfvf=` z1*$gMB^LgSINVCe99hks4l6r0%IsK*(g5c7ik-RQ>9*tfJMP*FIx7=gFa7bs|L*$f zzu$Ov^}Ryr@Ag|o!2M@aE%e$^)|w&Sbg`eSjK2jArpgZ3@+q)1f~%GY(1531%3NHZ zr|ix$!!gtqJQ?7`6DAL0Fyk_NjJ*g*fA@5S`QmLR$>xpzH6xQzA2t8eZ_V4NoILk$ zjUbR3W$V|<%IC!jq`-XqDcEv;1a#hlhAu8M#KH_IOuhA@LZZ7a=t0z;tzx35+r_8pfZ_4_iU?cg6m? 
zB7ktL{>5$dU2gYqcs0#oqcSp-n?5I^qSJ!Q*U(cn1)H;Wvs!$o%n9cwzv~>Em0K}G zCP`1Q-cf8M3*+_Vy9Wb@2O-@N;K0wH+;|8Pm1g#@r=NJgRhm^5EE+$SS)>Xr5Jr_Z zSbT|Mm8J4e&XDR(jlO4SR#m}uzkKuB5$w0P)h(?WyBX0Nx0c73;y1Ift9g0axITH153|$dfd-^a&f9#S+T$Ql`Wher{o=^-_5IB4?RM^c$^<^Ocl~p| zpwclIQAXLLFbuIXbMm{;*DiK@yVS1NHvas?#C1~y?yvf87iXw0=m!Bw*Is?@Z4x0p z>@4lfK_8^(aPRrpMOB75#tFX2APf+l<5Mp&y`LH%TZzU;H{H=^{q*2v%UzD${kSPS zMEmV#*}2luFZpVy=l*MB%qJ)mx(lt?aO?z5|sS)RGnXw?hweh@ud*j-TXSpJNr#7!dvfLwDJ06Uv?e&mY6DF~27N9#X% zFUznYXYJlM6tMN)ciX+{BYIW0<*@fwIr58%>r-ViTfTOBIJk<$9aXsR2zi?e46v^5 z{$IBtmFK~EP~5Ql_sQs>>;{_X<}kQDI))x^hD+$1koeb0F!KU&^RksDy>MYZXLdt} z=%K33a0OZk(2lt#di7FH3jI4?od-=)T=eR4{;H2Ia(^6kf9(o?9#z4aPxwjE?Hfl60l-G^edFTG;A*J8w!k72*Vi$hE&&t-(@DnU6YY*ney%@%3+TTW;aQaaRfnv5pRl1G#6Q)x(7b6DwG5pv_ zmb(nv)lE$Srb`gvfPvVL65b%t=spcXhAt&Cmc&sBT}F{ z2jjT%p6nGQJN8S0Wf@-~q9S7PoHYD3x>>EAs<5zw3W#u8@}i}6#;ov@4X(R;F~z9x z!SDfSCD<_lIQD~>^IM!X0)-GRH6WIgaq0Uao$2*stdeU!k)qaE|7TLaDs(N-^3#u{i7%mf$ zffAgrfAAM*9^?dyB)Qa3j4B_^CxjJc1N(x?U5=zdYwDX1?DQ+1kKb08p*)dYp9Hsf zYW=4w-%5RSw>(#Mw?`75FhQ|Q-W8W~X_}8Q`QXB;ubf^o`%{Sma|QGY8B0%dTWf*} z@7<(=m#Pzr5G_nl1?3lM_%n&r@~Uf4@Pk;x34aOL5+Rt!CQK`BJ=h3 z{Bn>g)7=F+F>VRiZ@EZR-0&TS?XVWvvCCm`}wX2p7S zN1`z!Y43;Qm*@ZFnK^JPn?4%SOOyZoVwAD~Ya|Xvv^l}}11CBHr<)?=55fP{(f>AJ zjS35xL59cOLLQe_{nK;URXTa@{-^O5>QOcT*F~5)4){^n1eVAAEu<{VX`G%=>aoB% zc=3|PwdswQqUt=XwJ^8{;WS4gZV}o;JI(2lh{YW*9)6H-_s0^9zttd|kERKdm_uaA zmYDtSB9=XS#G8|~uaf|?>dtAe8yQOLUP6d7)quH=a#GiK@TGe=u2(p?oM3ZAA<>XDCPuVqC6LOLNh*hz4QL)=62UTJZoV&hTYw^`@ z_@7;W1Fl;_X|NrYeRh4J7pI&z93N3@yXtbeBLtTt!!Zdx2rUzf6R|O&#mY_0KZ?t) z;)xzM``iwsh#E61`A#5xce**G92X@LzEw@06A}nFDHEiqa}OWjCq##?J+-7g+U{;8 zVakM=m`R>!roXwrxhmS-)1Gq~ri1VsjAB7%lR= zRLE5HCc_Gn{NlVScE$EnxidPADBG*8`QOUdpHuhjdSE-C4`5xJBl(+j{$*GULQQjQ zPF;69hFL_!LV=1a%~qOH3Q<_C;=6#A8xF)m)nyVWlZzv9`0m1`38#|z8BPibM+@c~ zHRR`(w8VuqxU{RR*fdB1Ci|r9yza3_S5lL<#I~sDcrk3^MR|+gmTapbzwcG3+4Nl9 z+3c{zSe-Z2Hg|lfDFkm5M-=^VWlsK2U2PO_^TUR#a>9SX{EIbh8FS}bszec-cv!fQ zOvxy;tf5&O8$?AuKgcdk_U?z5LiE}x8bm!kCzHeH&I(1$lJKuU+pbvKkpZmYlOnHT zQw-}DUUB43epBzN+TFhQuhHNbQif=wyK}&JCVGVi8-H4rahaelilsPO@nyq*ZKyum zo^zl2hwE?K-~TXv&Q}%b9e+-@sZ{3wF!#T9BkNeK**~?c;$ap7t`wq|&jxn&*DX`o zZEk#r<(ZiS_u7@vW*HK}KaVak@UrkZP9!PDjyqpVE?pXN4^)J5fe-?grm6E)J zKnzVnzW%cbe;-5E^H@82T3uBGMWd+FAHPnGG@!t@BA%)%v;&F8nxVgZ*Mdz}-n{Hf z)z7o}rHNa057D`a_oHbaod(4iBmpIs0Ld|6UXEx%`~1=(QJaph2I!H>m~ zQx6HnDtkiF9~%tuRHCJAjQD*oF)6&3f5CAGE7v03LT|{FcH8u2b|q|{&#?jvrOKYK z*_;pM%Lhq@eJDdwssYv3=;BoNi&v)~1AmMD$k+d>p8jjnv+I@(7hgnq4Xy2e_qyLX zgFbez?hB{EJFkfSa{J+G6pkrp?ts5s(CgMyWM~~rA5=6l!ay2F(p@XJ)^!ixaR#To zBITvCE7YFDAzc6m8m#&?&1ltZkk`TUG54TA?=WBl7V%d!@p??+JwHTnf^S<9l86l6 zp*@<-Qu>eY+mw*-vo{zT4=Me9&HdhjAE8QIF;A4qO*yQLP(WoGs&h?)dAY$#OAS89 zVx5P(4c|zfg}5#5joz6t6%|vBS|f&j&@QBLU&j+>Ik5&H?O1~=u@I0#bJe$ZmJNr8 zHGdq>eBcDl1tMhHR)BH@ zKMtQ#WbyqaCVZ(SQ?Bta5ze+A{N-zLs)wPqM;=fnXX(4CC$@&f?C_Je zb?Aj}#;V_!KOJ9x`UBhluAcrh>$wicsZEuFQ7yWkx;BmUVVR#Tm|ccPO+txq z(RySNS1&v~6a^t5!r+g5{a^L;PXhAyOlxmF?w?(q6cyb;hH?s6j%i(=Z*25!NjQys zPcv(ZCPw<_qg>YPSyZ#fXvu};kt5~Gtxw0PkP~#}*tj=w0?$JFVER|HkozLY7w8p0 zMjw)8t3|PU0V&!b08CT9F1#Bjs!E(X$t?q!B|TfO*G9No1Zu7gXM?zzn(2~j4oisW zi&qi!mcgJRHfDSzN^TyXp3{bog-XtT^s&L7nXVB_Ia<7b%k^jKvumDK_32%=sWkRK z;Xj8&>Fhoo9e;aN_4*#7jx;8fsP8`-K22CE)n53C?S+MU%QhA{Hx?7aeDF#&4^Qhf zH5+X}?ft#Z6^;+EkMJqHF_^-g2yE-^y!5mRIF!*sTKNfxPr?_5`VV@(F(t3R}=YMN0^~WIJU8B zLo*)-CT9>`yY?LW0L%?d$M`oRah}Zm92*@#wJSS>3Q_6h9<-a}VQhnVh{Cl-;GK28 zj`z+0IZca#9;wx?akU+y8j8GZHd6AtbT&>AV^LA}etMp$^%g-_NfCGya2SRgE>Dak z4O*k^%IX10_bG9go#($cxB{0rY zXtxhy&mZ{$@a}Zl*LK>jh^GKWAw}%gPTJZ!g5=AMQV}r?uZZxY)W^?vXZBO%B2s$b 
z#}7o;-oG=29;)7H=ZQGm+h6~5C~oVK=R5Zk>@l=FbZGs=joiS1vx3ZVe4b~%nbgRc z6w1ycKWf^{PMhseB)_!@JtVtkJvMLQHto8ws<~(Wa_`FPdEvA)>rsX6ex@Lbyh1!i zVgW0i3Cq5@7A8e9zg_{mEph;E9o@&mBU96&u4%PI=aA*z8u^x?;k9v$*!!HX{#QY& z-oNt|iHc26#ApY|w^MaHg*e%0AV!OEh<8je7RAIlN$|R>6 zViZIzIkT*3*5<|+3Z-k5ZGZ$y78yj!mgkr|e~+Qsrm}^PqAsIm)FC~VC<%h3H~DT0 z$x~0uDG3nIy?*cT#)TT;E!EoI?nCy;UU`FLg8k*gb@n$cz4&lOJk=Q{bK_K#F?{r) zf6N#1@bUbLvAS^%pouC%bW{KnVKY#mu3BE=_d6#y&K6iR;XggQ9@(i7c^Uf^i4@ta z7aHeA@{kEuk^ClG?F9uzS!~|Vj?ZEigf>+%t9>Zz^wjavc~YX z>`^fl{#Ow8wM??E>=Ka66>;C}iwh|`* literal 0 HcmV?d00001 -- GitLab
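
A note on the tracker added in lib/hand_lib/cores/tracking_utils.py: tracking_bbox keeps no internal state, so the caller threads hand_dict and track_index from one frame into the next, and a detection whose IoU with a known hand exceeds iou_thr keeps that hand's id. The sketch below only illustrates this calling pattern; it is not part of the patch, the detection boxes are made-up values, and the sys.path line assumes the repository layout created by this commit with the script run from the repository root.

```
# Minimal sketch of driving the IoU tracker frame by frame (fabricated detections).
import sys
sys.path.append("./lib/hand_lib/")   # same path convention the application code uses
from cores.tracking_utils import tracking_bbox

hand_dict, track_index = {}, 0

# Per-frame detections as (x_min, y_min, x_max, y_max, score) -- made-up values.
frames = [
    [(100, 120, 220, 260, 0.91)],                                # frame 0: one hand
    [(104, 118, 224, 258, 0.93), (400, 90, 520, 230, 0.88)],     # frame 1: it moves slightly, a second hand appears
]

for i, dets in enumerate(frames):
    hand_dict, track_index = tracking_bbox(dets, hand_dict, track_index, iou_thr=0.5)
    for hand_id, (x0, y0, x1, y1, score, iou, cnt, stable_cnt) in hand_dict.items():
        # cnt counts how many frames this id has been matched; stable_cnt only
        # increases while the box barely moves between frames (IoU > 0.888).
        print("frame", i, "id", hand_id, (x0, y0, x1, y1), "tracked", cnt, "stable", stable_cnt)
```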