Commit 82126f1c authored by Eric.Lee2021 🚴🏻

update

Parent 03851a28
@@ -36,7 +36,7 @@ from manopth import manolayer
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' Project Hand Pose 3D Inference')
parser.add_argument('--model_path', type=str, default = './if_package/e3d_handposex-resnet_50-size-128-loss-wing_loss-20210619.pth',
parser.add_argument('--model_path', type=str, default = './if_package/e3d_handposex-resnet_50-size-128-loss-wing_loss-20210620.pth',
help = 'model_path') # path to the e3d handpose model
parser.add_argument('--detect_model_path', type=str, default = './if_package/hand_detect_416-20210606.pt',
help = 'model_path') # path to the hand detection model
@@ -159,6 +159,10 @@ if __name__ == "__main__":
img_yolo_x = img_o.copy()
hand_bbox =hand_detect_model.predict(img_yolo_x,vis = False) # detect the hand and get its bounding box
if len(hand_bbox) == 1:
#----------------------------------
finger_index = None # 2D coordinate of the index fingertip (for the UI)
finger_thumb = None # 2D coordinate of the thumb tip (for the UI)
#----------------------------------
x_min,y_min,x_max,y_max,_ = hand_bbox[0]
w_ = max(abs(x_max-x_min),abs(y_max-y_min))
@@ -180,6 +184,8 @@ if __name__ == "__main__":
img_show = img.copy() # copy used for visualization
pts_2d_ = handpose_2d_model.predict(img.copy()) # handpose_2d predict
pts_2d_hand = {}
kps_min_x,kps_min_y,kps_max_x,kps_max_y = 65535.,65535.,0.,0.
for ptk in range(int(pts_2d_.shape[0]/2)):
xh = pts_2d_[ptk*2+0]*float(img.shape[1])
@@ -188,9 +194,25 @@
"x":xh,
"y":yh,
}
kps_min_x = (xh+x1) if (xh+x1)<kps_min_x else kps_min_x
kps_min_y = (yh+y1) if (yh+y1)<kps_min_y else kps_min_y
kps_max_x = (xh+x1) if (xh+x1)>kps_max_x else kps_max_x
kps_max_y = (yh+y1) if (yh+y1)>kps_max_y else kps_max_y
if ptk == 3 or ptk == 4:
if finger_thumb is None:
finger_thumb = (int(xh+x1),int(yh+y1))
else:
finger_thumb = (int((xh+x1+finger_thumb[0])/2),int((yh+y1+finger_thumb[1])/2))
if ptk == 7 or ptk == 8:
if finger_index is None:
finger_index = (int(xh+x1),int(yh+y1))
else:
finger_index = (int((xh+x1+finger_index[0])/2),int((yh+y1+finger_index[1])/2))
# cv2.circle(img_show, finger_index, 9, (25,160,255),-1)
if ops.vis:
cv2.circle(img_show, (int(xh),int(yh)), 4, (255,50,60),-1)
cv2.circle(img_show, (int(xh),int(yh)), 3, (25,160,255),-1)
hand2d_kps_bbox = (int(kps_min_x),int(kps_min_y),int(kps_max_x),int(kps_max_y)) # bounding box of the 2D hand keypoints
if ops.vis:
draw_bd_handpose_c(img_show,pts_2d_hand,0,0,2)
cv2.namedWindow("handpose_2d",0)
@@ -278,7 +300,7 @@ if __name__ == "__main__":
pose_R = torch.from_numpy(pose_R).float()
# reconstruction
hand_verts, j3d_recon = mano(pose_R, opt_tensor_shape.float())
hand_verts[:,:,:] = hand_verts[:,:,:]*(0.573)
hand_verts[:,:,:] = hand_verts[:,:,:]*(0.503)
# print(j3d_recon.size())
mesh.triangles = open3d.utility.Vector3iVector(mano.th_faces)
@@ -311,13 +333,19 @@
# print(j3d_.shape,j3d_)
test_pcd.points = open3d.utility.Vector3dVector(j3d_[0]) # set the point cloud coordinates
else:
# test_pcd.points = open3d.utility.Vector3dVector(hand_verts)
# 778*3
print("hand_verts shape : {}".format(hand_verts.shape))
# a = np.concatenate((
# hand_verts[38:40,:],
# hand_verts[40:100,:]
# ),axis=0)
test_pcd.points = open3d.utility.Vector3dVector(hand_verts)
pre_joints[:,1] *=-1.
pre_joints = pre_joints*70
pre_joints[:,1] -= 40
pre_joints[:,0] -= 0
# print("pre_joints",pre_joints.shape)
test_pcd.points = open3d.utility.Vector3dVector(pre_joints)
# test_pcd.points = open3d.utility.Vector3dVector(pre_joints)
# test_pcd.points = open3d.utility.Vector3dVector(pre_joints[1,:].reshape(1,3))
# rgb = np.asarray([250,0,250])
# rgb_t = np.transpose(rgb)
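The transform applied to pre_joints above can be read as a small sketch (the hard-coded 70/40 values simply restate the diff and are assumptions tied to this particular Open3D viewer setup, not general constants):

def joints_to_render_space(pre_joints):
    pts = pre_joints.copy()
    pts[:, 1] *= -1.0   # flip the y axis so the hand points up in the viewer
    pts = pts * 70      # scale up to the Open3D viewport
    pts[:, 1] -= 40     # shift down along y
    pts[:, 0] -= 0      # no horizontal shift (kept for symmetry with the diff)
    return pts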
@@ -360,7 +388,13 @@ if __name__ == "__main__":
#----------
cv2.namedWindow("image_3d",0)
cv2.imshow("image_3d",image_3d)
scale_ = ((x_max-x_min)/(xx2-xx1) + (y_max-y_min)/(yy2-yy1))/2.*1.01
# cv2.rectangle(img_yolo_x, (hand2d_kps_bbox[0],hand2d_kps_bbox[1]),
# (hand2d_kps_bbox[2],hand2d_kps_bbox[3]), (0,165,255), 3) # draw the 2D hand keypoint bbox
# scale_ = ((x_max-x_min)/(xx2-xx1) + (y_max-y_min)/(yy2-yy1))/2.*1.01
scale_ = ((hand2d_kps_bbox[2]-hand2d_kps_bbox[0])/(xx2-xx1)
+ (hand2d_kps_bbox[3]-hand2d_kps_bbox[1])/(yy2-yy1))/2.*1.2
w_3d_ = (xx2-xx1)*scale_
h_3d_ = (yy2-yy1)*scale_
x_mid_3d = (xx1+xx2)/2.
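In effect, this hunk switches the rescaling from the detector-box ratio to the 2D-keypoint-box ratio; a minimal sketch of that computation (the function name and the 1.2 margin only restate the values in the diff):

def render_to_image_scale(hand2d_kps_bbox, xx1, yy1, xx2, yy2, margin=1.2):
    kx1, ky1, kx2, ky2 = hand2d_kps_bbox
    # average the width and height ratios between the 2D keypoint box and the
    # bounding box (xx1, yy1, xx2, yy2) of the rendered 3D hand, then pad by `margin`
    return ((kx2 - kx1) / (xx2 - xx1) + (ky2 - ky1) / (yy2 - yy1)) / 2.0 * margin

# the rendered hand crop is then resized to (w_3d_, h_3d_) = ((xx2-xx1)*scale_, (yy2-yy1)*scale_)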
@@ -377,6 +411,7 @@
crop_mask = np.expand_dims(crop_mask, 2)
try:
#------------
img_ff = img_yolo_x[int(y_mid - h_r ):int(y_mid + h_r ),int(x_mid - w_r ):int(x_mid + w_r ),:]*(1.-crop_mask) + crop_*crop_mask
img_yolo_x[int(y_mid - h_r ):int(y_mid + h_r ),int(x_mid - w_r ):int(x_mid + w_r ),:] = img_ff.astype(np.uint8)
except:
@@ -387,7 +422,7 @@
depth_z = (model_hand_h/real_hand_h + model_hand_w/real_hand_w)/2.# relative depth z
#
cv2.putText(img_yolo_x, " Relative Depth_Z :{:.3f} ".format(depth_z), (4,42),cv2.FONT_HERSHEY_DUPLEX, 1.1, (55, 0, 220),7)
cv2.putText(img_yolo_x, " Relative Depth_Z :{:.3f} ".format(depth_z), (4,42),cv2.FONT_HERSHEY_DUPLEX, 1.1, (25, 180, 250),2)
cv2.putText(img_yolo_x, " Relative Depth_Z :{:.3f} ".format(depth_z), (4,42),cv2.FONT_HERSHEY_DUPLEX, 1.1, (25, 180, 250),1)
cv2.namedWindow("img_yolo_x",0)