diff --git a/docs/en_US/tutorials/motion_driving.md b/docs/en_US/tutorials/motion_driving.md index c3ae687612f6a12e03823b08509f6312df490893..834a94cd8dac5ee58c22341f71d6de401eb58f37 100644 --- a/docs/en_US/tutorials/motion_driving.md +++ b/docs/en_US/tutorials/motion_driving.md @@ -17,14 +17,16 @@ cd applications/ python -u tools/first-order-demo.py \ --driving_video ../docs/imgs/fom_dv.mp4 \ --source_image ../docs/imgs/fom_source_image.png \ - --relative --adapt_scale + --ratio 0.5 \ + --relative --adapt_scale ``` **params:** - driving_video: driving video, the motion of the driving video is to be migrated. -- source_image: source_image, the image will be animated according to the motion of the driving video. +- source_image: source_image, supports both single-person and multi-person images, the image will be animated according to the motion of the driving video. - relative: indicate whether the relative or absolute coordinates of the key points in the video are used in the program. It is recommended to use relative coordinates. If absolute coordinates are used, the characters will be distorted after animation. - adapt_scale: adapt movement scale based on convex hull of keypoints. 
+- ratio: The proportion of the generated image occupied by the pasted-back face; this parameter should be adjusted for multi-person images in which adjacent faces are close. ## Animation results diff --git a/docs/zh_CN/tutorials/motion_driving.md b/docs/zh_CN/tutorials/motion_driving.md index 7439160b747038503af07d7fd62ca02c37dde033..ed0e156b36376347b83e90a2e18658c528cbb7d0 100644 --- a/docs/zh_CN/tutorials/motion_driving.md +++ b/docs/zh_CN/tutorials/motion_driving.md @@ -26,9 +26,10 @@ python -u tools/first-order-demo.py \ **参数说明:** - driving_video: 驱动视频,视频中人物的表情动作作为待迁移的对象 -- source_image: 原始图片,视频中人物的表情动作将迁移到该原始图片中的人物上 +- source_image: 原始图片,支持单人图片和多人图片,视频中人物的表情动作将迁移到该原始图片中的人物上 - relative: 指示程序中使用视频和图片中人物关键点的相对坐标还是绝对坐标,建议使用相对坐标,若使用绝对坐标,会导致迁移后人物扭曲变形 - adapt_scale: 根据关键点凸包自适应运动尺度 +- ratio: 贴回驱动生成的人脸区域占原图的比例, 用户需要根据生成的效果调整该参数,尤其对于多人脸距离比较近的情况下需要调整该参数 ## 生成结果展示 diff --git a/ppgan/apps/first_order_predictor.py b/ppgan/apps/first_order_predictor.py index 794300272ee88de3fa66375d377254a2b05554e3..45c458111fa3827630aa64afe463adbff2f6417b 100644 --- a/ppgan/apps/first_order_predictor.py +++ b/ppgan/apps/first_order_predictor.py @@ -270,9 +270,9 @@ class FirstOrderPredictor(BasePredictor): cx = rect[0] + int(bw / 2) margin = max(bh, bw) y1 = max(0, cy - margin) - x1 = max(0, cx - margin) + x1 = max(0, cx - int(0.8 * margin)) y2 = min(h, cy + margin) - x2 = min(w, cx + int(0.8 * margin)) results.append([x1, y1, x2, y2]) boxes = np.array(results) return boxes diff --git a/ppgan/faceutils/face_detection/api.py b/ppgan/faceutils/face_detection/api.py index 0d503ee90d2ffbafb98d84cd1c3d85cbe7ecd870..7511880549051ce43f6e91a322c0e3d4d32426fa 100644 --- a/ppgan/faceutils/face_detection/api.py +++ b/ppgan/faceutils/face_detection/api.py @@ -84,3 +84,19 @@ class FaceAlignment: results.append((x1, y1, x2, y2)) return results + + def get_detections_for_image(self, images): + images = images[..., ::-1] + detected_faces = self.face_detector.detect_from_batch(images.copy()) + 
results = [] + + for i, d in enumerate(detected_faces[0]): + if len(d) == 0: + results.append(None) + continue + d = np.clip(d, 0, None) + + x1, y1, x2, y2 = map(int, d[:-1]) + results.append((x1, y1, x2, y2)) + + return results