diff --git a/README_cn.md b/README_cn.md
index 0865e6745aff5d3304cc9c0aa3428431306ec759..ebeab2ec12da265fb1ab48489fcff5e559cc1513 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -18,6 +18,7 @@ PaddleDetection modularly implements a variety of mainstream object detection algorithms, providing
+
### Product News
diff --git a/configs/keypoint/README.md b/configs/keypoint/README.md
index 4b193986d9cd5619ed7e128d229a7802ba3c842a..b4285bc9bc6748fcc2adf2fc96c85154083e8a15 100644
--- a/configs/keypoint/README.md
+++ b/configs/keypoint/README.md
@@ -76,5 +76,5 @@ python deploy/python/keypoint_infer.py --model_dir=output_inference/higherhrnet_
python deploy/python/keypoint_infer.py --model_dir=output_inference/hrnet_w32_384x288/ --image_file=./demo/hrnet_demo.jpg --use_gpu=True --threshold=0.5
#keypoint top-down model + detector joint deployment inference (joint inference supports only the top-down mode)
-python deploy/python/keypoint_det_unite_infer.py --det_model_dir=output_inference/ppyolo_r50vd_dcn_2x_coco/ --keypoint_model_dir=output_inference/hrnet_w32_384x288/ --video_file=../video/xxx.mp4
+python deploy/python/keypoint_det_unite_infer.py --det_model_dir=output_inference/ppyolo_r50vd_dcn_2x_coco/ --keypoint_model_dir=output_inference/hrnet_w32_384x288/ --video_file=../video/xxx.mp4 --use_gpu=True
```
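
The `--use_gpu=True` flag added above brings the joint command in line with the single-model examples. For orientation, here is a rough Python sketch of the per-frame flow that command drives, using names from `keypoint_det_unite_infer.py` in this diff; the exact signatures are assumptions, not the script's verbatim API:

```python
def run_topdown_unite(detector, topdown_keypoint_detector, frame, FLAGS):
    # Sketch only: detect people first, then run the top-down keypoint
    # model on each cropped person, which is why joint inference supports
    # only the top-down mode.
    results = detector.predict(frame, FLAGS.det_threshold)
    crops, org_rects = get_person_from_rect(frame, results)
    for rect_image, new_rect in crops:
        topdown_keypoint_detector.predict([rect_image],
                                          FLAGS.keypoint_threshold)
```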
diff --git a/deploy/python/infer.py b/deploy/python/infer.py
index ee44f61b14e6365315a48273a0f0e5c22ce95dba..47a18d8005e2f80edda85e23f5e11de5f0acc682 100644
--- a/deploy/python/infer.py
+++ b/deploy/python/infer.py
@@ -191,7 +191,7 @@ class DetectorSOLOv2(Detector):
cpu_threads=1,
enable_mkldnn=False):
self.pred_config = pred_config
- self.predictor, self.config = load_predictor(
+ self.predictor, self.config = load_predictor(
model_dir,
run_mode=run_mode,
min_subgraph_size=self.pred_config.min_subgraph_size,
@@ -541,8 +541,8 @@ def main():
detector.det_times.info(average=True)
else:
mems = {
- 'cpu_rss': detector.cpu_mem / len(img_list),
- 'gpu_rss': detector.gpu_mem / len(img_list),
+ 'cpu_rss_mb': detector.cpu_mem / len(img_list),
+ 'gpu_rss_mb': detector.gpu_mem / len(img_list),
'gpu_util': detector.gpu_util * 100 / len(img_list)
}
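
The new `_mb` suffixes make the reported units explicit. As context, a minimal sketch of how these counters are presumably accumulated per image; the tuple returned by `get_current_memory_mb` is an assumption based on its name and this usage:

```python
def accumulate_mem_stats(detector, gpu_id=0):
    # Assumed return shape: (cpu_mb, gpu_mb, gpu_util_fraction).
    cm, gm, gu = get_current_memory_mb(gpu_id)
    detector.cpu_mem += cm    # resident set size in MB -> 'cpu_rss_mb'
    detector.gpu_mem += gm    # GPU memory in use, MB -> 'gpu_rss_mb'
    detector.gpu_util += gu   # fraction; scaled by 100 and averaged above
```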
@@ -550,16 +550,16 @@ def main():
model_dir = FLAGS.model_dir
mode = FLAGS.run_mode
model_info = {
- 'model_name': model_dir.strip('/').split('/')[-1],
- 'precision': mode.split('_')[-1]
+ 'model_name': model_dir.strip('/').split('/')[-1],
+ 'precision': mode.split('_')[-1]
}
data_info = {
'batch_size': 1,
'shape': "dynamic_shape",
'data_num': perf_info['img_num']
}
- det_log = PaddleInferBenchmark(
- detector.config, model_info, data_info, perf_info, mems)
+ det_log = PaddleInferBenchmark(detector.config, model_info,
+ data_info, perf_info, mems)
det_log('Det')
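
A worked example of how `model_info` falls out of the flags (values hypothetical):

```python
# model_name is the last path component of --model_dir:
'output_inference/ppyolo_r50vd_dcn_2x_coco/'.strip('/').split('/')[-1]
# -> 'ppyolo_r50vd_dcn_2x_coco'

# precision is the suffix of --run_mode; 'trt_fp16' -> 'fp16', while the
# default 'fluid' contains no '_' and is reported unchanged:
'trt_fp16'.split('_')[-1]  # -> 'fp16'
```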
diff --git a/deploy/python/keypoint_det_unite_infer.py b/deploy/python/keypoint_det_unite_infer.py
index b27abe32af690c6db5176f142fc1416f7ecdd2d5..dd80b290ff89377562dfb7149ed05a9257fd079b 100644
--- a/deploy/python/keypoint_det_unite_infer.py
+++ b/deploy/python/keypoint_det_unite_infer.py
@@ -13,7 +13,6 @@
# limitations under the License.
import os
-
from PIL import Image
import cv2
import numpy as np
@@ -52,7 +51,7 @@ def get_person_from_rect(images, results):
org_rects = []
for rect in valid_rects:
rect_image, new_rect, org_rect = expand_crop(images, rect)
- if rect_image is None:
+ if rect_image is None or rect_image.size == 0:
continue
image_buff.append([rect_image, new_rect])
org_rects.append(org_rect)
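
The strengthened guard matters because a degenerate box (for example, one clipped to nothing at the image border) can come back from `expand_crop` as a zero-element array rather than `None`; such an array passes the old `is None` check but cannot be fed to the keypoint preprocessor. A minimal reproduction:

```python
import numpy as np

# An empty crop is not None, yet has no pixels to preprocess.
empty_crop = np.zeros((0, 0, 3), dtype=np.uint8)
assert empty_crop is not None and empty_crop.size == 0
```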
@@ -113,13 +112,13 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):
os.makedirs(FLAGS.output_dir)
out_path = os.path.join(FLAGS.output_dir, video_name)
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
- index = 1
+ index = 0
while (1):
ret, frame = capture.read()
if not ret:
break
- print('detect frame:%d' % (index))
index += 1
+ print('detect frame:%d' % (index))
frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = detector.predict(frame2, FLAGS.det_threshold)
@@ -136,7 +135,7 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):
keypoint_res = {}
keypoint_res['keypoint'] = [
np.vstack(keypoint_vector), np.vstack(score_vector)
- ]
+ ] if len(keypoint_vector) > 0 else [[], []]
keypoint_res['bbox'] = rect_vecotr
im = draw_pose(
frame,
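
The new conditional protects frames in which the detector finds nobody: `np.vstack` refuses an empty list, so without the `[[], []]` fallback a person-free frame would crash the video loop. A minimal reproduction:

```python
import numpy as np

keypoint_vector = []  # a frame with no detected persons
try:
    np.vstack(keypoint_vector)
except ValueError:
    # "need at least one array to concatenate"; hence the [[], []] fallback.
    pass
```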
@@ -189,8 +188,6 @@ def main():
# predict from image
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
topdown_unite_predict(detector, topdown_keypoint_detector, img_list)
- detector.det_times.info(average=True)
- topdown_keypoint_detector.det_times.info(average=True)
if __name__ == '__main__':
diff --git a/deploy/python/keypoint_infer.py b/deploy/python/keypoint_infer.py
index 61a9fb8aea97a8dd33d8912bc19576e26806cf08..49d8b344118fcb5da15eff9b97bae8980edba586 100644
--- a/deploy/python/keypoint_infer.py
+++ b/deploy/python/keypoint_infer.py
@@ -28,7 +28,8 @@ from keypoint_postprocess import HrHRNetPostProcess, HRNetPostProcess
from keypoint_visualize import draw_pose
from paddle.inference import Config
from paddle.inference import create_predictor
-from utils import argsparser, Timer, get_current_memory_mb, LoggerHelper
+from utils import argsparser, Timer, get_current_memory_mb
+from benchmark_utils import PaddleInferBenchmark
from infer import get_test_images, print_arguments
# Global dictionary
@@ -66,7 +67,7 @@ class KeyPoint_Detector(object):
cpu_threads=1,
enable_mkldnn=False):
self.pred_config = pred_config
- self.predictor = load_predictor(
+ self.predictor, self.config = load_predictor(
model_dir,
run_mode=run_mode,
min_subgraph_size=self.pred_config.min_subgraph_size,
@@ -129,7 +130,7 @@ class KeyPoint_Detector(object):
MaskRCNN's results include 'masks': np.ndarray:
shape: [N, im_h, im_w]
'''
- self.det_times.preprocess_time.start()
+ self.det_times.preprocess_time_s.start()
inputs = self.preprocess(image)
np_boxes, np_masks = None, None
input_names = self.predictor.get_input_names()
@@ -137,7 +138,7 @@ class KeyPoint_Detector(object):
for i in range(len(input_names)):
input_tensor = self.predictor.get_input_handle(input_names[i])
input_tensor.copy_from_cpu(inputs[input_names[i]])
- self.det_times.preprocess_time.end()
+ self.det_times.preprocess_time_s.end()
for i in range(warmup):
self.predictor.run()
output_names = self.predictor.get_output_names()
@@ -152,7 +153,7 @@ class KeyPoint_Detector(object):
inds_k.copy_to_cpu()
]
- self.det_times.inference_time.start()
+ self.det_times.inference_time_s.start()
for i in range(repeats):
self.predictor.run()
output_names = self.predictor.get_output_names()
@@ -166,12 +167,12 @@ class KeyPoint_Detector(object):
masks_tensor.copy_to_cpu(), heat_k.copy_to_cpu(),
inds_k.copy_to_cpu()
]
- self.det_times.inference_time.end(repeats=repeats)
+ self.det_times.inference_time_s.end(repeats=repeats)
- self.det_times.postprocess_time.start()
+ self.det_times.postprocess_time_s.start()
results = self.postprocess(
np_boxes, np_masks, inputs, threshold=threshold)
- self.det_times.postprocess_time.end()
+ self.det_times.postprocess_time_s.end()
self.det_times.img_num += 1
return results
@@ -318,7 +319,7 @@ def load_predictor(model_dir,
# disable feed, fetch OP, needed by zero_copy_run
config.switch_use_feed_fetch_ops(False)
predictor = create_predictor(config)
- return predictor
+ return predictor, config
def predict_image(detector, image_list):
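
Because `load_predictor` now returns a pair, every call site must unpack both values, as the `DetectorSOLOv2` and `KeyPoint_Detector` hunks above do. A minimal sketch, with a placeholder model path and `run_mode='fluid'` assumed as the default:

```python
# Keeping the Config on the detector lets PaddleInferBenchmark read the
# engine settings from it when the benchmark report is built.
predictor, config = load_predictor(
    'output_inference/hrnet_w32_384x288',  # placeholder path
    run_mode='fluid',
    min_subgraph_size=3)
```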
@@ -347,7 +348,8 @@ def predict_video(detector, camera_id):
video_name = 'output.mp4'
else:
capture = cv2.VideoCapture(FLAGS.video_file)
- video_name = os.path.basename(os.path.split(FLAGS.video_file)[-1])
+ video_name = os.path.splitext(os.path.basename(FLAGS.video_file))[
+ 0] + '.mp4'
fps = 30
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
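
The rewritten `video_name` keeps only the stem of the input file and forces an `.mp4` extension, so the `cv2.VideoWriter` output name matches its MP4 encoding regardless of the input container. For a made-up input path:

```python
import os

# The old code kept the original extension; the new form normalizes it.
stem = os.path.splitext(os.path.basename('../video/demo_input.avi'))[0]
print(stem + '.mp4')  # -> 'demo_input.mp4'
```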
@@ -403,13 +405,25 @@ def main():
detector.det_times.info(average=True)
else:
mems = {
- 'cpu_rss': detector.cpu_mem / len(img_list),
- 'gpu_rss': detector.gpu_mem / len(img_list),
+ 'cpu_rss_mb': detector.cpu_mem / len(img_list),
+ 'gpu_rss_mb': detector.gpu_mem / len(img_list),
'gpu_util': detector.gpu_util * 100 / len(img_list)
}
- det_logger = LoggerHelper(
- FLAGS, detector.det_times.report(average=True), mems)
- det_logger.report()
+ perf_info = detector.det_times.report(average=True)
+ model_dir = FLAGS.model_dir
+ mode = FLAGS.run_mode
+ model_info = {
+ 'model_name': model_dir.strip('/').split('/')[-1],
+ 'precision': mode.split('_')[-1]
+ }
+ data_info = {
+ 'batch_size': 1,
+ 'shape': "dynamic_shape",
+ 'data_num': perf_info['img_num']
+ }
+ det_log = PaddleInferBenchmark(detector.config, model_info,
+ data_info, perf_info, mems)
+ det_log('KeyPoint')
if __name__ == '__main__':
diff --git a/deploy/python/keypoint_visualize.py b/deploy/python/keypoint_visualize.py
index 94793810a505987a3a754e24f055b994dbdc2135..b379bd7029817f6e6daef73492bb80039118190f 100644
--- a/deploy/python/keypoint_visualize.py
+++ b/deploy/python/keypoint_visualize.py
@@ -19,11 +19,6 @@ import numpy as np
import math
-def map_coco_to_personlab(keypoints):
- permute = [0, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
- return keypoints[:, permute, :]
-
-
def draw_pose(imgfile,
results,
visual_thread=0.6,
@@ -39,9 +34,9 @@ def draw_pose(imgfile,
'for example: `pip install matplotlib`.')
raise e
- EDGES = [(0, 14), (0, 13), (0, 4), (0, 1), (14, 16), (13, 15), (4, 10),
- (1, 7), (10, 11), (7, 8), (11, 12), (8, 9), (4, 5), (1, 2), (5, 6),
- (2, 3)]
+ EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),
+ (7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14), (13, 15),
+ (14, 16), (11, 12)]
NUM_EDGES = len(EDGES)
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
@@ -52,25 +47,28 @@ def draw_pose(imgfile,
img = cv2.imread(imgfile) if type(imgfile) == str else imgfile
skeletons, scores = results['keypoint']
+ color_set = results['colors'] if 'colors' in results else None
if 'bbox' in results:
bboxs = results['bbox']
- for idx, rect in enumerate(bboxs):
+ for j, rect in enumerate(bboxs):
xmin, ymin, xmax, ymax = rect
- cv2.rectangle(img, (xmin, ymin), (xmax, ymax), colors[0], 1)
+ color = colors[0] if color_set is None else colors[color_set[j] %
+ len(colors)]
+ cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)
canvas = img.copy()
for i in range(17):
- rgba = np.array(cmap(1 - i / 17. - 1. / 34))
- rgba[0:3] *= 255
for j in range(len(skeletons)):
if skeletons[j][i, 2] < visual_thread:
continue
+ color = colors[i] if color_set is None else colors[color_set[j] %
+ len(colors)]
cv2.circle(
canvas,
tuple(skeletons[j][i, 0:2].astype('int32')),
2,
- colors[i],
+ color,
thickness=-1)
to_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)
@@ -78,7 +76,6 @@ def draw_pose(imgfile,
stickwidth = 2
- skeletons = map_coco_to_personlab(skeletons)
for i in range(NUM_EDGES):
for j in range(len(skeletons)):
edge = EDGES[i]
@@ -96,7 +93,9 @@ def draw_pose(imgfile,
polygon = cv2.ellipse2Poly((int(mY), int(mX)),
(int(length / 2), stickwidth),
int(angle), 0, 360, 1)
- cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
+ color = colors[i] if color_set is None else colors[color_set[j] %
+ len(colors)]
+ cv2.fillConvexPoly(cur_canvas, polygon, color)
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
if returnimg:
return canvas
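
Two changes run through `draw_pose`: `EDGES` now indexes keypoints in their native COCO-17 order, which is what lets the `map_coco_to_personlab` permutation be deleted, and an optional `results['colors']` entry picks a stable per-person color via `colors[color_set[j] % len(colors)]`. The assumed index convention, plus a hypothetical use of the palette:

```python
import numpy as np

# COCO-17 keypoint order that the new EDGES pairs index into:
COCO17 = [
    'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
    'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
    'left_knee', 'right_knee', 'left_ankle', 'right_ankle',
]

# Hypothetical per-person palette keys, e.g. tracker ids for two people:
skeletons = np.zeros((2, 17, 3), dtype='float32')
scores = np.ones((2, ), dtype='float32')
results = {'keypoint': [skeletons, scores], 'colors': [7, 12]}
```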
diff --git a/docs/images/mot_pose_demo_640x360.gif b/docs/images/mot_pose_demo_640x360.gif
new file mode 100644
index 0000000000000000000000000000000000000000..a4b6fe0178c891c3840e2b647e26d5b9010da741
Binary files /dev/null and b/docs/images/mot_pose_demo_640x360.gif differ