diff --git a/configs/face_detection/README_en.md b/configs/face_detection/README_en.md
index bf798b3c0b01654239df07338c6dc7174958024f..f7295f32da996cb2502ae0094b6a8dda540f3920 100644
--- a/configs/face_detection/README_en.md
+++ b/configs/face_detection/README_en.md
@@ -110,6 +110,59 @@ legend_name = 'Paddle-BlazeFace';
 matlab -nodesktop -nosplash -nojvm -r "run wider_eval.m;quit;"
 ```
+### Use via Python Code
+To support development, here is an example of using the PaddleDetection whl package to make predictions through Python code.
+```python
+import cv2
+import paddle
+import numpy as np
+from ppdet.core.workspace import load_config
+from ppdet.engine import Trainer
+from ppdet.metrics import get_infer_results
+from ppdet.data.transform.operators import NormalizeImage, Permute
+
+
+if __name__ == '__main__':
+    # prepare the parameters: config file, trained weights, test image and score threshold
+    config_path = 'PaddleDetection/configs/face_detection/blazeface_1000e.yml'
+    cfg = load_config(config_path)
+    weight_path = 'PaddleDetection/output/blazeface_1000e.pdparams'
+    infer_img_path = 'PaddleDetection/demo/hrnet_demo.jpg'
+    cfg.weights = weight_path
+    bbox_thre = 0.8
+    paddle.set_device('gpu')
+    # create the Trainer, load the trained weights and build the preprocess operators
+    trainer = Trainer(cfg, mode='test')
+    trainer.load_weights(cfg.weights)
+    trainer.model.eval()
+    normaler = NormalizeImage(mean=[123, 117, 104], std=[127.502231, 127.502231, 127.502231], is_scale=False)
+    permuter = Permute()
+    # read the image and convert it from BGR to RGB
+    im = cv2.imread(infer_img_path)
+    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+    # prepare the input data dict: normalized CHW image plus the meta info the model expects
+    data_dict = {'image': im}
+    data_dict = normaler(data_dict)
+    data_dict = permuter(data_dict)
+    h, w, c = im.shape
+    data_dict['im_id'] = paddle.to_tensor(np.array([[0]]))
+    data_dict['im_shape'] = paddle.to_tensor(np.array([[h, w]], dtype=np.float32))
+    data_dict['scale_factor'] = paddle.to_tensor(np.array([[1., 1.]], dtype=np.float32))
+    data_dict['image'] = paddle.to_tensor(data_dict['image'].reshape((1, c, h, w)))
+    data_dict['curr_iter'] = paddle.to_tensor(np.array([0]))
+    # run the prediction
+    outs = trainer.model(data_dict)
+    # postprocess the outputs to get the final bbox results
+    for key in ['im_shape', 'scale_factor', 'im_id']:
+        outs[key] = data_dict[key]
+    for key, value in outs.items():
+        outs[key] = value.numpy()
+    clsid2catid, catid2name = {0: 'face'}, {0: 0}
+    batch_res = get_infer_results(outs, clsid2catid)
+    bbox = [sub_dict for sub_dict in batch_res['bbox'] if sub_dict['score'] > bbox_thre]
+    print(bbox)
+```
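+
+The printed `bbox` is a list of dictionaries; each entry is assumed to carry a `score` and a COCO-style `[x, y, w, h]` box as produced by `get_infer_results`. As a minimal sketch under that assumption (the `draw_bbox` helper below is hypothetical, not part of PaddleDetection), the kept detections can be drawn on the image with OpenCV:
+```python
+import cv2
+
+
+def draw_bbox(image_path, results, save_path='output_vis.jpg'):
+    # draw every kept detection and its score on the original image, then save it
+    img = cv2.imread(image_path)
+    for res in results:
+        x, y, w, h = res['bbox']  # assumed COCO-style [x, y, w, h] layout
+        cv2.rectangle(img, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2)
+        cv2.putText(img, '%.2f' % res['score'], (int(x), max(int(y) - 5, 0)),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
+    cv2.imwrite(save_path, img)
+
+
+# reuse `infer_img_path` and `bbox` from the snippet above
+draw_bbox(infer_img_path, bbox)
+```
+
 ## Citations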