diff --git a/configs/face_detection/README.md b/configs/face_detection/README.md
index 3f957def6b8919c0b189ae7a2ef849ddec14dfef..a73cf78113f4650bbbef45d9e98a7574665eac93 100644
--- a/configs/face_detection/README.md
+++ b/configs/face_detection/README.md
@@ -87,8 +87,8 @@ optimized network structure.
 | BlazeFace | Original | 640 | 8 | 32w | **0.915** | **0.892** | **0.797** | [model](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_original.tar) |
 | BlazeFace | Lite | 640 | 8 | 32w | 0.909 | 0.885 | 0.781 | [model](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_lite.tar) |
 | BlazeFace | NAS | 640 | 8 | 32w | 0.837 | 0.807 | 0.658 | [model](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_nas.tar) |
-| FaceBoxes | Original | 640 | 8 | 32w | 0.875 | 0.848 | 0.568 | [model](https://paddlemodels.bj.bcebos.com/object_detection/faceboxes_original.tar) |
-| FaceBoxes | Lite | 640 | 8 | 32w | 0.898 | 0.872 | 0.752 | [model](https://paddlemodels.bj.bcebos.com/object_detection/faceboxes_lite.tar) |
+| FaceBoxes | Original | 640 | 8 | 32w | 0.878 | 0.851 | 0.576 | [model](https://paddlemodels.bj.bcebos.com/object_detection/faceboxes_original.tar) |
+| FaceBoxes | Lite | 640 | 8 | 32w | 0.901 | 0.875 | 0.760 | [model](https://paddlemodels.bj.bcebos.com/object_detection/faceboxes_lite.tar) |
 
 **NOTES:**
 - Get mAP in `Easy/Medium/Hard Set` by multi-scale evaluation in `tools/face_eval.py`.
@@ -103,8 +103,8 @@ configs file and set `lite_edition: true`.
 | BlazeFace | Original | 640 | **0.992** | **0.762** |
 | BlazeFace | Lite | 640 | 0.990 | 0.756 |
 | BlazeFace | NAS | 640 | 0.981 | 0.741 |
-| FaceBoxes | Original | 640 | 0.985 | 0.731 |
-| FaceBoxes | Lite | 640 | 0.987 | 0.741 |
+| FaceBoxes | Original | 640 | 0.987 | 0.736 |
+| FaceBoxes | Lite | 640 | 0.988 | 0.751 |
 
 **NOTES:**
 - Get mAP by multi-scale evaluation on the FDDB dataset.
diff --git a/ppdet/modeling/architectures/blazeface.py b/ppdet/modeling/architectures/blazeface.py
index 0a02e70ce480b3eac8636f28615a9e3d68209b07..9b292b6a9c79f840aaa17ea896a9ae6eda58b522 100644
--- a/ppdet/modeling/architectures/blazeface.py
+++ b/ppdet/modeling/architectures/blazeface.py
@@ -131,7 +131,8 @@ class BlazeFace(object):
                     fixed_sizes=min_size,
                     fixed_ratios=[1.],
                     clip=False,
-                    offset=0.5)
+                    offset=0.5,
+                    steps=[self.steps[i]] * 2)
             else:
                 box, var = fluid.layers.prior_box(
                     input,
diff --git a/ppdet/modeling/architectures/faceboxes.py b/ppdet/modeling/architectures/faceboxes.py
index ba9d3e09b1dbf418f0b6f036493d1af989a8ce67..0e7c0dec430800ddaa4e6ea28e0cdc502566c8be 100644
--- a/ppdet/modeling/architectures/faceboxes.py
+++ b/ppdet/modeling/architectures/faceboxes.py
@@ -54,7 +54,8 @@ class FaceBoxes(object):
                  output_decoder=SSDOutputDecoder().__dict__,
                  densities=[[4, 2, 1], [1], [1]],
                  fixed_sizes=[[32., 64., 128.], [256.], [512.]],
-                 num_classes=2):
+                 num_classes=2,
+                 steps=[8., 16., 32.]):
         super(FaceBoxes, self).__init__()
         self.backbone = backbone
         self.num_classes = num_classes
@@ -63,6 +64,7 @@ class FaceBoxes(object):
         self.output_decoder = SSDOutputDecoder(**output_decoder)
         self.densities = densities
         self.fixed_sizes = fixed_sizes
+        self.steps = steps
 
     def build(self, feed_vars, mode='train'):
         im = feed_vars['image']
@@ -114,7 +116,8 @@ class FaceBoxes(object):
                 fixed_sizes=fixed_sizes,
                 fixed_ratios=[1.],
                 clip=False,
-                offset=0.5)
+                offset=0.5,
+                steps=[self.steps[i]] * 2)
 
             num_boxes = box.shape[2]
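
For context, a minimal sketch (not part of the patch) of how the new `steps` argument reaches `fluid.layers.density_prior_box`: when `steps` is omitted, the op derives the stride from the image/feature-map size ratio, while passing the known stride pins the prior-box centers explicitly. The variable `feat`, its shape, and the loop index `i` below are assumed for illustration only.

```python
# Illustrative sketch only -- `feat`, its shape, and `i` are assumptions, not from the patch.
import paddle.fluid as fluid

steps = [8., 16., 32.]  # new FaceBoxes.__init__ default: stride of each head feature map

image = fluid.layers.data(name='image', shape=[3, 640, 640], dtype='float32')
feat = fluid.layers.data(name='feat', shape=[64, 80, 80], dtype='float32')  # hypothetical 1/8-scale map

i = 0  # index of this feature map in the multi-box head loop
box, var = fluid.layers.density_prior_box(
    feat,
    image,
    densities=[4, 2, 1],
    fixed_sizes=[32., 64., 128.],
    fixed_ratios=[1.],
    clip=False,
    offset=0.5,
    steps=[steps[i]] * 2)  # [step_w, step_h], as added in the patch
```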