diff --git a/docs/featured_model/FACE_DETECTION.md b/docs/featured_model/FACE_DETECTION.md
index 4a141857c87f83cc9abe69f4f50820f07622e333..19db65d3f0469433e37ceda9a8ab3151fa363985 100644
--- a/docs/featured_model/FACE_DETECTION.md
+++ b/docs/featured_model/FACE_DETECTION.md
@@ -254,6 +254,12 @@ wget https://dataset.bj.bcebos.com/wider_face/wider_face_train_bbx_lmk_gt.txt
 
 (2) Use the `configs/face_detection/blazeface_keypoint.yml` configuration file for training and evaluation; usage is the same as in the previous section.
 
+### Model Evaluation
+
+| Architecture | Input size | Images/GPU | Lr schedule | Easy Set | Medium Set | Hard Set | Download | Config |
+|:------------:|:----:|:-------:|:-------:|:---------:|:----------:|:---------:|:--------:|:--------:|
+| BlazeFace Keypoint | 640 | 16 | 16w | 0.852 | 0.816 | 0.662 | [model](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_keypoint.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/configs/face_detection/blazeface_keypoint.yml) |
+
 ![](../images/12_Group_Group_12_Group_Group_12_84.jpg)
 
 ## Algorithm Details
@@ -285,7 +291,7 @@ wget https://dataset.bj.bcebos.com/wider_face/wider_face_train_bbx_lmk_gt.txt
 - Using density prior boxes (density_prior_box) improves detection accuracy.
 
 **Version information:**
-- Original version: reproduced with reference to the original paper;
+- Original version: modified with reference to the original paper;
 - Lite version: uses fewer network layers and channels; see the [code](https://github.com/PaddlePaddle/PaddleDetection/blob/master/ppdet/modeling/architectures/faceboxes.py) for details.
 
diff --git a/docs/featured_model/FACE_DETECTION_en.md b/docs/featured_model/FACE_DETECTION_en.md
index 3bcc4a89e9e9a3f2d6d3a8c8fbc73114166970e5..a2cb4a2d4c44533641b094e06c26e17a7837a0fd 100644
--- a/docs/featured_model/FACE_DETECTION_en.md
+++ b/docs/featured_model/FACE_DETECTION_en.md
@@ -270,6 +270,12 @@ wget https://dataset.bj.bcebos.com/wider_face/wider_face_train_bbx_lmk_gt.txt
 
 (2)Use `configs/face_detection/blazeface_keypoint.yml` configuration file for training and evaluation, the method of use is the same as the previous section.
 
+### Evaluation
+
+| Architecture | Size | Img/gpu | Lr schd | Easy Set | Medium Set | Hard Set | Download | Configs |
+|:------------:|:----:|:-------:|:-------:|:---------:|:----------:|:---------:|:--------:|:--------:|
+| BlazeFace Keypoint | 640 | 16 | 16w | 0.852 | 0.816 | 0.662 | [download](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_keypoint.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/configs/face_detection/blazeface_keypoint.yml) |
+
 ![](../images/12_Group_Group_12_Group_Group_12_84.jpg)
 
 
diff --git a/ppdet/modeling/architectures/blazeface.py b/ppdet/modeling/architectures/blazeface.py
index d740d07c09d6c5379a336a2360571982af5bf3a3..5090c90d6be6500cf5adbecd451de463c2b79409 100644
--- a/ppdet/modeling/architectures/blazeface.py
+++ b/ppdet/modeling/architectures/blazeface.py
@@ -41,6 +41,7 @@ class BlazeFace(object):
         output_decoder (object): `SSDOutputDecoder` instance
         min_sizes (list|None): min sizes of generated prior boxes.
         max_sizes (list|None): max sizes of generated prior boxes. Default: None.
+        steps (list|None): step size of adjacent prior boxes on each feature map.
         num_classes (int): number of output classes
         use_density_prior_box (bool): whether or not use density_prior_box
             instead of prior_box
diff --git a/ppdet/modeling/architectures/faceboxes.py b/ppdet/modeling/architectures/faceboxes.py
index b55354902238bb960e6ebf9605ec42c130e27b38..0c957501b23a8a55366b099db502fb44e4935b25 100644
--- a/ppdet/modeling/architectures/faceboxes.py
+++ b/ppdet/modeling/architectures/faceboxes.py
@@ -32,8 +32,8 @@ __all__ = ['FaceBoxes']
 @register
 class FaceBoxes(object):
     """
-    FaceBoxes: Sub-millisecond Neural Face Detection on Mobile GPUs,
-    see https://https://arxiv.org/abs/1708.05234
+    FaceBoxes: A CPU Real-time Face Detector with High Accuracy.
+    see https://arxiv.org/abs/1708.05234
 
     Args:
         backbone (object): backbone instance
@@ -42,7 +42,8 @@ class FaceBoxes(object):
             this attribute should be a list or tuple of integers.
         fixed_sizes (list|None): the fixed sizes of generated density prior boxes,
             this attribute should a list or tuple of same length with `densities`.
-        num_classes (int): number of output classes
+        num_classes (int): number of output classes.
+        steps (list|None): step size of adjacent prior boxes on each feature map.
         """
 
     __category__ = 'architecture'
@@ -55,7 +56,7 @@ class FaceBoxes(object):
                  densities=[[4, 2, 1], [1], [1]],
                  fixed_sizes=[[32., 64., 128.], [256.], [512.]],
                  num_classes=2,
-                 steps=[8., 16., 32.]):
+                 steps=[16., 32., 64.]):
         super(FaceBoxes, self).__init__()
         self.backbone = backbone
         self.num_classes = num_classes
@@ -116,7 +117,7 @@ class FaceBoxes(object):
                 fixed_ratios=[1.],
                 clip=False,
                 offset=0.5,
-                steps=[self.steps[i]] * 2)
+                steps=[self.steps[i]])
 
             num_boxes = box.shape[2]
 
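A note on the `steps` parameter documented and re-tuned in this patch: in SSD-style prior box generation (the convention followed by `fluid.layers.prior_box` and `fluid.layers.density_prior_box`), `steps[i]` is the stride, in input-image pixels, between the centers of prior boxes generated on adjacent cells of the i-th feature map, and each center is placed at `(cell_index + offset) * step`. The snippet below is a minimal, framework-free sketch of that center layout; the `prior_box_centers` helper is hypothetical (not part of PaddleDetection), and the 640x640 input size is an assumption taken from the tables above.

```python
# Illustrative sketch only -- not part of PaddleDetection.
# Shows how a prior-box `step` maps feature-map cells back to
# box centers on the input image, following the SSD convention.


def prior_box_centers(feature_map_size, step, offset=0.5):
    """Return (cx, cy) centers, in input-image pixels, for every cell of a
    feature map whose stride relative to the input image is `step`."""
    height, width = feature_map_size
    return [((x + offset) * step, (y + offset) * step)
            for y in range(height) for x in range(width)]


if __name__ == "__main__":
    # FaceBoxes defaults after this change: steps=[16., 32., 64.].
    # With an assumed 640x640 input, these strides imply 40x40, 20x20
    # and 10x10 grids of prior-box centers.
    for step in [16., 32., 64.]:
        cells = int(640 // step)
        centers = prior_box_centers((cells, cells), step)
        print("step %-4g -> %dx%d cells, first center %s, last center %s"
              % (step, cells, cells, centers[0], centers[-1]))
```

With `offset=0.5`, as in the `density_prior_box` call above, the first center sits half a stride in from the image border, so the prior boxes tile the input symmetrically.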