diff --git a/README_ch.md b/README_ch.md
index 9219857fd950c4d5a4c96ae28ad80d7c5e060cb1..74f02ecca839b53217b2189a65afaf0b012b3261 100644
--- a/README_ch.md
+++ b/README_ch.md
@@ -7,6 +7,12 @@
飞桨图像识别套件PaddleClas是飞桨为工业界和学术界所准备的一个图像识别任务的工具集,助力使用者训练出更好的视觉模型和应用落地。
**近期更新**
+- 🔥️ 2022.5.26 [飞桨产业实践范例直播课](http://aglc.cn/v-c4FAR),解读**超轻量重点区域人员出入管理方案**,欢迎报名来交流。
+
+

+
+- 2022.5.23 新增[人员出入管理范例库](https://aistudio.baidu.com/aistudio/projectdetail/4094475),具体内容可以在 AI Studio 上体验。
+- 2022.5.20 上线 [PP-HGNet](./docs/zh_CN/models/PP-HGNet.md)、[PP-LCNet v2](./docs/zh_CN/models/PP-LCNetV2.md) 系列模型。
- 2022.4.21 新增 CVPR2022 oral论文 [MixFormer](https://arxiv.org/pdf/2204.02557.pdf) 相关[代码](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files)。
- 2022.1.27 全面升级文档;新增[PaddleServing C++ pipeline部署方式](./deploy/paddleserving)和[18M图像识别安卓部署Demo](./deploy/lite_shitu)。
- 2021.11.1 发布[PP-ShiTu技术报告](https://arxiv.org/pdf/2111.00775.pdf),新增饮料识别demo
diff --git a/deploy/configs/PULC/person/inference_person_cls.yaml b/deploy/configs/PULC/person/inference_person_cls.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a70f663a792fcdcab3b7d45059f2afe0b1efbf07
--- /dev/null
+++ b/deploy/configs/PULC/person/inference_person_cls.yaml
@@ -0,0 +1,36 @@
+Global:
+ infer_imgs: "./images/PULC/person/objects365_02035329.jpg"
+ inference_model_dir: "./models/person_cls_infer"
+ batch_size: 1
+ use_gpu: True
+ enable_mkldnn: False
+ cpu_num_threads: 10
+ enable_benchmark: True
+ use_fp16: False
+ ir_optim: True
+ use_tensorrt: False
+ gpu_mem: 8000
+ enable_profile: False
+
+PreProcess:
+ transform_ops:
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 0.00392157
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ channel_num: 3
+ - ToCHWImage:
+
+PostProcess:
+ main_indicator: ThreshOutput
+ ThreshOutput:
+ threshold: 0.9
+ label_0: nobody
+ label_1: someone
+ SavePreLabel:
+ save_dir: ./pre_label/
diff --git a/deploy/configs/inference_attr.yaml b/deploy/configs/inference_attr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b49e2af6482e72e01716faceefb8676d87c08347
--- /dev/null
+++ b/deploy/configs/inference_attr.yaml
@@ -0,0 +1,33 @@
+Global:
+ infer_imgs: "./images/Pedestrain_Attr.jpg"
+ inference_model_dir: "../inference/"
+ batch_size: 1
+ use_gpu: True
+ enable_mkldnn: False
+ cpu_num_threads: 10
+ enable_benchmark: True
+ use_fp16: False
+ ir_optim: True
+ use_tensorrt: False
+ gpu_mem: 8000
+ enable_profile: False
+
+PreProcess:
+ transform_ops:
+ - ResizeImage:
+ size: [192, 256]
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ channel_num: 3
+ - ToCHWImage:
+
+PostProcess:
+ main_indicator: Attribute
+ Attribute:
+ threshold: 0.5  # default threshold
+ glasses_threshold: 0.3  # threshold only for glasses
+ hold_threshold: 0.6  # threshold only for hold
+
\ No newline at end of file
diff --git a/deploy/images/PULC/person/objects365_01780782.jpg b/deploy/images/PULC/person/objects365_01780782.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..a0dd0df59ae5a6386a04a8e0cf9cdbc529139c16
Binary files /dev/null and b/deploy/images/PULC/person/objects365_01780782.jpg differ
diff --git a/deploy/images/PULC/person/objects365_02035329.jpg b/deploy/images/PULC/person/objects365_02035329.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..16d7f2d08cd87bda1b67d21655f00f94a0c6e4e4
Binary files /dev/null and b/deploy/images/PULC/person/objects365_02035329.jpg differ
diff --git a/deploy/images/Pedestrain_Attr.jpg b/deploy/images/Pedestrain_Attr.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6a87e856af8c17a3b93617b93ea517b91c508619
Binary files /dev/null and b/deploy/images/Pedestrain_Attr.jpg differ
diff --git a/deploy/python/postprocess.py b/deploy/python/postprocess.py
index d26cbaa9a8558ffb7f96115eef0a0bd9481fe47a..1107b805085531de74ca1c34d25c98a5d226d531 100644
--- a/deploy/python/postprocess.py
+++ b/deploy/python/postprocess.py
@@ -53,6 +53,34 @@ class PostProcesser(object):
return rtn
+class ThreshOutput(object):
+ def __init__(self, threshold, label_0="0", label_1="1"):
+ self.threshold = threshold
+ self.label_0 = label_0
+ self.label_1 = label_1
+
+ def __call__(self, x, file_names=None):
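+ # x: (N, 2) binary-classification probabilities; probs[1] below is the positive-class (label_1) score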
+ y = []
+ for idx, probs in enumerate(x):
+ score = probs[1]
+ if score < self.threshold:
+ result = {
+ "class_ids": [0],
+ "scores": [1 - score],
+ "label_names": [self.label_0]
+ }
+ else:
+ result = {
+ "class_ids": [1],
+ "scores": [score],
+ "label_names": [self.label_1]
+ }
+ if file_names is not None:
+ result["file_name"] = file_names[idx]
+ y.append(result)
+ return y
+
+
class Topk(object):
def __init__(self, topk=1, class_id_map_file=None):
assert isinstance(topk, (int, ))
@@ -159,3 +187,96 @@ class Binarize(object):
byte[:, i:i + 1] = np.dot(x[:, i * 8:(i + 1) * 8], self.unit)
return byte
+
+
+class Attribute(object):
+ def __init__(self,
+ threshold=0.5,
+ glasses_threshold=0.3,
+ hold_threshold=0.6):
+ self.threshold = threshold
+ self.glasses_threshold = glasses_threshold
+ self.hold_threshold = hold_threshold
+
+ def __call__(self, batch_preds, file_names=None):
+ # postprocess output of predictor
+ age_list = ['AgeLess18', 'Age18-60', 'AgeOver60']
+ direct_list = ['Front', 'Side', 'Back']
+ bag_list = ['HandBag', 'ShoulderBag', 'Backpack']
+ upper_list = ['UpperStride', 'UpperLogo', 'UpperPlaid', 'UpperSplice']
+ lower_list = [
+ 'LowerStripe', 'LowerPattern', 'LongCoat', 'Trousers', 'Shorts',
+ 'Skirt&Dress'
+ ]
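+ # the 26-dim attribute vector is indexed as (inferred from the slices used below):
+ # 0 hat, 1 glasses, 2-3 sleeve, 4-7 upper style, 8-13 lower style, 14 boots,
+ # 15-17 bag, 18 hold, 19-21 age, 22 gender, 23-25 direction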
+ batch_res = []
+ for res in batch_preds:
+ res = res.tolist()
+ label_res = []
+ # gender
+ gender = 'Female' if res[22] > self.threshold else 'Male'
+ label_res.append(gender)
+ # age
+ age = age_list[np.argmax(res[19:22])]
+ label_res.append(age)
+ # direction
+ direction = direct_list[np.argmax(res[23:])]
+ label_res.append(direction)
+ # glasses
+ glasses = 'Glasses: '
+ if res[1] > self.glasses_threshold:
+ glasses += 'True'
+ else:
+ glasses += 'False'
+ label_res.append(glasses)
+ # hat
+ hat = 'Hat: '
+ if res[0] > self.threshold:
+ hat += 'True'
+ else:
+ hat += 'False'
+ label_res.append(hat)
+ # hold obj
+ hold_obj = 'HoldObjectsInFront: '
+ if res[18] > self.hold_threshold:
+ hold_obj += 'True'
+ else:
+ hold_obj += 'False'
+ label_res.append(hold_obj)
+ # bag
+ bag = bag_list[np.argmax(res[15:18])]
+ bag_score = res[15 + np.argmax(res[15:18])]
+ bag_label = bag if bag_score > self.threshold else 'No bag'
+ label_res.append(bag_label)
+ # upper
+ upper_res = res[4:8]
+ upper_label = 'Upper:'
+ sleeve = 'LongSleeve' if res[3] > res[2] else 'ShortSleeve'
+ upper_label += ' {}'.format(sleeve)
+ for i, r in enumerate(upper_res):
+ if r > self.threshold:
+ upper_label += ' {}'.format(upper_list[i])
+ label_res.append(upper_label)
+ # lower
+ lower_res = res[8:14]
+ lower_label = 'Lower: '
+ has_lower = False
+ for i, l in enumerate(lower_res):
+ if l > self.threshold:
+ lower_label += ' {}'.format(lower_list[i])
+ has_lower = True
+ if not has_lower:
+ lower_label += ' {}'.format(lower_list[np.argmax(lower_res)])
+
+ label_res.append(lower_label)
+ # shoe
+ shoe = 'Boots' if res[14] > self.threshold else 'No boots'
+ label_res.append(shoe)
+
+ threshold_list = [0.5] * len(res)
+ threshold_list[1] = self.glasses_threshold
+ threshold_list[18] = self.hold_threshold
+ pred_res = (np.array(res) > np.array(threshold_list)
+ ).astype(np.int8).tolist()
+
+ batch_res.append([label_res, pred_res])
+ return batch_res
diff --git a/deploy/python/predict_cls.py b/deploy/python/predict_cls.py
index 574caa3e73bffee4fbf86224f5d91bc7965694b1..41b46090a7f118f401beefd12a9e9d2513cb8bfb 100644
--- a/deploy/python/predict_cls.py
+++ b/deploy/python/predict_cls.py
@@ -49,10 +49,15 @@ class ClsPredictor(Predictor):
pid = os.getpid()
size = config["PreProcess"]["transform_ops"][1]["CropImage"][
"size"]
+ if config["Global"].get("use_int8", False):
+ precision = "int8"
+ elif config["Global"].get("use_fp16", False):
+ precision = "fp16"
+ else:
+ precision = "fp32"
self.auto_logger = auto_log.AutoLogger(
model_name=config["Global"].get("model_name", "cls"),
- model_precision='fp16'
- if config["Global"]["use_fp16"] else 'fp32',
+ model_precision=precision,
batch_size=config["Global"].get("batch_size", 1),
data_shape=[3, size, size],
save_path=config["Global"].get("save_log_path",
@@ -133,13 +138,21 @@ def main(config):
continue
batch_results = cls_predictor.predict(batch_imgs)
for number, result_dict in enumerate(batch_results):
- filename = batch_names[number]
- clas_ids = result_dict["class_ids"]
- scores_str = "[{}]".format(", ".join("{:.2f}".format(
- r) for r in result_dict["scores"]))
- label_names = result_dict["label_names"]
- print("{}:\tclass id(s): {}, score(s): {}, label_name(s): {}".
- format(filename, clas_ids, scores_str, label_names))
+ if "Attribute" in config["PostProcess"]:
+ filename = batch_names[number]
+ attr_message = result_dict[0]
+ pred_res = result_dict[1]
+ print("{}:\t attributes: {}, \npredict output: {}".format(
+ filename, attr_message, pred_res))
+ else:
+ filename = batch_names[number]
+ clas_ids = result_dict["class_ids"]
+ scores_str = "[{}]".format(", ".join("{:.2f}".format(
+ r) for r in result_dict["scores"]))
+ label_names = result_dict["label_names"]
+ print(
+ "{}:\tclass id(s): {}, score(s): {}, label_name(s): {}".
+ format(filename, clas_ids, scores_str, label_names))
batch_imgs = []
batch_names = []
if cls_predictor.benchmark:
diff --git a/deploy/utils/predictor.py b/deploy/utils/predictor.py
index 7fd1d6dccb61b86f1fece2e3a909c7005f93ca8a..9a38ccd18981c1ddd5dfc75152fa1d31f71d2b06 100644
--- a/deploy/utils/predictor.py
+++ b/deploy/utils/predictor.py
@@ -42,8 +42,22 @@ class Predictor(object):
def create_paddle_predictor(self, args, inference_model_dir=None):
if inference_model_dir is None:
inference_model_dir = args.inference_model_dir
- params_file = os.path.join(inference_model_dir, "inference.pdiparams")
- model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+ if "inference_int8.pdiparams" in os.listdir(inference_model_dir):
+ params_file = os.path.join(inference_model_dir,
+ "inference_int8.pdiparams")
+ model_file = os.path.join(inference_model_dir,
+ "inference_int8.pdmodel")
+ assert args.get(
+ "use_fp16", False
+ ) is False, "fp16 mode is not supported for int8 model inference, please set use_fp16 as False during inference."
+ else:
+ params_file = os.path.join(inference_model_dir,
+ "inference.pdiparams")
+ model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+ assert args.get(
+ "use_int8", False
+ ) is False, "int8 mode is not supported for fp32 model inference, please set use_int8 as False during inference."
+
config = Config(model_file, params_file)
if args.use_gpu:
@@ -63,12 +77,18 @@ class Predictor(object):
config.disable_glog_info()
config.switch_ir_optim(args.ir_optim) # default true
if args.use_tensorrt:
+ precision = Config.Precision.Float32
+ if args.get("use_int8", False):
+ precision = Config.Precision.Int8
+ elif args.get("use_fp16", False):
+ precision = Config.Precision.Half
+
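+ # use_calib_mode=False assumes the quantization scales already ship with the int8 model, so no TensorRT calibration data is needed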
config.enable_tensorrt_engine(
- precision_mode=Config.Precision.Half
- if args.use_fp16 else Config.Precision.Float32,
+ precision_mode=precision,
max_batch_size=args.batch_size,
workspace_size=1 << 30,
- min_subgraph_size=30)
+ min_subgraph_size=30,
+ use_calib_mode=False)
config.enable_memory_optim()
# use zero copy
diff --git a/docs/images/PP-HGNet/PP-HGNet-block.png b/docs/images/PP-HGNet/PP-HGNet-block.png
new file mode 100644
index 0000000000000000000000000000000000000000..56b6d6121739ade55c8f365d574c4de1180b8207
Binary files /dev/null and b/docs/images/PP-HGNet/PP-HGNet-block.png differ
diff --git a/docs/images/PP-HGNet/PP-HGNet.png b/docs/images/PP-HGNet/PP-HGNet.png
new file mode 100644
index 0000000000000000000000000000000000000000..cb5b18fe4e9decc14c68e9cee9aeeed172d3a844
Binary files /dev/null and b/docs/images/PP-HGNet/PP-HGNet.png differ
diff --git a/docs/images/PP-LCNetV2/net.png b/docs/images/PP-LCNetV2/net.png
new file mode 100644
index 0000000000000000000000000000000000000000..079f5ab43f2d0da67c49f1bf33d2648ab8d3f176
Binary files /dev/null and b/docs/images/PP-LCNetV2/net.png differ
diff --git a/docs/images/PP-LCNetV2/rep.png b/docs/images/PP-LCNetV2/rep.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e94220fd7cb5b1732754d7102db830af62aaf30
Binary files /dev/null and b/docs/images/PP-LCNetV2/rep.png differ
diff --git a/docs/images/PP-LCNetV2/shortcut.png b/docs/images/PP-LCNetV2/shortcut.png
new file mode 100644
index 0000000000000000000000000000000000000000..d8024d48b20b9cac0c7cbddf12df799180ff82d6
Binary files /dev/null and b/docs/images/PP-LCNetV2/shortcut.png differ
diff --git a/docs/images/PP-LCNetV2/split_pw.png b/docs/images/PP-LCNetV2/split_pw.png
new file mode 100644
index 0000000000000000000000000000000000000000..f48800a173309e0ef9d998cc06764615db5bd4db
Binary files /dev/null and b/docs/images/PP-LCNetV2/split_pw.png differ
diff --git a/docs/zh_CN/PULC/PULC_person_cls.md b/docs/zh_CN/PULC/PULC_person_cls.md
new file mode 100644
index 0000000000000000000000000000000000000000..ff3508c35c3ff9394da9f5c82e0b4001ee8394a3
--- /dev/null
+++ b/docs/zh_CN/PULC/PULC_person_cls.md
@@ -0,0 +1,332 @@
+# PaddleClas构建有人/无人分类案例
+
+此处提供了使用 PaddleClas 快速构建轻量级、高精度、可落地的有人/无人分类模型的教程。该教程基于有人/无人场景的数据,融合了轻量级骨干网络 PPLCNet、SSLD 预训练权重、EDA 数据增强策略、SKL-UGI 知识蒸馏策略、SHAS 超参数搜索策略,最终得到精度高、速度快、易于部署的二分类模型。
+
+------
+
+
+## 目录
+
+- [1. 环境配置](#1)
+- [2. 有人/无人场景推理预测](#2)
+ - [2.1 下载模型](#2.1)
+ - [2.2 模型推理预测](#2.2)
+ - [2.2.1 预测单张图像](#2.2.1)
+ - [2.2.2 基于文件夹的批量预测](#2.2.2)
+- [3. 有人/无人场景训练](#3)
+ - [3.1 数据准备](#3.1)
+ - [3.2 模型训练](#3.2)
+ - [3.2.1 基于默认超参数训练](#3.2.1)
+ - [3.2.1.1 基于默认超参数训练轻量级模型](#3.2.1.1)
+ - [3.2.1.2 基于默认超参数训练教师模型](#3.2.1.2)
+ - [3.2.1.3 基于默认超参数进行蒸馏训练](#3.2.1.3)
+ - [3.2.2 超参数搜索训练](#3.2.2)
+- [4. 模型评估与推理](#4)
+ - [4.1 模型评估](#4.1)
+ - [4.2 模型预测](#4.2)
+ - [4.3 使用 inference 模型进行推理](#4.3)
+ - [4.3.1 导出 inference 模型](#4.3.1)
+ - [4.3.2 模型推理预测](#4.3.2)
+
+
+<a name="1"></a>
+
+## 1. 环境配置
+
+* 安装:请先参考 [Paddle 安装教程](../installation/install_paddle.md) 以及 [PaddleClas 安装教程](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。
+
+<a name="2"></a>
+
+## 2. 有人/无人场景推理预测
+
+<a name="2.1"></a>
+
+### 2.1 下载模型
+
+* 进入 `deploy` 运行目录。
+
+```
+cd deploy
+```
+
+下载有人/无人分类的模型。
+
+```
+mkdir models
+cd models
+# 下载inference 模型并解压
+wget https://paddleclas.bj.bcebos.com/models/PULC/person_cls_infer.tar && tar -xf person_cls_infer.tar
+```
+
+解压完毕后,`models` 文件夹下应有如下文件结构:
+
+```
+├── person_cls_infer
+│ ├── inference.pdiparams
+│ ├── inference.pdiparams.info
+│ └── inference.pdmodel
+```
+
+<a name="2.2"></a>
+
+### 2.2 模型推理预测
+
+<a name="2.2.1"></a>
+
+#### 2.2.1 预测单张图像
+
+返回 `deploy` 目录:
+
+```
+cd ../
+```
+
+运行下面的命令,对图像 `./images/PULC/person/objects365_02035329.jpg` 进行有人/无人分类。
+
+```shell
+# 使用下面的命令使用 GPU 进行预测
+python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o PostProcess.ThreshOutput.threshold=0.9794
+# 使用下面的命令使用 CPU 进行预测
+python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o PostProcess.ThreshOutput.threshold=0.9794 -o Global.use_gpu=False
+```
+
+输出结果如下。
+
+```
+objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone']
+```
+
+
+**备注:** 真实场景中往往需要在假正类率(Fpr)小于某一数值的前提下追求更高的真正类率(Tpr)。该场景的`val`数据集在千分之一 Fpr 下取得最佳 Tpr 时所对应的分类阈值为`0.9794`,故此处的`threshold`设为`0.9794`。该阈值的确定方法可以参考[3.2.1 节](#3.2.1)中的备注。
+
+<a name="2.2.2"></a>
+
+#### 2.2.2 基于文件夹的批量预测
+
+如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。
+
+```shell
+# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False
+python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o Global.infer_imgs="./images/PULC/person/"
+```
+
+终端中会输出该文件夹内所有图像的分类结果,如下所示。
+
+```
+objects365_01780782.jpg: class id(s): [0], score(s): [1.00], label_name(s): ['nobody']
+objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone']
+```
+
+其中,`someone` 表示该图里存在人,`nobody` 表示该图里不存在人。
+
+<a name="3"></a>
+
+## 3. 有人/无人场景训练
+
+<a name="3.1"></a>
+
+### 3.1 数据准备
+
+进入 PaddleClas 目录。
+
+```
+cd path_to_PaddleClas
+```
+
+进入 `dataset/` 目录,下载并解压有人/无人场景的数据。
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/cls_demo/person.tar
+tar -xf person.tar
+cd ../
+```
+
+执行上述命令后,`dataset/`下存在`person`目录,该目录中具有以下数据:
+
+```
+
+├── train
+│ ├── 000000000009.jpg
+│ ├── 000000000025.jpg
+...
+├── val
+│ ├── objects365_01780637.jpg
+│ ├── objects365_01780640.jpg
+...
+├── ImageNet_val
+│ ├── ILSVRC2012_val_00000001.JPEG
+│ ├── ILSVRC2012_val_00000002.JPEG
+...
+├── train_list.txt
+├── train_list.txt.debug
+├── train_list_for_distill.txt
+├── val_list.txt
+└── val_list.txt.debug
+```
+
+其中`train/`和`val/`分别为训练集和验证集。`train_list.txt`和`val_list.txt`分别为训练集和验证集的标签文件,`train_list.txt.debug`和`val_list.txt.debug`分别为训练集和验证集的`debug`标签文件,其分别是`train_list.txt`和`val_list.txt`的子集,用该文件可以快速体验本案例的流程。`ImageNet_val/`是ImageNet的验证集,该集合和`train`集合的混合数据用于本案例的`SKL-UGI知识蒸馏策略`,对应的训练标签文件为`train_list_for_distill.txt`。
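+
+标签文件的每一行由图像相对路径与类别标签组成,以空格分隔,如下所示(以下内容仅为格式示意,具体以实际文件为准):
+
+```
+train/000000000009.jpg 1
+train/000000000025.jpg 0
+```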
+
+* **注意**:
+
+* 本案例中所使用的所有数据集均为开源数据,`train`集合为[MS-COCO数据](https://cocodataset.org/#overview)的训练集的子集,`val`集合为[Object365数据](https://www.objects365.org/overview.html)的训练集的子集,`ImageNet_val`为[ImageNet数据](https://www.image-net.org/)的验证集。数据集的筛选流程可以参考[有人/无人场景数据集筛选方法]()。
+
+<a name="3.2"></a>
+
+### 3.2 模型训练
+
+<a name="3.2.1"></a>
+
+#### 3.2.1 基于默认超参数训练
+
+<a name="3.2.1.1"></a>
+
+##### 3.2.1.1 基于默认超参数训练轻量级模型
+
+在`ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml`中提供了基于该场景的训练配置,可以通过如下脚本启动训练:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+ --gpus="0,1,2,3" \
+ tools/train.py \
+ -c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml
+```
+
+验证集的最佳指标在0.94-0.95之间(数据集较小,容易造成波动)。
+
+**备注:**
+
+* 此时使用的指标为 Tpr,该指标描述了在假正类率(Fpr)小于某一数值时的真正类率(Tpr),是产业中二分类问题常用的指标之一。在本案例中,Fpr 为千分之一。关于 Fpr 和 Tpr 的更多介绍,可以参考[这里](https://baike.baidu.com/item/AUC/19282953)。
+
+* 在 eval 时,会打印出当前最佳的 TprAtFpr 指标。具体地,其会打印当前的`Fpr`、`Tpr`值以及对应的`threshold`值:`Tpr`反映了当前`Fpr`下的召回率,该值越高,代表模型越好;`threshold`表示该最佳指标所对应的分类阈值,可用于后续模型部署落地等。
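+
+下面给出一段计算 TprAtFpr 最佳阈值的最小化示意代码(假设已得到`val`集上的正类得分`scores`与 0/1 标签`labels`两个 numpy 数组,变量名均为示意,并非套件源码):
+
+```python
+import numpy as np
+
+def tpr_at_fpr(scores, labels, fpr_limit=0.001):
+    """从高到低遍历候选阈值,返回 Fpr 不超过上限时的最大 Tpr 及对应阈值。"""
+    neg, pos = labels == 0, labels == 1
+    best_tpr, best_thresh = 0.0, 1.0
+    for t in np.sort(np.unique(scores))[::-1]:
+        pred_pos = scores >= t
+        if pred_pos[neg].mean() > fpr_limit:  # Fpr 已超限,阈值继续降低只会更差
+            break
+        tpr = pred_pos[pos].mean()
+        if tpr > best_tpr:
+            best_tpr, best_thresh = tpr, float(t)
+    return best_tpr, best_thresh
+```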
+
+<a name="3.2.1.2"></a>
+
+##### 3.2.1.2 基于默认超参数训练教师模型
+
+复用`ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml`中的超参数,训练教师模型,训练脚本如下:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+ --gpus="0,1,2,3" \
+ tools/train.py \
+ -c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml \
+ -o Arch.name=ResNet101_vd
+```
+
+验证集的最佳指标在 0.96-0.98 之间,当前教师模型最好的权重保存在`output/ResNet101_vd/best_model.pdparams`。
+
+<a name="3.2.1.3"></a>
+
+##### 3.2.1.3 基于默认超参数进行蒸馏训练
+
+配置文件`ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型,使用ImageNet数据集的验证集作为新增的无标签数据。训练脚本如下:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+ --gpus="0,1,2,3" \
+ tools/train.py \
+ -c ./ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml \
+ -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+验证集的最佳指标在 0.95-0.97 之间,当前模型最好的权重保存在`output/DistillationModel/best_model_student.pdparams`。
+
+<a name="3.2.2"></a>
+
+#### 3.2.2 超参数搜索训练
+
+[3.2.1 小节](#3.2.1)中的训练基于已经搜索得到的超参数,此部分内容介绍超参数的搜索过程,该过程是为了得到更好的训练超参数。
+
+* 搜索运行脚本如下:
+
+```shell
+python tools/search_strategy.py -c ppcls/configs/StrategySearch/person.yaml
+```
+
+在`ppcls/configs/StrategySearch/person.yaml`中指定了具体的 GPU id 号和搜索配置, 默认搜索的训练日志和模型存放于`output/search_person`中,最终的蒸馏模型存放于`output/search_person/search_res/DistillationModel/best_model_student.pdparams`。
+
+* **注意**:
+
+* [3.2.1 小节](#3.2.1)提供的默认配置已经经过了搜索,所以此过程不是必要的过程;如果自己的训练数据集有变化,可以尝试此过程。
+
+* 此过程基于当前数据集在 V100 4 卡上大概需要耗时 10 小时,如果缺少机器资源,又希望体验搜索过程,可以将`ppcls/configs/cls_demo/person/PPLCNet/PPLCNet_x1_0_search.yaml`中的`train_list.txt`和`val_list.txt`分别替换为`train_list.txt.debug`和`val_list.txt.debug`。替换 list 只是为了加速跑通整个搜索过程,由于数据量较小,其搜索的结果没有参考性。另外,搜索空间可以根据当前的机器资源来调整:如果机器资源有限,可以尝试缩小搜索空间;如果机器资源较充足,可以尝试扩大搜索空间。
+
+* 如果此过程搜索得到的超参数与[3.2.1小节](#3.2.1)提供的超参数不一致,主要是训练数据较小造成的波动所致,可以忽略。
+
+
+<a name="4"></a>
+
+## 4. 模型评估与推理
+
+
+<a name="4.1"></a>
+
+### 4.1 模型评估
+
+训练好模型之后,可以通过以下命令实现对模型指标的评估。
+
+```bash
+python3 tools/eval.py \
+ -c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml \
+ -o Global.pretrained_model="output/DistillationModel/best_model_student"
+```
+
+<a name="4.2"></a>
+
+### 4.2 模型预测
+
+模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测:
+
+```bash
+python3 tools/infer.py \
+ -c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml \
+ -o Infer.infer_imgs=./dataset/person/val/objects365_01780637.jpg \
+ -o Global.pretrained_model=output/DistillationModel/best_model_student \
+ -o Infer.PostProcess.threshold=0.9794
+```
+
+输出结果如下:
+
+```
+[{'class_ids': [0], 'scores': [0.9878496769815683], 'label_names': ['nobody'], 'file_name': './dataset/person/val/objects365_01780637.jpg'}]
+```
+
+**备注:** 这里的`Infer.PostProcess.threshold`的值需要根据实际场景来确定,此处的`0.9794`是该场景的`val`数据集在千分之一 Fpr 下取得最佳 Tpr 时所对应的阈值。
+
+<a name="4.3"></a>
+
+### 4.3 使用 inference 模型进行推理
+
+<a name="4.3.1"></a>
+
+#### 4.3.1 导出 inference 模型
+
+PaddlePaddle 支持使用预测引擎对导出的 inference 模型进行预测推理。接下来介绍如何用预测引擎进行推理:
+首先,对训练好的模型进行转换:
+
+```bash
+python3 tools/export_model.py \
+ -c ./ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml \
+ -o Global.pretrained_model=output/DistillationModel/best_model_student \
+ -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_person
+```
+执行完该脚本后会在`deploy/models/`下生成`PPLCNet_x1_0_person`文件夹,该文件夹中的模型与 2.2 节下载的推理预测模型格式一致。
+
+<a name="4.3.2"></a>
+
+#### 4.3.2 基于 inference 模型推理预测
+推理预测的脚本为:
+
+```
+python3.7 python/predict_cls.py -c configs/PULC/person/inference_person_cls.yaml -o Global.inference_model_dir="models/PPLCNet_x1_0_person" -o PostProcess.ThreshOutput.threshold=0.9794
+```
+
+**备注:**
+
+- 此处的`PostProcess.ThreshOutput.threshold`由eval时的最佳`threshold`来确定。
+- 更多关于推理的细节,可以参考[2.2节](#2.2)。
+
diff --git a/docs/zh_CN/algorithm_introduction/ImageNet_models.md b/docs/zh_CN/algorithm_introduction/ImageNet_models.md
index 4c26ea105453e954457aca71edb66394c5037153..8e847bb8c17db46e71e8542b954fdf49e8cd549d 100644
--- a/docs/zh_CN/algorithm_introduction/ImageNet_models.md
+++ b/docs/zh_CN/algorithm_introduction/ImageNet_models.md
@@ -5,40 +5,41 @@
## 目录
-- [1. 模型库概览图](#1)
-- [2. SSLD 知识蒸馏预训练模型](#2)
- - [2.1 服务器端知识蒸馏模型](#2.1)
- - [2.2 移动端知识蒸馏模型](#2.2)
- - [2.3 Intel CPU 端知识蒸馏模型](#2.3)
-- [3. PP-LCNet & PP-LCNetV2 系列](#3)
-- [4. ResNet 系列](#4)
-- [5. 移动端系列](#5)
-- [6. SEResNeXt 与 Res2Net 系列](#6)
-- [7. DPN 与 DenseNet 系列](#7)
-- [8. HRNet 系列](#8)
-- [9. Inception 系列](#9)
-- [10. EfficientNet 与 ResNeXt101_wsl 系列](#10)
-- [11. ResNeSt 与 RegNet 系列](#11)
-- [12. ViT_and_DeiT 系列](#12)
-- [13. RepVGG 系列](#13)
-- [14. MixNet 系列](#14)
-- [15. ReXNet 系列](#15)
-- [16. SwinTransformer 系列](#16)
-- [17. LeViT 系列](#17)
-- [18. Twins 系列](#18)
-- [19. HarDNet 系列](#19)
-- [20. DLA 系列](#20)
-- [21. RedNet 系列](#21)
-- [22. TNT 系列](#22)
-- [23. CSwinTransformer 系列](#23)
-- [24. PVTV2 系列](#24)
-- [25. MobileViT 系列](#25)
-- [26. 其他模型](#26)
+- [模型库概览图](#Overview)
+- [SSLD 知识蒸馏预训练模型](#SSLD)
+ - [服务器端知识蒸馏模型](#SSLD_server)
+ - [移动端知识蒸馏模型](#SSLD_mobile)
+ - [Intel CPU 端知识蒸馏模型](#SSLD_intel_cpu)
+- [PP-LCNet & PP-LCNetV2 系列](#PPLCNet)
+- [PP-HGNet 系列](#PPHGNet)
+- [ResNet 系列](#ResNet)
+- [移动端系列](#Mobile)
+- [SEResNeXt 与 Res2Net 系列](#SEResNeXt_Res2Net)
+- [DPN 与 DenseNet 系列](#DPN&DenseNet)
+- [HRNet 系列](#HRNet)
+- [Inception 系列](#Inception)
+- [EfficientNet 与 ResNeXt101_wsl 系列](#EfficientNetRes&NeXt101_wsl)
+- [ResNeSt 与 RegNet 系列](#ResNeSt&RegNet)
+- [ViT_and_DeiT 系列](#ViT&DeiT)
+- [RepVGG 系列](#RepVGG)
+- [MixNet 系列](#MixNet)
+- [ReXNet 系列](#ReXNet)
+- [SwinTransformer 系列](#SwinTransformer)
+- [LeViT 系列](#LeViT)
+- [Twins 系列](#Twins)
+- [HarDNet 系列](#HarDNet)
+- [DLA 系列](#DLA)
+- [RedNet 系列](#RedNet)
+- [TNT 系列](#TNT)
+- [CSwinTransformer 系列](#CSwinTransformer)
+- [PVTV2 系列](#PVTV2)
+- [MobileViT 系列](#MobileViT)
+- [其他模型](#Others)
- [参考文献](#reference)
-<a name="1"></a>
+<a name="Overview"></a>
-## 1. 模型库概览图
+## 模型库概览图
基于 ImageNet1k 分类数据集,PaddleClas 支持 37 个系列分类网络结构以及对应的 217 个图像分类预训练模型,训练技巧、每个系列网络结构的简单介绍和性能评估将在相应章节展现,下面所有的速度指标评估环境如下:
* Arm CPU 的评估环境基于骁龙 855(SD855)。
@@ -58,14 +59,14 @@

-<a name="2"></a>
+<a name="SSLD"></a>
-## 2. SSLD 知识蒸馏预训练模型
+## SSLD 知识蒸馏预训练模型
基于 SSLD 知识蒸馏的预训练模型列表如下所示,更多关于 SSLD 知识蒸馏方案的介绍可以参考:[SSLD 知识蒸馏文档](./knowledge_distillation.md)。
-<a name="2.1"></a>
+<a name="SSLD_server"></a>
-### 2.1 服务器端知识蒸馏模型
+### 服务器端知识蒸馏模型
| 模型 | Top-1 Acc | Reference<br>Top-1 Acc | Acc gain | time(ms)<br>bs=1 | time(ms)<br>bs=4 | time(ms)<br>bs=8 | FLOPs(G) | Params(M) | 预训练模型下载地址 | inference模型下载地址 |
|---------------------|-----------|-----------|---------------|----------------|-----------|----------|-----------|-----------------------------------|-----------------------------------|-----------------------------------|
@@ -78,10 +79,12 @@
| HRNet_W18_C_ssld | 0.812 | 0.769 | 0.043 | 6.66 | 8.94 | 11.95 | 4.32 | 21.35 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W18_C_ssld_infer.tar) |
| HRNet_W48_C_ssld | 0.836 | 0.790 | 0.046 | 11.07 | 17.06 | 27.28 | 17.34 | 77.57 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W48_C_ssld_infer.tar) |
| SE_HRNet_W64_C_ssld | 0.848 | - | - | 17.11 | 26.87 | 43.24 | 29.00 | 129.12 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SE_HRNet_W64_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_HRNet_W64_C_ssld_infer.tar) |
+| PPHGNet_tiny_ssld | 0.8195 | 0.7983 | 0.021 | 1.77 | - | - | 4.54 | 14.75 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_ssld_infer.tar) |
+| PPHGNet_small_ssld | 0.8382 | 0.8151 | 0.023 | 2.52 | - | - | 8.53 | 24.38 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_ssld_infer.tar) |
-<a name="2.2"></a>
+<a name="SSLD_mobile"></a>
-### 2.2 移动端知识蒸馏模型
+### 移动端知识蒸馏模型
| 模型 | Top-1 Acc | Reference<br>Top-1 Acc | Acc gain | SD855 time(ms)<br>bs=1, thread=1 | SD855 time(ms)<br>bs=1, thread=2 | SD855 time(ms)<br>bs=1, thread=4 | FLOPs(M) | Params(M) | 模型大小(M) | 预训练模型下载地址 | inference模型下载地址 |
|---------------------|-----------|-----------|---------------|----------------|-----------|----------|-----------|-----------------------------------|-----------------------------------|-----------------------------------|-----------------------------------|
@@ -92,9 +95,9 @@
| MobileNetV3_small_x1_0_ssld | 0.713 | 0.682 | 0.031 | 5.63 | 3.65 | 2.60 | 63.67 | 2.95 | 12 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_small_x1_0_ssld_infer.tar) |
| GhostNet_x1_3_ssld | 0.794 | 0.757 | 0.037 | 19.16 | 12.25 | 9.40 | 236.89 | 7.38 | 29 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/GhostNet_x1_3_ssld_infer.tar) |
-<a name="2.3"></a>
+<a name="SSLD_intel_cpu"></a>
-### 2.3 Intel CPU 端知识蒸馏模型
+### Intel CPU 端知识蒸馏模型
| 模型 | Top-1 Acc | Reference<br>Top-1 Acc | Acc gain | Intel-Xeon-Gold-6148 time(ms)<br>bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 |
|---------------------|-----------|-----------|---------------|----------------|----------|-----------|-----------------------------------|-----------------------------------|
@@ -104,30 +107,44 @@
* 注: `Reference Top-1 Acc` 表示 PaddleClas 基于 ImageNet1k 数据集训练得到的预训练模型精度。
-<a name="3"></a>
+<a name="PPLCNet"></a>
-## 3. PP-LCNet & PP-LCNetV2 系列 [[28](#ref28)]
+## PP-LCNet & PP-LCNetV2 系列 [[28](#ref28)]
PP-LCNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[PP-LCNet 系列模型文档](../models/PP-LCNet.md),[PP-LCNetV2 系列模型文档](../models/PP-LCNetV2.md)。
-| 模型 | Top-1 Acc | Top-5 Acc | Intel-Xeon-Gold-6148 time(ms)<br>bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 |
+| 模型 | Top-1 Acc | Top-5 Acc | time(ms)*<br>bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 |
|:--:|:--:|:--:|:--:|----|----|----|:--:|
-| PPLCNet_x0_25 |0.5186 | 0.7565 | 1.61785 | 18.25 | 1.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar) |
-| PPLCNet_x0_35 |0.5809 | 0.8083 | 2.11344 | 29.46 | 1.65 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar) |
-| PPLCNet_x0_5 |0.6314 | 0.8466 | 2.72974 | 47.28 | 1.89 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar) |
-| PPLCNet_x0_75 |0.6818 | 0.8830 | 4.51216 | 98.82 | 2.37 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar) |
-| PPLCNet_x1_0 |0.7132 | 0.9003 | 6.49276 | 160.81 | 2.96 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar) |
-| PPLCNet_x1_5 |0.7371 | 0.9153 | 12.2601 | 341.86 | 4.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar) |
-| PPLCNet_x2_0 |0.7518 | 0.9227 | 20.1667 | 590 | 6.54 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar) |
-| PPLCNet_x2_5 |0.7660 | 0.9300 | 29.595 | 906 | 9.04 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar) |
-
-| 模型 | Top-1 Acc | Top-5 Acc | Intel-Xeon-Gold-6271C<br>bs=1<br>OpenVINO 2021.4.2<br>time(ms) | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 |
+| PPLCNet_x0_25 |0.5186 | 0.7565 | 1.74 | 18.25 | 1.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar) |
+| PPLCNet_x0_35 |0.5809 | 0.8083 | 1.92 | 29.46 | 1.65 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar) |
+| PPLCNet_x0_5 |0.6314 | 0.8466 | 2.05 | 47.28 | 1.89 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar) |
+| PPLCNet_x0_75 |0.6818 | 0.8830 | 2.29 | 98.82 | 2.37 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar) |
+| PPLCNet_x1_0 |0.7132 | 0.9003 | 2.46 | 160.81 | 2.96 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar) |
+| PPLCNet_x1_5 |0.7371 | 0.9153 | 3.19 | 341.86 | 4.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar) |
+| PPLCNet_x2_0 |0.7518 | 0.9227 | 4.27 | 590 | 6.54 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar) |
+| PPLCNet_x2_5 |0.7660 | 0.9300 | 5.39 | 906 | 9.04 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar) |
+
+| 模型 | Top-1 Acc | Top-5 Acc | time(ms)**<br>bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 |
|:--:|:--:|:--:|:--:|----|----|----|:--:|
-| PPLCNetV2_base | 77.04 | 93.27 | 4.32 | 604 | 6.6 | https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams | https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar |
+| PPLCNetV2_base | 77.04 | 93.27 | 4.32 | 604 | 6.6 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar) |
-<a name="4"></a>
-## 4. ResNet 系列 [[1](#ref1)]
+*: 基于 Intel-Xeon-Gold-6148 硬件平台与 PaddlePaddle 推理平台。
+
+**: 基于 Intel-Xeon-Gold-6271C 硬件平台与 OpenVINO 2021.4.2 推理平台。
+
+<a name="PPHGNet"></a>
+## PP-HGNet 系列
+
+PP-HGNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[PP-HGNet 系列模型文档](../models/PP-HGNet.md)。
+
+| 模型 | Top-1 Acc | Top-5 Acc | time(ms)<br>bs=1 | time(ms)<br>bs=4 | time(ms)<br>bs=8 | FLOPs(G) | Params(M) | 预训练模型下载地址 | inference模型下载地址 |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| PPHGNet_tiny | 0.7983 | 0.9504 | 1.77 | - | - | 4.54 | 14.75 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar) |
+| PPHGNet_small | 0.8151 | 0.9582 | 2.52 | - | - | 8.53 | 24.38 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar) |
+
+
+<a name="ResNet"></a>
+## ResNet 系列 [[1](#ref1)]
ResNet 及其 Vd 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[ResNet 及其 Vd 系列模型文档](../models/ResNet_and_vd.md)。
@@ -149,9 +166,9 @@ ResNet 及其 Vd 系列模型的精度、速度指标如下表所示,更多关
| ResNet50_vd_<br>ssld | 0.8300 | 0.9640 | 2.60 | 4.86 | 7.63 | 4.35 | 25.63 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_ssld_infer.tar) |
| ResNet101_vd_<br>ssld | 0.8373 | 0.9669 | 4.43 | 8.25 | 12.60 | 8.08 | 44.67 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet101_vd_ssld_infer.tar) |
-<a name="5"></a>
+<a name="Mobile"></a>
-## 5. 移动端系列 [[3](#ref3)][[4](#ref4)][[5](#ref5)][[6](#ref6)][[23](#ref23)]
+## 移动端系列 [[3](#ref3)][[4](#ref4)][[5](#ref5)][[6](#ref6)][[23](#ref23)]
移动端系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[移动端系列模型文档](../models/Mobile.md)。
@@ -198,9 +215,9 @@ ResNet 及其 Vd 系列模型的精度、速度指标如下表所示,更多关
| ESNet_x0_75 | 0.7224 | 0.9045 |9.59|6.28|4.52| 123.74 | 3.87 | 15 |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams) |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ESNet_x0_75_infer.tar) |
| ESNet_x1_0 | 0.7392 | 0.9140 |13.67|8.71|5.97| 197.33 | 4.64 | 18 |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams) |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ESNet_x1_0_infer.tar) |
-<a name="6"></a>
+<a name="SEResNeXt_Res2Net"></a>
-## 6. SEResNeXt 与 Res2Net 系列 [[7](#ref7)][[8](#ref8)][[9](#ref9)]
+## SEResNeXt 与 Res2Net 系列 [[7](#ref7)][[8](#ref8)][[9](#ref9)]
SEResNeXt 与 Res2Net 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[SEResNeXt 与 Res2Net 系列模型文档](../models/SEResNext_and_Res2Net.md)。
@@ -233,9 +250,9 @@ SEResNeXt 与 Res2Net 系列模型的精度、速度指标如下表所示,更
| SE_ResNeXt101_<br>32x4d | 0.7939 | 0.9443 | 13.31 | 21.85 | 28.77 | 8.03 | 49.09 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_ResNeXt101_32x4d_infer.tar) |
| SENet154_vd | 0.8140 | 0.9548 | 34.83 | 51.22 | 69.74 | 24.45 | 122.03 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SENet154_vd_infer.tar) |
-<a name="7"></a>
+<a name="DPN&DenseNet"></a>
-## 7. DPN 与 DenseNet 系列 [[14](#ref14)][[15](#ref15)]
+## DPN 与 DenseNet 系列 [[14](#ref14)][[15](#ref15)]
DPN 与 DenseNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[DPN 与 DenseNet 系列模型文档](../models/DPN_DenseNet.md)。
@@ -253,9 +270,9 @@ DPN 与 DenseNet 系列模型的精度、速度指标如下表所示,更多关
| DPN107 | 0.8089 | 0.9532 | 19.46 | 35.62 | 50.22 | 18.38 | 87.13 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DPN107_infer.tar) |
| DPN131 | 0.8070 | 0.9514 | 19.64 | 34.60 | 47.42 | 16.09 | 79.48 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DPN131_infer.tar) |
-<a name="8"></a>
+<a name="HRNet"></a>
-## 8. HRNet 系列 [[13](#ref13)]
+## HRNet 系列 [[13](#ref13)]
HRNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[HRNet 系列模型文档](../models/HRNet.md)。
@@ -272,9 +289,9 @@ HRNet 系列模型的精度、速度指标如下表所示,更多关于该系
| HRNet_W64_C | 0.7930 | 0.9461 | 13.82 | 21.15 | 35.51 | 28.97 | 128.18 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W64_C_infer.tar) |
| SE_HRNet_W64_C_ssld | 0.8475 | 0.9726 | 17.11 | 26.87 | 43.24 | 29.00 | 129.12 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SE_HRNet_W64_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_HRNet_W64_C_ssld_infer.tar) |
-<a name="9"></a>
+<a name="Inception"></a>
-## 9. Inception 系列 [[10](#ref10)][[11](#ref11)][[12](#ref12)][[26](#ref26)]
+## Inception 系列 [[10](#ref10)][[11](#ref11)][[12](#ref12)][[26](#ref26)]
Inception 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[Inception 系列模型文档](../models/Inception.md)。
@@ -289,9 +306,9 @@ Inception 系列模型的精度、速度指标如下表所示,更多关于该
| InceptionV3 | 0.7914 | 0.9459 | 4.78 | 8.53 | 12.28 | 5.73 | 23.87 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/InceptionV3_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/InceptionV3_infer.tar) |
| InceptionV4 | 0.8077 | 0.9526 | 8.93 | 15.17 | 21.56 | 12.29 | 42.74 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/InceptionV4_infer.tar) |
-<a name="10"></a>
+<a name="EfficientNetRes&NeXt101_wsl"></a>
-## 10. EfficientNet 与 ResNeXt101_wsl 系列 [[16](#ref16)][[17](#ref17)]
+## EfficientNet 与 ResNeXt101_wsl 系列 [[16](#ref16)][[17](#ref17)]
EfficientNet 与 ResNeXt101_wsl 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[EfficientNet 与 ResNeXt101_wsl 系列模型文档](../models/EfficientNet_and_ResNeXt101_wsl.md)。
@@ -312,9 +329,9 @@ EfficientNet 与 ResNeXt101_wsl 系列模型的精度、速度指标如下表所
| EfficientNetB7 | 0.8430 | 0.9689 | 25.91 | 71.23 | 128.20 | 38.45 | 66.66 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/EfficientNetB7_infer.tar) |
| EfficientNetB0_<br>small | 0.7580 | 0.9258 | 1.24 | 2.59 | 3.92 | 0.40 | 4.69 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_small_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/EfficientNetB0_small_infer.tar) |
-<a name="11"></a>
+<a name="ResNeSt&RegNet"></a>
-## 11. ResNeSt 与 RegNet 系列 [[24](#ref24)][[25](#ref25)]
+## ResNeSt 与 RegNet 系列 [[24](#ref24)][[25](#ref25)]
ResNeSt 与 RegNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[ResNeSt 与 RegNet 系列模型文档](../models/ResNeSt_RegNet.md)。
@@ -324,9 +341,9 @@ ResNeSt 与 RegNet 系列模型的精度、速度指标如下表所示,更多
| ResNeSt50 | 0.8083 | 0.9542 | 7.36 | 10.23 | 13.84 | 5.40 | 27.54 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNeSt50_infer.tar) |
| RegNetX_4GF | 0.785 | 0.9416 | 6.46 | 8.48 | 11.45 | 4.00 | 22.23 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_4GF_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RegNetX_4GF_infer.tar) |
-<a name="12"></a>
+<a name="ViT&DeiT"></a>
-## 12. ViT_and_DeiT 系列 [[31](#ref31)][[32](#ref32)]
+## ViT_and_DeiT 系列 [[31](#ref31)][[32](#ref32)]
ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模型的精度、速度指标如下表所示. 更多关于该系列模型的介绍可以参考: [ViT_and_DeiT 系列模型文档](../models/ViT_and_DeiT.md)。
@@ -351,9 +368,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| DeiT_base_<br>distilled_patch16_224 | 0.831 | 0.964 | 6.17 | 14.94 | 28.58 | 16.93 | 87.18 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_224_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DeiT_base_distilled_patch16_224_infer.tar) |
| DeiT_base_<br>distilled_patch16_384 | 0.851 | 0.973 | 14.12 | 48.76 | 97.09 | 49.43 | 87.18 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_384_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DeiT_base_distilled_patch16_384_infer.tar) |
-<a name="13"></a>
+<a name="RepVGG"></a>
-## 13. RepVGG 系列 [[36](#ref36)]
+## RepVGG 系列 [[36](#ref36)]
关于 RepVGG 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[RepVGG 系列模型文档](../models/RepVGG.md)。
@@ -370,9 +387,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| RepVGG_B2g4 | 0.7881 | 0.9448 | | | | 11.34 | 55.78 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2g4_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RepVGG_B2g4_infer.tar) |
| RepVGG_B3g4 | 0.7965 | 0.9485 | | | | 16.07 | 75.63 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3g4_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RepVGG_B3g4_infer.tar) |
-<a name="14"></a>
+<a name="MixNet"></a>
-## 14. MixNet 系列 [[29](#ref29)]
+## MixNet 系列 [[29](#ref29)]
关于 MixNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[MixNet 系列模型文档](../models/MixNet.md)。
@@ -382,9 +399,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| MixNet_M | 0.7767 | 0.9364 | 2.84 | 4.60 | 6.62 | 357.119 | 5.065 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MixNet_M_infer.tar) |
| MixNet_L | 0.7860 | 0.9437 | 3.16 | 5.55 | 8.03 | 579.017 | 7.384 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MixNet_L_infer.tar) |
-<a name="15"></a>
+<a name="ReXNet"></a>
-## 15. ReXNet 系列 [[30](#ref30)]
+## ReXNet 系列 [[30](#ref30)]
关于 ReXNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[ReXNet 系列模型文档](../models/ReXNet.md)。
@@ -396,9 +413,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| ReXNet_2_0 | 0.8122 | 0.9536 | 4.30 | 6.54 | 9.19 | 1.56 | 16.45 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ReXNet_2_0_infer.tar) |
| ReXNet_3_0 | 0.8209 | 0.9612 | 5.74 | 9.49 | 13.62 | 3.44 | 34.83 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ReXNet_3_0_infer.tar) |
-<a name="16"></a>
+<a name="SwinTransformer"></a>
-## 16. SwinTransformer 系列 [[27](#ref27)]
+## SwinTransformer 系列 [[27](#ref27)]
关于 SwinTransformer 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[SwinTransformer 系列模型文档](../models/SwinTransformer.md)。
@@ -415,9 +432,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
[1]:基于 ImageNet22k 数据集预训练,然后在 ImageNet1k 数据集迁移学习得到。
-<a name="17"></a>
+<a name="LeViT"></a>
-## 17. LeViT 系列 [[33](#ref33)]
+## LeViT 系列 [[33](#ref33)]
关于 LeViT 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[LeViT 系列模型文档](../models/LeViT.md)。
@@ -431,9 +448,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
**注**:与 Reference 的精度差异源于数据预处理不同及未使用蒸馏的 head 作为输出。
-<a name="18"></a>
+<a name="Twins"></a>
-## 18. Twins 系列 [[34](#ref34)]
+## Twins 系列 [[34](#ref34)]
关于 Twins 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[Twins 系列模型文档](../models/Twins.md)。
@@ -448,9 +465,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
**注**:与 Reference 的精度差异源于数据预处理不同。
-<a name="19"></a>
+<a name="HarDNet"></a>
-## 19. HarDNet 系列 [[37](#ref37)]
+## HarDNet 系列 [[37](#ref37)]
关于 HarDNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[HarDNet 系列模型文档](../models/HarDNet.md)。
@@ -461,9 +478,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| HarDNet68| 0.7546 | 0.9265 | 3.58 | 8.53 | 11.58 | 4.26 | 17.58 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HarDNet68_infer.tar) |
| HarDNet85 | 0.7744 | 0.9355 | 6.24 | 14.85 | 20.57 | 9.09 | 36.69 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HarDNet85_infer.tar) |
-<a name="20"></a>
+<a name="DLA"></a>
-## 20. DLA 系列 [[38](#ref38)]
+## DLA 系列 [[38](#ref38)]
关于 DLA 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[DLA 系列模型文档](../models/DLA.md)。
@@ -479,9 +496,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| DLA60x_c | 0.6645 | 0.8754 | 1.79 | 3.68 | 5.19 | 0.59 | 1.33 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DLA60x_c_infer.tar) |
| DLA60x | 0.7753 | 0.9378 | 5.98 | 9.24 | 12.52 | 3.54 | 17.41 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DLA60x_infer.tar) |
-<a name="21"></a>
+<a name="RedNet"></a>
-## 21. RedNet 系列 [[39](#ref39)]
+## RedNet 系列 [[39](#ref39)]
关于 RedNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[RedNet 系列模型文档](../models/RedNet.md)。
@@ -493,9 +510,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| RedNet101 | 0.7894 | 0.9436 | 13.07 | 44.12 | 83.28 | 4.59 | 25.76 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RedNet101_infer.tar) |
| RedNet152 | 0.7917 | 0.9440 | 18.66 | 63.27 | 119.48 | 6.57 | 34.14 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RedNet152_infer.tar) |
-<a name="22"></a>
+<a name="TNT"></a>
-## 22. TNT 系列 [[35](#ref35)]
+## TNT 系列 [[35](#ref35)]
关于 TNT 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[TNT 系列模型文档](../models/TNT.md)。
@@ -505,9 +522,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
**注**:TNT 模型的数据预处理部分 `NormalizeImage` 中的 `mean` 与 `std` 均为 0.5。
-<a name="23"></a>
+<a name="CSwinTransformer"></a>
-## 23. CSWinTransformer 系列 [[40](#ref40)]
+## CSWinTransformer 系列 [[40](#ref40)]
关于 CSWinTransformer 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[CSWinTransformer 系列模型文档](../models/CSWinTransformer.md)。
@@ -521,9 +538,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| CSWinTransformer_large_384 | 0.8748 | 0.9833 | - | - | - | 94.7 | 173.3 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_384_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/CSWinTransformer_large_384_infer.tar) |
-<a name="24"></a>
+<a name="PVTV2"></a>
-## 24. PVTV2 系列 [[41](#ref41)]
+## PVTV2 系列 [[41](#ref41)]
关于 PVTV2 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[PVTV2 系列模型文档](../models/PVTV2.md)。
@@ -538,9 +555,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| PVT_V2_B5 | 0.837 | 0.966 | - | - | - | 11.4 | 82.0 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PVT_V2_B5_infer.tar) |
-<a name="25"></a>
+<a name="MobileViT"></a>
-## 25. MobileViT 系列 [[42](#ref42)]
+## MobileViT 系列 [[42](#ref42)]
关于 MobileViT 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[MobileViT 系列模型文档](../models/MobileViT.md)。
@@ -550,9 +567,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模
| MobileViT_XS | 0.7454 | 0.9227 | - | - | - | 930.75 | 2.33 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XS_infer.tar) |
| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 337.24 | 1.28 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) |
-<a name="26"></a>
+<a name="Others"></a>
-## 26. 其他模型
+## 其他模型
关于 AlexNet [[18](#ref18)]、SqueezeNet 系列 [[19](#ref19)]、VGG 系列 [[20](#ref20)]、DarkNet53 [[21](#ref21)] 等模型的精度、速度指标如下表所示,更多介绍可以参考:[其他模型文档](../models/Others.md)。
diff --git a/docs/zh_CN/models/PP-HGNet.md b/docs/zh_CN/models/PP-HGNet.md
new file mode 100644
index 0000000000000000000000000000000000000000..d4b4a975d105f632a46c75a78b89089bdb1590e0
--- /dev/null
+++ b/docs/zh_CN/models/PP-HGNet.md
@@ -0,0 +1,51 @@
+# PP-HGNet 系列
+---
+## 目录
+
+* [1. 概述](#1)
+* [2. 结构信息](#2)
+* [3. 实验结果](#3)
+
+<a name="1"></a>
+
+## 1. 概述
+
+PP-HGNet(High Performance GPU Net) 是百度飞桨视觉团队自研的、更适用于 GPU 平台的高性能骨干网络。该网络在 VOVNet 的基础上使用了可学习的下采样层(LDS Layer),融合了 ResNet_vd、PPLCNet 等模型的优点。在 GPU 平台上,该模型在相同速度下精度超越其他 SOTA 模型:同等速度下,精度高于 ResNet34-D 模型 3.8 个百分点,高于 ResNet50-D 模型 2.4 个百分点;在使用百度自研 SSLD 蒸馏策略后,超越 ResNet50-D 模型 4.7 个百分点。与此同时,在相同精度下,其推理速度也远超主流 VisionTransformer 模型。
+
+<a name="2"></a>
+
+## 2. 结构信息
+
+PP-HGNet 作者针对 GPU 设备,对目前 GPU 友好的网络做了分析和归纳,尽可能多地使用 3x3 标准卷积(计算密度最高)。在此基础上,以 VOVNet 为基准模型,融合主要的有利于 GPU 推理的改进点,从而得到一个有利于 GPU 推理的骨干网络:同样速度下,精度大幅超越其他 CNN 或 VisionTransformer 模型。
+
+PP-HGNet 骨干网络的整体结构如下:
+
+![](../../images/PP-HGNet/PP-HGNet.png)
+
+其中,PP-HGNet 由多个 HG-Block 组成,HG-Block 的细节如下:
+
+![](../../images/PP-HGNet/PP-HGNet-block.png)
+
+<a name="3"></a>
+
+## 3. 实验结果
+
+PP-HGNet 与其他模型的比较如下,其中测试机器为 NVIDIA® Tesla® V100,开启 TensorRT 引擎,精度类型为 FP32。在相同速度下,PP-HGNet 精度均超越了其他 SOTA CNN 模型,在与 SwinTransformer 模型的比较中,在更高精度的同时,速度快 2 倍以上。
+
+| Model | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) |
+|-------|---------------|---------------|-------------|
+| ResNet34 | 74.57 | 92.14 | 1.97 |
+| ResNet34_vd | 75.98 | 92.98 | 2.00 |
+| EfficientNetB0 | 77.38 | 93.31 | 1.96 |
+| PPHGNet_tiny | 79.83 | 95.04 | 1.77 |
+| PPHGNet_tiny_ssld | 81.95 | 96.12 | 1.77 |
+| ResNet50 | 76.50 | 93.00 | 2.54 |
+| ResNet50_vd | 79.12 | 94.44 | 2.60 |
+| ResNet50_rsb | 80.40 | | 2.54 |
+| EfficientNetB1 | 79.15 | 94.41 | 2.88 |
+| SwinTransformer_tiny | 81.2 | 95.5 | 6.59 |
+| PPHGNet_small | 81.51| 95.82 | 2.52 |
+| PPHGNet_small_ssld | 83.82| 96.81 | 2.52 |
+
+
+关于 PP-HGNet 的更多介绍以及其在下游任务中的表现,敬请期待。
diff --git a/docs/zh_CN/models/PP-LCNetV2.md b/docs/zh_CN/models/PP-LCNetV2.md
index 7563574694696247d553669e363df68fa00148dc..362bac6f62957ae484a15a7f1b396e86d593214f 100644
--- a/docs/zh_CN/models/PP-LCNetV2.md
+++ b/docs/zh_CN/models/PP-LCNetV2.md
@@ -1,15 +1,53 @@
-# PP-LCNetV2 系列
+# PP-LCNetV2
---
-## 概述
+## 1. 概述
-PP-LCNetV2 是在 [PP-LCNet 系列模型](./PP-LCNet.md)的基础上,所提出的针对 Intel CPU 硬件平台设计的计算机视觉骨干网络,该模型更为
+骨干网络对计算机视觉下游任务的影响不言而喻,它不仅很大程度上决定了下游模型的性能,也极大地影响着模型效率。然而,现有的大多数骨干网络在真实应用中的效率并不理想,尤其缺乏针对 Intel CPU 平台优化的骨干网络:我们测试了现有的主流轻量级模型,发现它们在 Intel CPU 平台上的效率并不理想。与此同时,Intel CPU 平台在工业界仍有大量使用场景,因此我们提出了 PP-LCNet 系列模型,PP-LCNetV2 是在 [PP-LCNetV1](./PP-LCNet.md) 基础上改进得到的。
-在不使用额外数据的前提下,PPLCNetV2_base 模型在图像分类 ImageNet 数据集上能够取得超过 77% 的 Top1 Acc,同时在 Intel CPU 平台仅有 4.4 ms 以下的延迟,如下表所示,其中延时测试基于 Intel(R) Xeon(R) Gold 6271C CPU @ 2.60GHz 硬件平台,OpenVINO 2021.4.2推理平台。
+## 2. 设计细节
+
+![](../../images/PP-LCNetV2/net.png)
+
+PP-LCNetV2 模型的网络整体结构如上图所示。PP-LCNetV2 模型是在 PP-LCNetV1 的基础上优化而来,主要使用重参数化策略组合了不同大小卷积核的深度卷积,并优化了点卷积、Shortcut等。
+
+### 2.1 Rep 策略
+
+卷积核的大小决定了卷积层感受野的大小,通过组合使用不同大小的卷积核,能够获取不同尺度的特征。因此 PPLCNetV2 在 Stage3、Stage4 中,在同一层组合使用 kernel size 分别为 5、3、1 的 DW 卷积;同时为了避免对模型效率的影响,使用重参数化(Re-parameterization,Rep)策略对同层的 DW 卷积进行融合,如下图所示。
+
+![](../../images/PP-LCNetV2/rep.png)
+
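+下面用一段示意代码说明多分支 DW 卷积核融合的原理(仅为原理示意,假设各分支步长一致且不含 BN,函数与变量名均为假设,并非套件中的实现):
+
+```python
+import numpy as np
+
+def fuse_dw_kernels(k5, k3, k1):
+    # 卷积是线性运算:多分支输出之和,等价于先把小卷积核零填充到 5x5 再逐元素相加,
+    # 然后只做一次 5x5 DW 卷积。k5/k3/k1 形状分别为 (C,1,5,5)、(C,1,3,3)、(C,1,1,1)。
+    fused = k5.copy()
+    fused[:, :, 1:4, 1:4] += k3
+    fused[:, :, 2:3, 2:3] += k1
+    return fused
+```
+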
+### 2.2 PW 卷积
+
+深度可分离卷积通常由一层 DW 卷积和一层 PW 卷积组成,用以替换标准卷积。为了使深度可分离卷积具有更强的拟合能力,我们尝试使用两层 PW 卷积;同时为了控制模型效率不受影响,两层 PW 卷积设置为:第一层在通道维度对特征图压缩,第二层再放大还原特征图通道,如下图所示。通过实验发现,该策略能够显著提高模型性能;为了平衡其对模型效率的影响,PPLCNetV2 仅在 Stage4、Stage5 中使用了该策略。
+
+![](../../images/PP-LCNetV2/split_pw.png)
+
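+这种“先压缩、再还原”的两层 PW 卷积可以写成如下示意结构(基于 PaddlePaddle,压缩比例、激活函数等均为假设值,并非套件源码):
+
+```python
+import paddle.nn as nn
+
+def split_pw(in_channels, out_channels, squeeze_ratio=2):
+    # 第一层 1x1 卷积在通道维度压缩特征图,第二层 1x1 卷积再放大还原到目标通道数
+    mid_channels = in_channels // squeeze_ratio
+    return nn.Sequential(
+        nn.Conv2D(in_channels, mid_channels, kernel_size=1),
+        nn.ReLU(),
+        nn.Conv2D(mid_channels, out_channels, kernel_size=1))
+```
+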
+### 2.3 Shortcut
+
+残差结构(residual)自提出以来,被诸多模型广泛使用。但在轻量级卷积神经网络中,残差结构所带来的元素级(element-wise)加法操作会对模型速度造成影响。我们在 PP-LCNetV2 中以 Stage 为单位实验了残差结构对模型的影响,发现残差结构的使用并非一定会带来性能的提高,因此 PPLCNetV2 仅在最后一个 Stage 中使用了残差结构:在 Block 中增加 Shortcut,如下图所示。
+
+![](../../images/PP-LCNetV2/shortcut.png)
+
+### 2.4 激活函数
+
+在目前的轻量级卷积神经网络中,ReLU、Hard-Swish 激活函数最为常用,虽然在模型性能方面,Hard-Swish 通常更为优秀,然而我们发现部分推理平台对于 Hard-Swish 激活函数的效率优化并不理想,因此为了兼顾通用性,PP-LCNetV2 默认使用了 ReLU 激活函数,并且我们测试发现,ReLU 激活函数对于较大模型的性能影响较小。
+
+### 2.5 SE 模块
+
+虽然 SE 模块能够显著提高模型性能,但其对模型速度的影响同样不可忽视,在 PP-LCNetV1 中,我们发现在模型中后部使用 SE 模块能够获得最大化的收益。在 PP-LCNetV2 的优化过程中,我们以 Stage 为单位对 SE 模块的位置做了进一步实验,并发现在 Stage3 中使用能够取得更好的平衡。
+
+## 3. 实验结果
+
+在不使用额外数据的前提下,PPLCNetV2_base 模型在图像分类 ImageNet 数据集上能够取得超过 77% 的 Top1 Acc,同时在 Intel CPU 平台的推理时间在 4.4 ms 以下,如下表所示,其中推理时间基于 Intel(R) Xeon(R) Gold 6271C CPU @ 2.60GHz 硬件平台,OpenVINO 推理平台。
| Model | Params(M) | FLOPs(M) | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) |
|-------|-----------|----------|---------------|---------------|-------------|
-| PPLCNetV2_base | 6.6 | 604 | 77.04 | 93.27 | 4.32 |
+| MobileNetV3_Large_x1_25 | 7.4 | 714 | 76.4 | 93.00 | 5.19 |
+| PPLCNet_x2_5 | 9 | 906 | 76.60 | 93.00 | 7.25 |
+| PPLCNetV2_base | 6.6 | 604 | 77.04 | 93.27 | 4.32 |
+
+
-关于 PP-LCNetV2 系列模型的更多信息,敬请关注。
+关于 PP-LCNetV2 模型的更多信息,敬请关注。
diff --git a/docs/zh_CN/samples/.gitkeep b/docs/zh_CN/samples/.gitkeep
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/docs/zh_CN/samples/.gitkeep
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/docs/zh_CN/samples/Personnel_Access/README.md b/docs/zh_CN/samples/Personnel_Access/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d63c67afea59430cc627458d6f35fd95e2ac59d1
--- /dev/null
+++ b/docs/zh_CN/samples/Personnel_Access/README.md
@@ -0,0 +1,16 @@
+## 人员出入管理
+
+近几年,AI 视觉技术在安防、工业制造等场景的产业智能化升级进程中发挥着举足轻重的作用。【进出管控】作为各行业中的关键场景,应用需求十分迫切。如在居家防盗、机房管控以及景区危险告警等场景中,存在大量对异常目标(人、车或其他物体)不经允许擅自进入规定区域的及时检测需求。利用深度学习视觉技术,可以及时准确地对闯入行为进行识别并发出告警信息,切实保障人员的生命财产安全。相比传统人力监管的方式,该方案不仅可以实现 7*24 小时不间断的全方位保护,还能极大地降低管理成本,解放劳动力。
+
+但在真实产业中,要实现高精度的人员进出识别不是一件容易的事,在实际场景中存在着各种各样的问题:
+
+**摄像头采集到的图像会受到建筑、机器、车辆等遮挡的影响**
+
+**天气多种多样,要适应白天、黑夜、雾天和雨天等**
+
+针对上述场景,本次飞桨产业实践范例库推出了重点区域人员进出管控实践示例,提供从数据准备、技术方案、模型训练优化,到模型部署的全流程可复用方案,有效解决了不同光照、不同天气等室外复杂环境下的图像分类问题,并且极大地降低了数据标注和算力成本,适用于厂区巡检、家居防盗、景区管理等多个产业应用。
+
+
+
+
+**注**: AI Studio在线运行代码请参考[人员出入管理](https://aistudio.baidu.com/aistudio/projectdetail/4094475)
diff --git a/docs/zh_CN/samples/Personnel_Access/imgs/someone.gif b/docs/zh_CN/samples/Personnel_Access/imgs/someone.gif
new file mode 100644
index 0000000000000000000000000000000000000000..1f5d684e5455971a636f70540216366166d8d9f8
Binary files /dev/null and b/docs/zh_CN/samples/Personnel_Access/imgs/someone.gif differ
diff --git a/ppcls/arch/__init__.py b/ppcls/arch/__init__.py
index da21e101a27eb0db2c05b658346148bda3139c80..4021457961ad9013df79b05594e8424d1c312b10 100644
--- a/ppcls/arch/__init__.py
+++ b/ppcls/arch/__init__.py
@@ -32,14 +32,19 @@ from ppcls.arch.distill.afd_attention import LinearTransformStudent, LinearTrans
__all__ = ["build_model", "RecModel", "DistillationModel", "AttentionModel"]
-def build_model(config):
+def build_model(config, mode="train"):
arch_config = copy.deepcopy(config["Arch"])
model_type = arch_config.pop("name")
+ use_sync_bn = arch_config.pop("use_sync_bn", False)
mod = importlib.import_module(__name__)
arch = getattr(mod, model_type)(**arch_config)
+ if use_sync_bn:
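+ # convert all BatchNorm sublayers to SyncBatchNorm so BN statistics are synchronized across devices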
+ arch = nn.SyncBatchNorm.convert_sync_batchnorm(arch)
+
if isinstance(arch, TheseusLayer):
prune_model(config, arch)
- quantize_model(config, arch)
+ quantize_model(config, arch, mode)
+
return arch
@@ -50,6 +55,7 @@ def apply_to_static(config, model):
specs = None
if 'image_shape' in config['Global']:
specs = [InputSpec([None] + config['Global']['image_shape'])]
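+ # model inputs do not require gradients; mark the spec accordingly before static conversion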
+ specs[0].stop_gradient = True
model = to_static(model, input_spec=specs)
logger.info("Successfully to apply @to_static with specs: {}".format(
specs))
diff --git a/ppcls/arch/backbone/__init__.py b/ppcls/arch/backbone/__init__.py
index a685cfb5b23f299e7d875470034f4f7b3f626086..e957358479cb98d8bde3dac0d4b2785b8965c7bf 100644
--- a/ppcls/arch/backbone/__init__.py
+++ b/ppcls/arch/backbone/__init__.py
@@ -24,6 +24,7 @@ from ppcls.arch.backbone.legendary_models.hrnet import HRNet_W18_C, HRNet_W30_C,
from ppcls.arch.backbone.legendary_models.pp_lcnet import PPLCNet_x0_25, PPLCNet_x0_35, PPLCNet_x0_5, PPLCNet_x0_75, PPLCNet_x1_0, PPLCNet_x1_5, PPLCNet_x2_0, PPLCNet_x2_5
from ppcls.arch.backbone.legendary_models.pp_lcnet_v2 import PPLCNetV2_base
from ppcls.arch.backbone.legendary_models.esnet import ESNet_x0_25, ESNet_x0_5, ESNet_x0_75, ESNet_x1_0
+from ppcls.arch.backbone.legendary_models.pp_hgnet import PPHGNet_tiny, PPHGNet_small, PPHGNet_base
from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc
from ppcls.arch.backbone.model_zoo.resnext import ResNeXt50_32x4d, ResNeXt50_64x4d, ResNeXt101_32x4d, ResNeXt101_64x4d, ResNeXt152_32x4d, ResNeXt152_64x4d
@@ -51,7 +52,7 @@ from ppcls.arch.backbone.model_zoo.darknet import DarkNet53
from ppcls.arch.backbone.model_zoo.regnet import RegNetX_200MF, RegNetX_4GF, RegNetX_32GF, RegNetY_200MF, RegNetY_4GF, RegNetY_32GF
from ppcls.arch.backbone.model_zoo.vision_transformer import ViT_small_patch16_224, ViT_base_patch16_224, ViT_base_patch16_384, ViT_base_patch32_384, ViT_large_patch16_224, ViT_large_patch16_384, ViT_large_patch32_384
from ppcls.arch.backbone.model_zoo.distilled_vision_transformer import DeiT_tiny_patch16_224, DeiT_small_patch16_224, DeiT_base_patch16_224, DeiT_tiny_distilled_patch16_224, DeiT_small_distilled_patch16_224, DeiT_base_distilled_patch16_224, DeiT_base_patch16_384, DeiT_base_distilled_patch16_384
-from ppcls.arch.backbone.model_zoo.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384
+from ppcls.arch.backbone.legendary_models.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384
from ppcls.arch.backbone.model_zoo.cswin_transformer import CSWinTransformer_tiny_224, CSWinTransformer_small_224, CSWinTransformer_base_224, CSWinTransformer_large_224, CSWinTransformer_base_384, CSWinTransformer_large_384
from ppcls.arch.backbone.model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L
from ppcls.arch.backbone.model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0
@@ -69,6 +70,7 @@ from ppcls.arch.backbone.model_zoo.van import VAN_tiny
from ppcls.arch.backbone.variant_models.resnet_variant import ResNet50_last_stage_stride1
from ppcls.arch.backbone.variant_models.vgg_variant import VGG19Sigmoid
from ppcls.arch.backbone.variant_models.pp_lcnet_variant import PPLCNet_x2_5_Tanh
+from ppcls.arch.backbone.model_zoo.adaface_ir_net import AdaFace_IR_18, AdaFace_IR_34, AdaFace_IR_50, AdaFace_IR_101, AdaFace_IR_152, AdaFace_IR_SE_50, AdaFace_IR_SE_101, AdaFace_IR_SE_152, AdaFace_IR_SE_200
# help whl get all the models' api (class type) and components' api (func type)
diff --git a/ppcls/arch/backbone/legendary_models/pp_hgnet.py b/ppcls/arch/backbone/legendary_models/pp_hgnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e0412dfb210c7dc44bc98854dbb96fca526ab1f
--- /dev/null
+++ b/ppcls/arch/backbone/legendary_models/pp_hgnet.py
@@ -0,0 +1,372 @@
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import KaimingNormal, Constant
+from paddle.nn import Conv2D, BatchNorm2D, ReLU, AdaptiveAvgPool2D, MaxPool2D
+from paddle.regularizer import L2Decay
+from paddle import ParamAttr
+
+from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
+from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
+
+MODEL_URLS = {
+ "PPHGNet_tiny":
+ "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams",
+ "PPHGNet_small":
+ "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams"
+}
+
+__all__ = list(MODEL_URLS.keys())
+
+kaiming_normal_ = KaimingNormal()
+zeros_ = Constant(value=0.)
+ones_ = Constant(value=1.)
+
+
+class ConvBNAct(TheseusLayer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ groups=1,
+ use_act=True):
+ super().__init__()
+ self.use_act = use_act
+ self.conv = Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ bias_attr=False)
+ self.bn = BatchNorm2D(
+ out_channels,
+ weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+ if self.use_act:
+ self.act = ReLU()
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ if self.use_act:
+ x = self.act(x)
+ return x
+
+
+class ESEModule(TheseusLayer):
+ def __init__(self, channels):
+ super().__init__()
+ self.avg_pool = AdaptiveAvgPool2D(1)
+ self.conv = Conv2D(
+ in_channels=channels,
+ out_channels=channels,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, x):
+ identity = x
+ x = self.avg_pool(x)
+ x = self.conv(x)
+ x = self.sigmoid(x)
+ return paddle.multiply(x=identity, y=x)
+
+
+class HG_Block(TheseusLayer):
+ def __init__(
+ self,
+ in_channels,
+ mid_channels,
+ out_channels,
+ layer_num,
+ identity=False, ):
+ super().__init__()
+ self.identity = identity
+
+ self.layers = nn.LayerList()
+ self.layers.append(
+ ConvBNAct(
+ in_channels=in_channels,
+ out_channels=mid_channels,
+ kernel_size=3,
+ stride=1))
+ for _ in range(layer_num - 1):
+ self.layers.append(
+ ConvBNAct(
+ in_channels=mid_channels,
+ out_channels=mid_channels,
+ kernel_size=3,
+ stride=1))
+
+ # feature aggregation
+ total_channels = in_channels + layer_num * mid_channels
+ self.aggregation_conv = ConvBNAct(
+ in_channels=total_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=1)
+ self.att = ESEModule(out_channels)
+
+ def forward(self, x):
+ identity = x
+ output = []
+ output.append(x)
+ for layer in self.layers:
+ x = layer(x)
+ output.append(x)
+ x = paddle.concat(output, axis=1)
+ x = self.aggregation_conv(x)
+ x = self.att(x)
+ if self.identity:
+ x += identity
+ return x
+
+
+class HG_Stage(TheseusLayer):
+ def __init__(self,
+ in_channels,
+ mid_channels,
+ out_channels,
+ block_num,
+ layer_num,
+ downsample=True):
+ super().__init__()
+ self.downsample = downsample
+ if downsample:
+ self.downsample = ConvBNAct(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ kernel_size=3,
+ stride=2,
+ groups=in_channels,
+ use_act=False)
+
+ blocks_list = []
+ blocks_list.append(
+ HG_Block(
+ in_channels,
+ mid_channels,
+ out_channels,
+ layer_num,
+ identity=False))
+ for _ in range(block_num - 1):
+ blocks_list.append(
+ HG_Block(
+ out_channels,
+ mid_channels,
+ out_channels,
+ layer_num,
+ identity=True))
+ self.blocks = nn.Sequential(*blocks_list)
+
+ def forward(self, x):
+ if self.downsample:
+ x = self.downsample(x)
+ x = self.blocks(x)
+ return x
+
+
+class PPHGNet(TheseusLayer):
+ """
+ PPHGNet
+ Args:
+ stem_channels: list. Stem channel list of PPHGNet.
+        stage_config: dict. The configuration of each stage of PPHGNet, such as the number of channels, stride, etc.
+ layer_num: int. Number of layers of HG_Block.
+ use_last_conv: boolean. Whether to use a 1x1 convolutional layer before the classification layer.
+ class_expand: int=2048. Number of channels for the last 1x1 convolutional layer.
+ dropout_prob: float. Parameters of dropout, 0.0 means dropout is not used.
+ class_num: int=1000. The number of classes.
+ Returns:
+ model: nn.Layer. Specific PPHGNet model depends on args.
+ """
+ def __init__(self,
+ stem_channels,
+ stage_config,
+ layer_num,
+ use_last_conv=True,
+ class_expand=2048,
+ dropout_prob=0.0,
+ class_num=1000):
+ super().__init__()
+ self.use_last_conv = use_last_conv
+ self.class_expand = class_expand
+
+ # stem
+ stem_channels.insert(0, 3)
+ self.stem = nn.Sequential(* [
+ ConvBNAct(
+ in_channels=stem_channels[i],
+ out_channels=stem_channels[i + 1],
+ kernel_size=3,
+ stride=2 if i == 0 else 1) for i in range(
+ len(stem_channels) - 1)
+ ])
+ self.pool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
+
+ # stages
+ self.stages = nn.LayerList()
+ for k in stage_config:
+ in_channels, mid_channels, out_channels, block_num, downsample = stage_config[
+ k]
+ self.stages.append(
+ HG_Stage(in_channels, mid_channels, out_channels, block_num,
+ layer_num, downsample))
+
+ self.avg_pool = AdaptiveAvgPool2D(1)
+ if self.use_last_conv:
+ self.last_conv = Conv2D(
+ in_channels=out_channels,
+ out_channels=self.class_expand,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias_attr=False)
+ self.act = nn.ReLU()
+ self.dropout = nn.Dropout(
+ p=dropout_prob, mode="downscale_in_infer")
+
+ self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
+ self.fc = nn.Linear(self.class_expand
+ if self.use_last_conv else out_channels, class_num)
+
+ self._init_weights()
+
+ def _init_weights(self):
+ for m in self.sublayers():
+ if isinstance(m, nn.Conv2D):
+ kaiming_normal_(m.weight)
+ elif isinstance(m, (nn.BatchNorm2D)):
+ ones_(m.weight)
+ zeros_(m.bias)
+ elif isinstance(m, nn.Linear):
+ zeros_(m.bias)
+
+ def forward(self, x):
+ x = self.stem(x)
+ x = self.pool(x)
+
+ for stage in self.stages:
+ x = stage(x)
+
+ x = self.avg_pool(x)
+ if self.use_last_conv:
+ x = self.last_conv(x)
+ x = self.act(x)
+ x = self.dropout(x)
+ x = self.flatten(x)
+ x = self.fc(x)
+ return x
+
+
+def _load_pretrained(pretrained, model, model_url, use_ssld):
+ if pretrained is False:
+ pass
+ elif pretrained is True:
+ load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
+ elif isinstance(pretrained, str):
+ load_dygraph_pretrain(model, pretrained)
+ else:
+ raise RuntimeError(
+ "pretrained type is not available. Please use `string` or `boolean` type."
+ )
+
+
+def PPHGNet_tiny(pretrained=False, use_ssld=False, **kwargs):
+ """
+ PPHGNet_tiny
+ Args:
+ pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
+ If str, means the path of the pretrained model.
+ use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
+ Returns:
+ model: nn.Layer. Specific `PPHGNet_tiny` model depends on args.
+ """
+ stage_config = {
+ # in_channels, mid_channels, out_channels, blocks, downsample
+ "stage1": [96, 96, 224, 1, False],
+ "stage2": [224, 128, 448, 1, True],
+ "stage3": [448, 160, 512, 2, True],
+ "stage4": [512, 192, 768, 1, True],
+ }
+
+ model = PPHGNet(
+ stem_channels=[48, 48, 96],
+ stage_config=stage_config,
+ layer_num=5,
+ **kwargs)
+ _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_tiny"], use_ssld)
+ return model
+
+
+def PPHGNet_small(pretrained=False, use_ssld=False, **kwargs):
+ """
+ PPHGNet_small
+ Args:
+ pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
+ If str, means the path of the pretrained model.
+ use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
+ Returns:
+ model: nn.Layer. Specific `PPHGNet_small` model depends on args.
+ """
+ stage_config = {
+ # in_channels, mid_channels, out_channels, blocks, downsample
+ "stage1": [128, 128, 256, 1, False],
+ "stage2": [256, 160, 512, 1, True],
+ "stage3": [512, 192, 768, 2, True],
+ "stage4": [768, 224, 1024, 1, True],
+ }
+
+ model = PPHGNet(
+ stem_channels=[64, 64, 128],
+ stage_config=stage_config,
+ layer_num=6,
+ **kwargs)
+ _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_small"], use_ssld)
+ return model
+
+
+def PPHGNet_base(pretrained=False, use_ssld=False, **kwargs):
+ """
+ PPHGNet_base
+ Args:
+ pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
+ If str, means the path of the pretrained model.
+ use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
+ Returns:
+ model: nn.Layer. Specific `PPHGNet_base` model depends on args.
+ """
+ stage_config = {
+ # in_channels, mid_channels, out_channels, blocks, downsample
+ "stage1": [160, 192, 320, 1, False],
+ "stage2": [320, 224, 640, 2, True],
+ "stage3": [640, 256, 960, 3, True],
+ "stage4": [960, 288, 1280, 2, True],
+ }
+
+ model = PPHGNet(
+ stem_channels=[96, 96, 160],
+ stage_config=stage_config,
+ layer_num=7,
+ dropout_prob=0.2,
+ **kwargs)
+    # NOTE: MODEL_URLS has no "PPHGNet_base" entry yet, so use .get() to
+    # avoid a KeyError when constructing the model without pretrained weights.
+    _load_pretrained(pretrained, model,
+                     MODEL_URLS.get("PPHGNet_base"), use_ssld)
+ return model
diff --git a/ppcls/arch/backbone/legendary_models/pp_lcnet.py b/ppcls/arch/backbone/legendary_models/pp_lcnet.py
index e0ae3c640360870cf1010204ee330f1a65387ba1..d0bca63eec42e0b676a0fb8915d32d6d293b78a3 100644
--- a/ppcls/arch/backbone/legendary_models/pp_lcnet.py
+++ b/ppcls/arch/backbone/legendary_models/pp_lcnet.py
@@ -132,6 +132,7 @@ class DepthwiseSeparable(TheseusLayer):
lr_mult=lr_mult)
if use_se:
self.se = SEModule(num_channels, lr_mult=lr_mult)
+
self.pw_conv = ConvBNLayer(
num_channels=num_channels,
filter_size=1,
diff --git a/ppcls/arch/backbone/legendary_models/pp_lcnet_v2.py b/ppcls/arch/backbone/legendary_models/pp_lcnet_v2.py
index 3ce03a9c9f01d2e148e8894de6f1aaad704dcc33..459d84275ac63af54fb9ad10af2bcf2f7759052d 100644
--- a/ppcls/arch/backbone/legendary_models/pp_lcnet_v2.py
+++ b/ppcls/arch/backbone/legendary_models/pp_lcnet_v2.py
@@ -188,7 +188,7 @@ class RepDepthwiseSeparable(TheseusLayer):
def forward(self, x):
if self.use_rep:
input_x = x
- if not self.training:
+ if self.is_repped:
x = self.act(self.dw_conv(x))
else:
y = self.dw_conv_list[0](x)
@@ -209,14 +209,12 @@ class RepDepthwiseSeparable(TheseusLayer):
x = x + input_x
return x
- def eval(self):
+ def rep(self):
if self.use_rep:
+ self.is_repped = True
kernel, bias = self._get_equivalent_kernel_bias()
self.dw_conv.weight.set_value(kernel)
self.dw_conv.bias.set_value(bias)
- self.training = False
- for layer in self.sublayers():
- layer.eval()
def _get_equivalent_kernel_bias(self):
kernel_sum = 0
diff --git a/ppcls/arch/backbone/legendary_models/resnet.py b/ppcls/arch/backbone/legendary_models/resnet.py
index 1e153d78607d10648be2cf62fa053ca4781f97f3..551d326e05c49f7e0f371d6898bee245fe7a9ee9 100644
--- a/ppcls/arch/backbone/legendary_models/resnet.py
+++ b/ppcls/arch/backbone/legendary_models/resnet.py
@@ -20,7 +20,7 @@ import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
-from paddle.nn import Conv2D, BatchNorm, Linear
+from paddle.nn import Conv2D, BatchNorm, Linear, BatchNorm2D
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
from paddle.regularizer import L2Decay
@@ -395,7 +395,10 @@ def _load_pretrained(pretrained, model, model_url, use_ssld):
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
- load_dygraph_pretrain(model, pretrained)
+ if 'http' in pretrained:
+ load_dygraph_pretrain_from_url(model, pretrained, use_ssld=False)
+ else:
+ load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
diff --git a/ppcls/arch/backbone/model_zoo/swin_transformer.py b/ppcls/arch/backbone/legendary_models/swin_transformer.py
similarity index 99%
rename from ppcls/arch/backbone/model_zoo/swin_transformer.py
rename to ppcls/arch/backbone/legendary_models/swin_transformer.py
index 877b7365998bce81489a89ab57a240deb66d45cc..2a3401b2a3fae17e6ca5834cad1b362c5955400f 100644
--- a/ppcls/arch/backbone/model_zoo/swin_transformer.py
+++ b/ppcls/arch/backbone/legendary_models/swin_transformer.py
@@ -21,8 +21,8 @@ import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import TruncatedNormal, Constant
-from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity
-
+from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
+from ppcls.arch.backbone.model_zoo.vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
@@ -589,7 +589,7 @@ class PatchEmbed(nn.Layer):
return flops
-class SwinTransformer(nn.Layer):
+class SwinTransformer(TheseusLayer):
""" Swin Transformer
A PaddlePaddle impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
diff --git a/ppcls/arch/backbone/model_zoo/adaface_ir_net.py b/ppcls/arch/backbone/model_zoo/adaface_ir_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..47de152b646e6f824e5a888692b770d9e146223b
--- /dev/null
+++ b/ppcls/arch/backbone/model_zoo/adaface_ir_net.py
@@ -0,0 +1,529 @@
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# this code is based on AdaFace(https://github.com/mk-minchul/AdaFace)
+from collections import namedtuple
+import paddle
+import paddle.nn as nn
+from paddle.nn import Dropout
+from paddle.nn import MaxPool2D
+from paddle.nn import Sequential
+from paddle.nn import Conv2D, Linear
+from paddle.nn import BatchNorm1D, BatchNorm2D
+from paddle.nn import ReLU, Sigmoid
+from paddle.nn import Layer
+from paddle.nn import PReLU
+
+# from ppcls.arch.backbone.legendary_models.resnet import _load_pretrained
+
+
+class Flatten(Layer):
+    """ Flatten the input tensor to shape [N, -1].
+    """
+
+ def forward(self, input):
+ return paddle.reshape(input, [input.shape[0], -1])
+
+
+class LinearBlock(Layer):
+    """ Convolution block without non-linear activation layer
+ """
+
+ def __init__(self,
+ in_c,
+ out_c,
+ kernel=(1, 1),
+ stride=(1, 1),
+ padding=(0, 0),
+ groups=1):
+ super(LinearBlock, self).__init__()
+ self.conv = Conv2D(
+ in_c,
+ out_c,
+ kernel,
+ stride,
+ padding,
+ groups=groups,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=None)
+ weight_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=1.0))
+ bias_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=0.0))
+ self.bn = BatchNorm2D(
+ out_c, weight_attr=weight_attr, bias_attr=bias_attr)
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ return x
+
+
+class GNAP(Layer):
+ """ Global Norm-Aware Pooling block
+ """
+
+ def __init__(self, in_c):
+ super(GNAP, self).__init__()
+ self.bn1 = BatchNorm2D(in_c, weight_attr=False, bias_attr=False)
+ self.pool = nn.AdaptiveAvgPool2D((1, 1))
+ self.bn2 = BatchNorm1D(in_c, weight_attr=False, bias_attr=False)
+
+ def forward(self, x):
+ x = self.bn1(x)
+ x_norm = paddle.norm(x, 2, 1, True)
+ x_norm_mean = paddle.mean(x_norm)
+ weight = x_norm_mean / x_norm
+ x = x * weight
+ x = self.pool(x)
+        x = paddle.reshape(x, [x.shape[0], -1])  # Paddle tensors use reshape, not view
+ feature = self.bn2(x)
+ return feature
+
+
+class GDC(Layer):
+ """ Global Depthwise Convolution block
+ """
+
+ def __init__(self, in_c, embedding_size):
+ super(GDC, self).__init__()
+ self.conv_6_dw = LinearBlock(
+ in_c,
+ in_c,
+ groups=in_c,
+ kernel=(7, 7),
+ stride=(1, 1),
+ padding=(0, 0))
+ self.conv_6_flatten = Flatten()
+ self.linear = Linear(
+ in_c,
+ embedding_size,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False)
+ self.bn = BatchNorm1D(
+ embedding_size, weight_attr=False, bias_attr=False)
+
+ def forward(self, x):
+ x = self.conv_6_dw(x)
+ x = self.conv_6_flatten(x)
+ x = self.linear(x)
+ x = self.bn(x)
+ return x
+
+
+class SELayer(Layer):
+ """ SE block
+ """
+
+ def __init__(self, channels, reduction):
+ super(SELayer, self).__init__()
+ self.avg_pool = nn.AdaptiveAvgPool2D(1)
+ weight_attr = paddle.ParamAttr(
+ initializer=paddle.nn.initializer.XavierUniform())
+ self.fc1 = Conv2D(
+ channels,
+ channels // reduction,
+ kernel_size=1,
+ padding=0,
+ weight_attr=weight_attr,
+ bias_attr=False)
+
+ self.relu = ReLU()
+ self.fc2 = Conv2D(
+ channels // reduction,
+ channels,
+ kernel_size=1,
+ padding=0,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False)
+
+ self.sigmoid = Sigmoid()
+
+ def forward(self, x):
+ module_input = x
+ x = self.avg_pool(x)
+ x = self.fc1(x)
+ x = self.relu(x)
+ x = self.fc2(x)
+ x = self.sigmoid(x)
+
+ return module_input * x
+
+
+class BasicBlockIR(Layer):
+ """ BasicBlock for IRNet
+ """
+
+ def __init__(self, in_channel, depth, stride):
+ super(BasicBlockIR, self).__init__()
+
+ weight_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=1.0))
+ bias_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=0.0))
+ if in_channel == depth:
+ self.shortcut_layer = MaxPool2D(1, stride)
+ else:
+ self.shortcut_layer = Sequential(
+ Conv2D(
+ in_channel,
+ depth, (1, 1),
+ stride,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ depth, weight_attr=weight_attr, bias_attr=bias_attr))
+ self.res_layer = Sequential(
+ BatchNorm2D(
+ in_channel, weight_attr=weight_attr, bias_attr=bias_attr),
+ Conv2D(
+ in_channel,
+ depth, (3, 3), (1, 1),
+ 1,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ depth, weight_attr=weight_attr, bias_attr=bias_attr),
+ PReLU(depth),
+ Conv2D(
+ depth,
+ depth, (3, 3),
+ stride,
+ 1,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ depth, weight_attr=weight_attr, bias_attr=bias_attr))
+
+ def forward(self, x):
+ shortcut = self.shortcut_layer(x)
+ res = self.res_layer(x)
+
+ return res + shortcut
+
+
+class BottleneckIR(Layer):
+ """ BasicBlock with bottleneck for IRNet
+ """
+
+ def __init__(self, in_channel, depth, stride):
+ super(BottleneckIR, self).__init__()
+ reduction_channel = depth // 4
+ weight_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=1.0))
+ bias_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=0.0))
+ if in_channel == depth:
+ self.shortcut_layer = MaxPool2D(1, stride)
+ else:
+ self.shortcut_layer = Sequential(
+ Conv2D(
+ in_channel,
+ depth, (1, 1),
+ stride,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ depth, weight_attr=weight_attr, bias_attr=bias_attr))
+ self.res_layer = Sequential(
+ BatchNorm2D(
+ in_channel, weight_attr=weight_attr, bias_attr=bias_attr),
+ Conv2D(
+ in_channel,
+ reduction_channel, (1, 1), (1, 1),
+ 0,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ reduction_channel,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr),
+ PReLU(reduction_channel),
+ Conv2D(
+ reduction_channel,
+ reduction_channel, (3, 3), (1, 1),
+ 1,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ reduction_channel,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr),
+ PReLU(reduction_channel),
+ Conv2D(
+ reduction_channel,
+ depth, (1, 1),
+ stride,
+ 0,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ depth, weight_attr=weight_attr, bias_attr=bias_attr))
+
+ def forward(self, x):
+ shortcut = self.shortcut_layer(x)
+ res = self.res_layer(x)
+
+ return res + shortcut
+
+
+class BasicBlockIRSE(BasicBlockIR):
+ def __init__(self, in_channel, depth, stride):
+ super(BasicBlockIRSE, self).__init__(in_channel, depth, stride)
+ self.res_layer.add_sublayer("se_block", SELayer(depth, 16))
+
+
+class BottleneckIRSE(BottleneckIR):
+ def __init__(self, in_channel, depth, stride):
+ super(BottleneckIRSE, self).__init__(in_channel, depth, stride)
+ self.res_layer.add_sublayer("se_block", SELayer(depth, 16))
+
+
+class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
+ '''A named tuple describing a ResNet block.'''
+
+
+def get_block(in_channel, depth, num_units, stride=2):
+
+ return [Bottleneck(in_channel, depth, stride)] +\
+ [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
+
+
+def get_blocks(num_layers):
+ if num_layers == 18:
+ blocks = [
+ get_block(
+ in_channel=64, depth=64, num_units=2), get_block(
+ in_channel=64, depth=128, num_units=2), get_block(
+ in_channel=128, depth=256, num_units=2), get_block(
+ in_channel=256, depth=512, num_units=2)
+ ]
+ elif num_layers == 34:
+ blocks = [
+ get_block(
+ in_channel=64, depth=64, num_units=3), get_block(
+ in_channel=64, depth=128, num_units=4), get_block(
+ in_channel=128, depth=256, num_units=6), get_block(
+ in_channel=256, depth=512, num_units=3)
+ ]
+ elif num_layers == 50:
+ blocks = [
+ get_block(
+ in_channel=64, depth=64, num_units=3), get_block(
+ in_channel=64, depth=128, num_units=4), get_block(
+ in_channel=128, depth=256, num_units=14), get_block(
+ in_channel=256, depth=512, num_units=3)
+ ]
+ elif num_layers == 100:
+ blocks = [
+ get_block(
+ in_channel=64, depth=64, num_units=3), get_block(
+ in_channel=64, depth=128, num_units=13), get_block(
+ in_channel=128, depth=256, num_units=30), get_block(
+ in_channel=256, depth=512, num_units=3)
+ ]
+ elif num_layers == 152:
+ blocks = [
+ get_block(
+ in_channel=64, depth=256, num_units=3), get_block(
+ in_channel=256, depth=512, num_units=8), get_block(
+ in_channel=512, depth=1024, num_units=36), get_block(
+ in_channel=1024, depth=2048, num_units=3)
+ ]
+ elif num_layers == 200:
+ blocks = [
+ get_block(
+ in_channel=64, depth=256, num_units=3), get_block(
+ in_channel=256, depth=512, num_units=24), get_block(
+ in_channel=512, depth=1024, num_units=36), get_block(
+ in_channel=1024, depth=2048, num_units=3)
+ ]
+
+ return blocks
+
+
+class Backbone(Layer):
+ def __init__(self, input_size, num_layers, mode='ir'):
+ """ Args:
+ input_size: input_size of backbone
+ num_layers: num_layers of backbone
+ mode: support ir or irse
+ """
+ super(Backbone, self).__init__()
+ assert input_size[0] in [112, 224], \
+ "input_size should be [112, 112] or [224, 224]"
+        assert num_layers in [18, 34, 50, 100, 152, 200], \
+            "num_layers should be 18, 34, 50, 100, 152 or 200"
+ assert mode in ['ir', 'ir_se'], \
+ "mode should be ir or ir_se"
+ weight_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=1.0))
+ bias_attr = paddle.ParamAttr(
+ regularizer=None, initializer=nn.initializer.Constant(value=0.0))
+ self.input_layer = Sequential(
+ Conv2D(
+ 3,
+ 64, (3, 3),
+ 1,
+ 1,
+ weight_attr=nn.initializer.KaimingNormal(),
+ bias_attr=False),
+ BatchNorm2D(
+ 64, weight_attr=weight_attr, bias_attr=bias_attr),
+ PReLU(64))
+ blocks = get_blocks(num_layers)
+ if num_layers <= 100:
+ if mode == 'ir':
+ unit_module = BasicBlockIR
+ elif mode == 'ir_se':
+ unit_module = BasicBlockIRSE
+ output_channel = 512
+ else:
+ if mode == 'ir':
+ unit_module = BottleneckIR
+ elif mode == 'ir_se':
+ unit_module = BottleneckIRSE
+ output_channel = 2048
+
+ if input_size[0] == 112:
+ self.output_layer = Sequential(
+ BatchNorm2D(
+ output_channel,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr),
+ Dropout(0.4),
+ Flatten(),
+ Linear(
+ output_channel * 7 * 7,
+ 512,
+ weight_attr=nn.initializer.KaimingNormal()),
+ BatchNorm1D(
+ 512, weight_attr=False, bias_attr=False))
+ else:
+ self.output_layer = Sequential(
+ BatchNorm2D(
+ output_channel,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr),
+ Dropout(0.4),
+ Flatten(),
+ Linear(
+ output_channel * 14 * 14,
+ 512,
+ weight_attr=nn.initializer.KaimingNormal()),
+ BatchNorm1D(
+ 512, weight_attr=False, bias_attr=False))
+
+ modules = []
+ for block in blocks:
+ for bottleneck in block:
+ modules.append(
+ unit_module(bottleneck.in_channel, bottleneck.depth,
+ bottleneck.stride))
+ self.body = Sequential(*modules)
+
+ # initialize_weights(self.modules())
+
+ def forward(self, x):
+
+        # current code only supports one extra image;
+        # it comes with an extra dimension for the number of extra images,
+        # which we simply squeeze out for now
+ x = self.input_layer(x)
+
+ for idx, module in enumerate(self.body):
+ x = module(x)
+
+ x = self.output_layer(x)
+ # norm = paddle.norm(x, 2, 1, True)
+ # output = paddle.divide(x, norm)
+ # return output, norm
+ return x
+
+
+def AdaFace_IR_18(input_size=(112, 112)):
+ """ Constructs a ir-18 model.
+ """
+ model = Backbone(input_size, 18, 'ir')
+ return model
+
+
+def AdaFace_IR_34(input_size=(112, 112)):
+ """ Constructs a ir-34 model.
+ """
+ model = Backbone(input_size, 34, 'ir')
+
+ return model
+
+
+def AdaFace_IR_50(input_size=(112, 112)):
+ """ Constructs a ir-50 model.
+ """
+ model = Backbone(input_size, 50, 'ir')
+
+ return model
+
+
+def AdaFace_IR_101(input_size=(112, 112)):
+ """ Constructs a ir-101 model.
+ """
+ model = Backbone(input_size, 100, 'ir')
+
+ return model
+
+
+def AdaFace_IR_152(input_size=(112, 112)):
+ """ Constructs a ir-152 model.
+ """
+ model = Backbone(input_size, 152, 'ir')
+
+ return model
+
+
+def AdaFace_IR_200(input_size=(112, 112)):
+ """ Constructs a ir-200 model.
+ """
+ model = Backbone(input_size, 200, 'ir')
+
+ return model
+
+
+def AdaFace_IR_SE_50(input_size=(112, 112)):
+ """ Constructs a ir_se-50 model.
+ """
+ model = Backbone(input_size, 50, 'ir_se')
+
+ return model
+
+
+def AdaFace_IR_SE_101(input_size=(112, 112)):
+ """ Constructs a ir_se-101 model.
+ """
+ model = Backbone(input_size, 100, 'ir_se')
+
+ return model
+
+
+def AdaFace_IR_SE_152(input_size=(112, 112)):
+ """ Constructs a ir_se-152 model.
+ """
+ model = Backbone(input_size, 152, 'ir_se')
+
+ return model
+
+
+def AdaFace_IR_SE_200(input_size=(112, 112)):
+ """ Constructs a ir_se-200 model.
+ """
+ model = Backbone(input_size, 200, 'ir_se')
+
+ return model
diff --git a/ppcls/arch/backbone/model_zoo/repvgg.py b/ppcls/arch/backbone/model_zoo/repvgg.py
index 8ff662a7f88086abeee6b7f6e0260d2d3b3cd0c1..12f65549fad60adae6a412d8adb05f9846922c81 100644
--- a/ppcls/arch/backbone/model_zoo/repvgg.py
+++ b/ppcls/arch/backbone/model_zoo/repvgg.py
@@ -124,13 +124,7 @@ class RepVGGBlock(nn.Layer):
groups=groups)
def forward(self, inputs):
- if not self.training and not self.is_repped:
- self.rep()
- self.is_repped = True
- if self.training and self.is_repped:
- self.is_repped = False
-
- if not self.training:
+ if self.is_repped:
return self.nonlinearity(self.rbr_reparam(inputs))
if self.rbr_identity is None:
@@ -154,6 +148,7 @@ class RepVGGBlock(nn.Layer):
kernel, bias = self.get_equivalent_kernel_bias()
self.rbr_reparam.weight.set_value(kernel)
self.rbr_reparam.bias.set_value(bias)
+ self.is_repped = True
def get_equivalent_kernel_bias(self):
kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
diff --git a/ppcls/arch/gears/__init__.py b/ppcls/arch/gears/__init__.py
index 8757aa4aeb4a510857ca4dc1c60696b1d6e86a0b..871967804e21c362935915942aa3f621207b934e 100644
--- a/ppcls/arch/gears/__init__.py
+++ b/ppcls/arch/gears/__init__.py
@@ -19,6 +19,7 @@ from .fc import FC
from .vehicle_neck import VehicleNeck
from paddle.nn import Tanh
from .bnneck import BNNeck
+from .adamargin import AdaMargin
__all__ = ['build_gear']
@@ -26,7 +27,7 @@ __all__ = ['build_gear']
def build_gear(config):
support_dict = [
'ArcMargin', 'CosMargin', 'CircleMargin', 'FC', 'VehicleNeck', 'Tanh',
- 'BNNeck'
+ 'BNNeck', 'AdaMargin'
]
module_name = config.pop('name')
assert module_name in support_dict, Exception(
diff --git a/ppcls/arch/gears/adamargin.py b/ppcls/arch/gears/adamargin.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b0f5f245dbbe2c282f726b7d5be3634d6df912c
--- /dev/null
+++ b/ppcls/arch/gears/adamargin.py
@@ -0,0 +1,111 @@
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This code is based on AdaFace(https://github.com/mk-minchul/AdaFace)
+# Paper: AdaFace: Quality Adaptive Margin for Face Recognition
+from paddle.nn import Layer
+import math
+import paddle
+
+
+def l2_norm(input, axis=1):
+ norm = paddle.norm(input, 2, axis, True)
+ output = paddle.divide(input, norm)
+ return output
+
+
+class AdaMargin(Layer):
+ def __init__(
+ self,
+ embedding_size=512,
+ class_num=70722,
+ m=0.4,
+ h=0.333,
+ s=64.,
+ t_alpha=1.0, ):
+ super(AdaMargin, self).__init__()
+ self.classnum = class_num
+ kernel_weight = paddle.uniform(
+ [embedding_size, class_num], min=-1, max=1)
+ kernel_weight_norm = paddle.norm(
+ kernel_weight, p=2, axis=0, keepdim=True)
+ kernel_weight_norm = paddle.where(kernel_weight_norm > 1e-5,
+ kernel_weight_norm,
+ paddle.ones_like(kernel_weight_norm))
+ kernel_weight = kernel_weight / kernel_weight_norm
+ self.kernel = self.create_parameter(
+ [embedding_size, class_num],
+ attr=paddle.nn.initializer.Assign(kernel_weight))
+
+ # initial kernel
+ # self.kernel.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
+ self.m = m
+ self.eps = 1e-3
+ self.h = h
+ self.s = s
+
+ # ema prep
+ self.t_alpha = t_alpha
+ self.register_buffer('t', paddle.zeros([1]), persistable=True)
+ self.register_buffer(
+ 'batch_mean', paddle.ones([1]) * 20, persistable=True)
+ self.register_buffer(
+ 'batch_std', paddle.ones([1]) * 100, persistable=True)
+
+ def forward(self, embbedings, label):
+
+ norms = paddle.norm(embbedings, 2, 1, True)
+ embbedings = paddle.divide(embbedings, norms)
+ kernel_norm = l2_norm(self.kernel, axis=0)
+ cosine = paddle.mm(embbedings, kernel_norm)
+ cosine = paddle.clip(cosine, -1 + self.eps,
+ 1 - self.eps) # for stability
+
+ safe_norms = paddle.clip(norms, min=0.001, max=100) # for stability
+ safe_norms = safe_norms.clone().detach()
+
+ # update batchmean batchstd
+ with paddle.no_grad():
+ mean = safe_norms.mean().detach()
+ std = safe_norms.std().detach()
+ self.batch_mean = mean * self.t_alpha + (1 - self.t_alpha
+ ) * self.batch_mean
+ self.batch_std = std * self.t_alpha + (1 - self.t_alpha
+ ) * self.batch_std
+
+ margin_scaler = (safe_norms - self.batch_mean) / (
+ self.batch_std + self.eps) # 66% between -1, 1
+ margin_scaler = margin_scaler * self.h # 68% between -0.333 ,0.333 when h:0.333
+ margin_scaler = paddle.clip(margin_scaler, -1, 1)
+
+ # g_angular
+ m_arc = paddle.nn.functional.one_hot(
+ label.reshape([-1]), self.classnum)
+ g_angular = self.m * margin_scaler * -1
+ m_arc = m_arc * g_angular
+ theta = paddle.acos(cosine)
+ theta_m = paddle.clip(
+ theta + m_arc, min=self.eps, max=math.pi - self.eps)
+ cosine = paddle.cos(theta_m)
+
+ # g_additive
+ m_cos = paddle.nn.functional.one_hot(
+ label.reshape([-1]), self.classnum)
+ g_add = self.m + (self.m * margin_scaler)
+ m_cos = m_cos * g_add
+ cosine = cosine - m_cos
+
+ # scale
+ scaled_cosine_m = cosine * self.s
+ return scaled_cosine_m
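+
+
+# Reference (AdaFace: Quality Adaptive Margin for Face Recognition): with
+# margin_scaler clipped to [-1, 1] from the feature-norm statistics above,
+# the target-class logit computed in forward() is
+#     s * (cos(theta + g_angular) - g_additive)
+# where g_angular = -m * margin_scaler and g_additive = m + m * margin_scaler.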
diff --git a/ppcls/arch/slim/quant.py b/ppcls/arch/slim/quant.py
index b8f59a78fdd9a8f1f3e613f5ee44d4fa68266e30..9fb9ff51e7ad2f03c94be824eef877d03d32229a 100644
--- a/ppcls/arch/slim/quant.py
+++ b/ppcls/arch/slim/quant.py
@@ -40,12 +40,14 @@ QUANT_CONFIG = {
}
-def quantize_model(config, model):
+def quantize_model(config, model, mode="train"):
if config.get("Slim", False) and config["Slim"].get("quant", False):
from paddleslim.dygraph.quant import QAT
assert config["Slim"]["quant"]["name"].lower(
) == 'pact', 'Only PACT quantization method is supported now'
QUANT_CONFIG["activation_preprocess_type"] = "PACT"
+ if mode in ["infer", "export"]:
+ QUANT_CONFIG['activation_preprocess_type'] = None
model.quanter = QAT(config=QUANT_CONFIG)
model.quanter.quantize(model)
logger.info("QAT model summary:")
diff --git a/ppcls/configs/Attr/StrongBaselineAttr.yaml b/ppcls/configs/Attr/StrongBaselineAttr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2324015d667a09a56570677713792b16f1b2ed03
--- /dev/null
+++ b/ppcls/configs/Attr/StrongBaselineAttr.yaml
@@ -0,0 +1,113 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: "./output/"
+ device: "gpu"
+ save_interval: 5
+ eval_during_train: True
+ eval_interval: 1
+ epochs: 30
+ print_batch_step: 20
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 256, 192]
+ save_inference_dir: "./inference"
+ use_multilabel: True
+
+# model architecture
+Arch:
+ name: "ResNet50"
+ pretrained: True
+ class_num: 26
+ infer_add_softmax: False
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - MultiLabelLoss:
+ weight: 1.0
+ weight_ratio: True
+ size_sum: True
+ Eval:
+ - MultiLabelLoss:
+ weight: 1.0
+ weight_ratio: True
+ size_sum: True
+
+Optimizer:
+ name: Adam
+ lr:
+ name: Piecewise
+ decay_epochs: [12, 18, 24, 28]
+ values: [0.0001, 0.00001, 0.000001, 0.0000001]
+ regularizer:
+ name: 'L2'
+ coeff: 0.0005
+ clip_norm: 10
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: MultiLabelDataset
+ image_root: "dataset/attribute/data/"
+ cls_label_path: "dataset/attribute/trainval.txt"
+ label_ratio: True
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ size: [192, 256]
+ - Padv2:
+ size: [212, 276]
+ pad_mode: 1
+ fill_value: 0
+ - RandomCropImage:
+ size: [192, 256]
+ - RandFlipImage:
+ flip_code: 1
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: True
+ shuffle: True
+ loader:
+ num_workers: 4
+ use_shared_memory: True
+ Eval:
+ dataset:
+ name: MultiLabelDataset
+ image_root: "dataset/attribute/data/"
+ cls_label_path: "dataset/attribute/test.txt"
+ label_ratio: True
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ size: [192, 256]
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 4
+ use_shared_memory: True
+
+
+Metric:
+ Eval:
+ - ATTRMetric:
diff --git a/ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eabccd4b712ab48886c74caf6b784b4c193f6913
--- /dev/null
+++ b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
@@ -0,0 +1,164 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: ./output/
+ device: gpu
+ save_interval: 1
+ eval_during_train: True
+ eval_interval: 1
+ epochs: 600
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 224, 224]
+ save_inference_dir: ./inference
+ # training model under @to_static
+ to_static: False
+ use_dali: False
+
+# mixed precision training
+AMP:
+ scale_loss: 128.0
+ use_dynamic_loss_scaling: True
+ # O1: mixed fp16
+ level: O1
+
+# model architecture
+Arch:
+ name: PPHGNet_small
+ class_num: 1000
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - CELoss:
+ weight: 1.0
+ epsilon: 0.1
+ Eval:
+ - CELoss:
+ weight: 1.0
+
+
+Optimizer:
+ name: Momentum
+ momentum: 0.9
+ lr:
+ name: Cosine
+ learning_rate: 0.5
+ warmup_epoch: 5
+ regularizer:
+ name: 'L2'
+ coeff: 0.00004
+
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/ILSVRC2012/
+ cls_label_path: ./dataset/ILSVRC2012/train_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - RandCropImage:
+ size: 224
+ interpolation: bicubic
+ backend: pil
+ - RandFlipImage:
+ flip_code: 1
+ - TimmAutoAugment:
+ config_str: rand-m7-mstd0.5-inc1
+ interpolation: bicubic
+ img_size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - RandomErasing:
+ EPSILON: 0.25
+ sl: 0.02
+ sh: 1.0/3.0
+ r1: 0.3
+ attempt: 10
+ use_log_aspect: True
+ mode: pixel
+ batch_transform_ops:
+ - OpSampler:
+ MixupOperator:
+ alpha: 0.2
+ prob: 0.5
+ CutmixOperator:
+ alpha: 1.0
+ prob: 0.5
+
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 128
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 16
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/ILSVRC2012/
+ cls_label_path: ./dataset/ILSVRC2012/val_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 236
+ interpolation: bicubic
+ backend: pil
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 128
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 16
+ use_shared_memory: True
+
+Infer:
+ infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+ batch_size: 10
+ transforms:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 236
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+ PostProcess:
+ name: Topk
+ topk: 5
+ class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
+
+Metric:
+ Train:
+ - TopkAcc:
+ topk: [1, 5]
+ Eval:
+ - TopkAcc:
+ topk: [1, 5]
diff --git a/ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e423c866b131aefda13b0186eca7ac27d3c84733
--- /dev/null
+++ b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
@@ -0,0 +1,164 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: ./output/
+ device: gpu
+ save_interval: 1
+ eval_during_train: True
+ eval_interval: 1
+ epochs: 600
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 224, 224]
+ save_inference_dir: ./inference
+ # training model under @to_static
+ to_static: False
+ use_dali: False
+
+# mixed precision training
+AMP:
+ scale_loss: 128.0
+ use_dynamic_loss_scaling: True
+ # O1: mixed fp16
+ level: O1
+
+# model architecture
+Arch:
+ name: PPHGNet_tiny
+ class_num: 1000
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - CELoss:
+ weight: 1.0
+ epsilon: 0.1
+ Eval:
+ - CELoss:
+ weight: 1.0
+
+
+Optimizer:
+ name: Momentum
+ momentum: 0.9
+ lr:
+ name: Cosine
+ learning_rate: 0.5
+ warmup_epoch: 5
+ regularizer:
+ name: 'L2'
+ coeff: 0.00004
+
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/ILSVRC2012/
+ cls_label_path: ./dataset/ILSVRC2012/train_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - RandCropImage:
+ size: 224
+ interpolation: bicubic
+ backend: pil
+ - RandFlipImage:
+ flip_code: 1
+ - TimmAutoAugment:
+ config_str: rand-m7-mstd0.5-inc1
+ interpolation: bicubic
+ img_size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - RandomErasing:
+ EPSILON: 0.25
+ sl: 0.02
+ sh: 1.0/3.0
+ r1: 0.3
+ attempt: 10
+ use_log_aspect: True
+ mode: pixel
+ batch_transform_ops:
+ - OpSampler:
+ MixupOperator:
+ alpha: 0.2
+ prob: 0.5
+ CutmixOperator:
+ alpha: 1.0
+ prob: 0.5
+
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 128
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 16
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/ILSVRC2012/
+ cls_label_path: ./dataset/ILSVRC2012/val_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 232
+ interpolation: bicubic
+ backend: pil
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 128
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 16
+ use_shared_memory: True
+
+Infer:
+ infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+ batch_size: 10
+ transforms:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 232
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+ PostProcess:
+ name: Topk
+ topk: 5
+ class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
+
+Metric:
+ Train:
+ - TopkAcc:
+ topk: [1, 5]
+ Eval:
+ - TopkAcc:
+ topk: [1, 5]
diff --git a/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml b/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml
index 6a4425b4048ce5c2881ca5bc55e4902b5f50396b..01ba0169af8eaa58a3bf53b60be6249cb04bb737 100644
--- a/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml
+++ b/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml
@@ -105,7 +105,6 @@ DataLoader:
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- output_fp16: True
channel_num: *image_channel
sampler:
name: DistributedBatchSampler
@@ -132,7 +131,6 @@ Infer:
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- output_fp16: True
channel_num: *image_channel
- ToCHWImage:
PostProcess:
diff --git a/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml b/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml
index af987ed7f59ff9c9576d4fb417c48e112afa3986..72857c2cea5500cf3e728cc2edddf69343cc4814 100644
--- a/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml
+++ b/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml
@@ -15,6 +15,13 @@ Global:
image_shape: [*image_channel, 224, 224]
save_inference_dir: ./inference
+# mixed precision training
+AMP:
+ scale_loss: 128.0
+ use_dynamic_loss_scaling: True
+ # O2: pure fp16
+ level: O2
+
# model architecture
Arch:
name: SE_ResNeXt101_32x4d
@@ -32,13 +39,6 @@ Loss:
- CELoss:
weight: 1.0
-# mixed precision training
-AMP:
- scale_loss: 128.0
- use_dynamic_loss_scaling: True
- # O2: pure fp16
- level: O2
-
Optimizer:
name: Momentum
momentum: 0.9
@@ -99,10 +99,9 @@ DataLoader:
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- output_fp16: True
channel_num: *image_channel
sampler:
- name: BatchSampler
+ name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
@@ -126,7 +125,6 @@ Infer:
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- output_fp16: True
channel_num: *image_channel
- ToCHWImage:
PostProcess:
diff --git a/ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..afb9b43a0dfad4153bdc761a13c61a4d0e5fd47d
--- /dev/null
+++ b/ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml
@@ -0,0 +1,168 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: ./output
+ device: gpu
+ save_interval: 1
+ eval_during_train: True
+ start_eval_epoch: 1
+ eval_interval: 1
+ epochs: 20
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 224, 224]
+ save_inference_dir: ./inference
+ # training model under @to_static
+ to_static: False
+ use_dali: False
+
+# model architecture
+Arch:
+ name: "DistillationModel"
+ class_num: &class_num 2
+ # if not null, its lengths should be same as models
+ pretrained_list:
+ # if not null, its lengths should be same as models
+ freeze_params_list:
+ - True
+ - False
+ use_sync_bn: True
+ models:
+ - Teacher:
+ name: ResNet101_vd
+ class_num: *class_num
+ - Student:
+ name: PPLCNet_x1_0
+ class_num: *class_num
+ pretrained: True
+ use_ssld: True
+
+ infer_model_name: "Student"
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - DistillationDMLLoss:
+ weight: 1.0
+ model_name_pairs:
+ - ["Student", "Teacher"]
+ Eval:
+ - CELoss:
+ weight: 1.0
+
+
+Optimizer:
+ name: Momentum
+ momentum: 0.9
+ lr:
+ name: Cosine
+ learning_rate: 0.01
+ warmup_epoch: 5
+ regularizer:
+ name: 'L2'
+ coeff: 0.00004
+
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/train_list_for_distill.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - RandCropImage:
+ size: 192
+ - RandFlipImage:
+ flip_code: 1
+ - TimmAutoAugment:
+ prob: 0.0
+ config_str: rand-m9-mstd0.5-inc1
+ interpolation: bicubic
+ img_size: 192
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - RandomErasing:
+ EPSILON: 0.1
+ sl: 0.02
+ sh: 1.0/3.0
+ r1: 0.3
+ attempt: 10
+ use_log_aspect: True
+ mode: pixel
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 16
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/val_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 4
+ use_shared_memory: True
+
+Infer:
+ infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+ batch_size: 10
+ transforms:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+ PostProcess:
+ name: ThreshOutput
+ threshold: 0.9
+ label_0: nobody
+ label_1: someone
+
+Metric:
+ Train:
+ - DistillationTopkAcc:
+ model_key: "Student"
+ topk: [1, 2]
+ Eval:
+ - TprAtFpr:
+ - TopkAcc:
+ topk: [1, 2]
diff --git a/ppcls/configs/PULC/person/OtherModels/MobileNetV3_large_x1_0.yaml b/ppcls/configs/PULC/person/OtherModels/MobileNetV3_large_x1_0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d69bb933fdbf5592d497651cad79995a492cdf28
--- /dev/null
+++ b/ppcls/configs/PULC/person/OtherModels/MobileNetV3_large_x1_0.yaml
@@ -0,0 +1,145 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: ./output/
+ device: gpu
+ save_interval: 1
+ eval_during_train: True
+ eval_interval: 1
+ start_eval_epoch: 10
+ epochs: 20
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 224, 224]
+ save_inference_dir: ./inference
+ # training model under @to_static
+ to_static: False
+ use_dali: False
+
+# mixed precision training
+AMP:
+ scale_loss: 128.0
+ use_dynamic_loss_scaling: True
+ # O1: mixed fp16
+ level: O1
+
+# model architecture
+Arch:
+ name: MobileNetV3_large_x1_0
+ class_num: 2
+ pretrained: True
+ use_sync_bn: True
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - CELoss:
+ weight: 1.0
+ epsilon: 0.1
+ Eval:
+ - CELoss:
+ weight: 1.0
+
+
+Optimizer:
+ name: Momentum
+ momentum: 0.9
+ lr:
+ name: Cosine
+ learning_rate: 0.13
+ warmup_epoch: 5
+ regularizer:
+ name: 'L2'
+ coeff: 0.00002
+
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/train_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - RandCropImage:
+ size: 224
+ - RandFlipImage:
+ flip_code: 1
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 512
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 8
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/val_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 4
+ use_shared_memory: True
+
+Infer:
+ infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+ batch_size: 10
+ transforms:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+ PostProcess:
+ name: ThreshOutput
+ threshold: 0.9
+ label_0: nobody
+ label_1: someone
+
+Metric:
+ Train:
+ - TopkAcc:
+ topk: [1, 2]
+ Eval:
+ - TprAtFpr:
+ - TopkAcc:
+ topk: [1, 2]
diff --git a/ppcls/configs/PULC/person/OtherModels/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/person/OtherModels/SwinTransformer_tiny_patch4_window7_224.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e2248e98529b511c7821b49ced6cf0625016553
--- /dev/null
+++ b/ppcls/configs/PULC/person/OtherModels/SwinTransformer_tiny_patch4_window7_224.yaml
@@ -0,0 +1,168 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: ./output/
+ device: gpu
+ save_interval: 1
+ eval_during_train: True
+ eval_interval: 1
+ start_eval_epoch: 10
+ epochs: 20
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 224, 224]
+ save_inference_dir: ./inference
+ # training model under @to_static
+ to_static: False
+ use_dali: False
+
+# mixed precision training
+AMP:
+ scale_loss: 128.0
+ use_dynamic_loss_scaling: True
+ # O1: mixed fp16
+ level: O1
+
+# model architecture
+Arch:
+ name: SwinTransformer_tiny_patch4_window7_224
+ class_num: 2
+ pretrained: True
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - CELoss:
+ weight: 1.0
+ epsilon: 0.1
+ Eval:
+ - CELoss:
+ weight: 1.0
+
+Optimizer:
+ name: AdamW
+ beta1: 0.9
+ beta2: 0.999
+ epsilon: 1e-8
+ weight_decay: 0.05
+ no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm
+ one_dim_param_no_weight_decay: True
+ lr:
+ name: Cosine
+ learning_rate: 1e-4
+ eta_min: 2e-6
+ warmup_epoch: 5
+ warmup_start_lr: 2e-7
+
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/train_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - RandCropImage:
+ size: 224
+ interpolation: bicubic
+ backend: pil
+ - RandFlipImage:
+ flip_code: 1
+ - TimmAutoAugment:
+ config_str: rand-m9-mstd0.5-inc1
+ interpolation: bicubic
+ img_size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - RandomErasing:
+ EPSILON: 0.25
+ sl: 0.02
+ sh: 1.0/3.0
+ r1: 0.3
+ attempt: 10
+ use_log_aspect: True
+ mode: pixel
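+      # OpSampler draws at most one batch-level op per batch: Mixup or Cutmix, each with prob 0.5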
+ batch_transform_ops:
+ - OpSampler:
+ MixupOperator:
+ alpha: 0.8
+ prob: 0.5
+ CutmixOperator:
+ alpha: 1.0
+ prob: 0.5
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 128
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 8
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/val_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 8
+ use_shared_memory: True
+
+Infer:
+ infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+ batch_size: 10
+ transforms:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+ PostProcess:
+ name: ThreshOutput
+ threshold: 0.9
+ label_0: nobody
+ label_1: someone
+
+Metric:
+ Train:
+ - TopkAcc:
+ topk: [1, 2]
+ Eval:
+ - TprAtFpr:
+ - TopkAcc:
+ topk: [1, 2]
diff --git a/ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e196547923a345a9535f5b63a568817b2784c6d7
--- /dev/null
+++ b/ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0.yaml
@@ -0,0 +1,151 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: ./output/
+ device: gpu
+ save_interval: 1
+ eval_during_train: True
+ eval_interval: 1
+ start_eval_epoch: 10
+ epochs: 20
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 224, 224]
+ save_inference_dir: ./inference
+ # training model under @to_static
+ to_static: False
+ use_dali: False
+
+
+# model architecture
+Arch:
+ name: PPLCNet_x1_0
+ class_num: 2
+ pretrained: True
+ use_ssld: True
+ use_sync_bn: True
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - CELoss:
+ weight: 1.0
+ Eval:
+ - CELoss:
+ weight: 1.0
+
+
+Optimizer:
+ name: Momentum
+ momentum: 0.9
+ lr:
+ name: Cosine
+ learning_rate: 0.01
+ warmup_epoch: 5
+ regularizer:
+ name: 'L2'
+ coeff: 0.00004
+
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/train_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - RandCropImage:
+ size: 192
+ - RandFlipImage:
+ flip_code: 1
+ - TimmAutoAugment:
+ prob: 0.0
+ config_str: rand-m9-mstd0.5-inc1
+ interpolation: bicubic
+ img_size: 192
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - RandomErasing:
+ EPSILON: 0.1
+ sl: 0.02
+ sh: 1.0/3.0
+ r1: 0.3
+ attempt: 10
+ use_log_aspect: True
+ mode: pixel
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 8
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/val_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 4
+ use_shared_memory: True
+
+Infer:
+ infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+ batch_size: 10
+ transforms:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+ PostProcess:
+ name: ThreshOutput
+ threshold: 0.9
+ label_0: nobody
+ label_1: someone
+
+Metric:
+ Train:
+ - TopkAcc:
+ topk: [1, 2]
+ Eval:
+ - TprAtFpr:
+ - TopkAcc:
+ topk: [1, 2]
diff --git a/ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0_search.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b2126b69f9d773d918df6b1f03361cac06ee44f8
--- /dev/null
+++ b/ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0_search.yaml
@@ -0,0 +1,151 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: ./output/
+ device: gpu
+ save_interval: 1
+ eval_during_train: True
+ eval_interval: 1
+ start_eval_epoch: 10
+ epochs: 20
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 224, 224]
+ save_inference_dir: ./inference
+ # training model under @to_static
+ to_static: False
+ use_dali: False
+
+
+# model architecture
+Arch:
+ name: PPLCNet_x1_0
+ class_num: 2
+ pretrained: True
+ use_ssld: True
+ use_sync_bn: True
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - CELoss:
+ weight: 1.0
+ Eval:
+ - CELoss:
+ weight: 1.0
+
+
+Optimizer:
+ name: Momentum
+ momentum: 0.9
+ lr:
+ name: Cosine
+ learning_rate: 0.01
+ warmup_epoch: 5
+ regularizer:
+ name: 'L2'
+ coeff: 0.00004
+
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/train_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - RandCropImage:
+ size: 224
+ - RandFlipImage:
+ flip_code: 1
+ - TimmAutoAugment:
+ prob: 0.0
+ config_str: rand-m9-mstd0.5-inc1
+ interpolation: bicubic
+ img_size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - RandomErasing:
+ EPSILON: 0.0
+ sl: 0.02
+ sh: 1.0/3.0
+ r1: 0.3
+ attempt: 10
+ use_log_aspect: True
+ mode: pixel
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 8
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: ImageNetDataset
+ image_root: ./dataset/person/
+ cls_label_path: ./dataset/person/val_list.txt
+ transform_ops:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 64
+ drop_last: False
+ shuffle: False
+ loader:
+ num_workers: 4
+ use_shared_memory: True
+
+Infer:
+ infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+ batch_size: 10
+ transforms:
+ - DecodeImage:
+ to_rgb: True
+ channel_first: False
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 1.0/255.0
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+ PostProcess:
+ name: ThreshOutput
+ threshold: 0.9
+ label_0: nobody
+ label_1: someone
+
+Metric:
+ Train:
+ - TopkAcc:
+ topk: [1, 2]
+ Eval:
+ - TprAtFpr:
+ - TopkAcc:
+ topk: [1, 2]
diff --git a/ppcls/configs/StrategySearch/person.yaml b/ppcls/configs/StrategySearch/person.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..906635595f33417cf564ca54a430c3c648fd738d
--- /dev/null
+++ b/ppcls/configs/StrategySearch/person.yaml
@@ -0,0 +1,40 @@
+base_config_file: ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0_search.yaml
+distill_config_file: ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml
+
+gpus: 0,1,2,3
+output_dir: output/search_person
+search_times: 1
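+# the keys below are swept in order; after each sweep the best value found is kept
+# fixed while the next key is searched (a greedy, sequential hyper-parameter search)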
+search_dict:
+ - search_key: lrs
+ replace_config:
+ - Optimizer.lr.learning_rate
+ search_values: [0.0075, 0.01, 0.0125]
+ - search_key: resolutions
+ replace_config:
+ - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size
+ - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size
+ search_values: [176, 192, 224]
+ - search_key: ra_probs
+ replace_config:
+ - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob
+ search_values: [0.0, 0.1, 0.5]
+ - search_key: re_probs
+ replace_config:
+ - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON
+ search_values: [0.0, 0.1, 0.5]
+ - search_key: lr_mult_list
+ replace_config:
+ - Arch.lr_mult_list
+ search_values:
+ - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
+ - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0]
+ - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+teacher:
+ rm_keys:
+ - Arch.lr_mult_list
+ search_values:
+ - ResNet101_vd
+ - ResNet50_vd
+final_replace:
+ Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list
+
diff --git a/ppcls/configs/metric_learning/adaface_ir18.yaml b/ppcls/configs/metric_learning/adaface_ir18.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2cbfe5da43763701b244b2422bf9ad82b19ef4d6
--- /dev/null
+++ b/ppcls/configs/metric_learning/adaface_ir18.yaml
@@ -0,0 +1,105 @@
+# global configs
+Global:
+ checkpoints: null
+ pretrained_model: null
+ output_dir: "./output/"
+ device: "gpu"
+ save_interval: 1
+ eval_during_train: True
+ eval_interval: 1
+ epochs: 26
+ print_batch_step: 10
+ use_visualdl: False
+ # used for static mode and model export
+ image_shape: [3, 112, 112]
+ save_inference_dir: "./inference"
+ eval_mode: "adaface"
+
+# model architecture
+Arch:
+ name: "RecModel"
+ infer_output_key: "features"
+ infer_add_softmax: False
+ Backbone:
+ name: "AdaFace_IR_18"
+ input_size: [112, 112]
+ Head:
+ name: "AdaMargin"
+ embedding_size: 512
+ class_num: 70722
+ m: 0.4
+ s: 64
+ h: 0.333
+ t_alpha: 0.01
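+    # AdaFace margin settings: m (margin), s (scale), h (feature-norm scaling term),
+    # t_alpha (EMA momentum of the batch feature-norm statistics)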
+
+# loss function config for training/eval process
+Loss:
+ Train:
+ - CELoss:
+ weight: 1.0
+
+Optimizer:
+ name: Momentum
+ momentum: 0.9
+ lr:
+ name: Piecewise
+ learning_rate: 0.1
+ decay_epochs: [12, 20, 24]
+ values: [0.1, 0.01, 0.001, 0.0001]
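+    # one value per interval: lr is 0.1 until epoch 12, then divided by 10 at epochs 12, 20 and 24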
+ regularizer:
+ name: 'L2'
+ coeff: 0.0005
+
+# data loader for train and eval
+DataLoader:
+ Train:
+ dataset:
+ name: "AdaFaceDataset"
+ root_dir: "dataset/face/"
+ label_path: "dataset/face/train_filter_label.txt"
+ transform:
+ - CropWithPadding:
+ prob: 0.2
+ padding_num: 0
+ size: [112, 112]
+ scale: [0.2, 1.0]
+ ratio: [0.75, 1.3333333333333333]
+ - RandomInterpolationAugment:
+ prob: 0.2
+ - ColorJitter:
+ prob: 0.2
+ brightness: 0.5
+ contrast: 0.5
+ saturation: 0.5
+ hue: 0
+ - RandomHorizontalFlip:
+ - ToTensor:
+ - Normalize:
+ mean: [0.5, 0.5, 0.5]
+ std: [0.5, 0.5, 0.5]
+ sampler:
+ name: DistributedBatchSampler
+ batch_size: 256
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 6
+ use_shared_memory: True
+
+ Eval:
+ dataset:
+ name: FiveValidationDataset
+ val_data_path: dataset/face/faces_emore
+ concat_mem_file_name: dataset/face/faces_emore/concat_validation_memfile
+ sampler:
+ name: BatchSampler
+ batch_size: 256
+ drop_last: False
+ shuffle: True
+ loader:
+ num_workers: 6
+ use_shared_memory: True
+Metric:
+ Train:
+ - TopkAcc:
+ topk: [1, 5]
\ No newline at end of file
diff --git a/ppcls/configs/reid/strong_baseline/baseline.yaml b/ppcls/configs/reid/strong_baseline/baseline.yaml
index d49deeeb4d27f304ac8de76b5feb6d314ed18a48..be9d9b5c8a04e4cb95e054ebccc3e029aa826cf1 100644
--- a/ppcls/configs/reid/strong_baseline/baseline.yaml
+++ b/ppcls/configs/reid/strong_baseline/baseline.yaml
@@ -12,6 +12,7 @@ Global:
use_visualdl: False
eval_mode: "retrieval"
retrieval_feature_from: "backbone" # 'backbone' or 'neck'
+ re_ranking: False
# used for static mode and model export
image_shape: [3, 256, 128]
save_inference_dir: "./inference"
@@ -23,7 +24,7 @@ Arch:
infer_add_softmax: False
Backbone:
name: "ResNet50"
- pretrained: True
+ pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams
stem_act: null
BackboneStopLayer:
name: "flatten"
diff --git a/ppcls/configs/reid/strong_baseline/softmax_triplet.yaml b/ppcls/configs/reid/strong_baseline/softmax_triplet.yaml
index c4d52cd1ef24745cd1d5fac7f7dec26fd98609e0..9694373b045c04eadc0dda7a6b69726966102182 100644
--- a/ppcls/configs/reid/strong_baseline/softmax_triplet.yaml
+++ b/ppcls/configs/reid/strong_baseline/softmax_triplet.yaml
@@ -12,6 +12,7 @@ Global:
use_visualdl: False
eval_mode: "retrieval"
retrieval_feature_from: "features" # 'backbone' or 'features'
+ re_ranking: False
# used for static mode and model export
image_shape: [3, 256, 128]
save_inference_dir: "./inference"
@@ -23,7 +24,7 @@ Arch:
infer_add_softmax: False
Backbone:
name: "ResNet50_last_stage_stride1"
- pretrained: True
+ pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams
stem_act: null
BackboneStopLayer:
name: "flatten"
diff --git a/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml b/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
index 2d14c3f9ce83f231c5a664f8d24177b25543b421..b225ebd86ae28e6769f6ec631e527ee46e781f9e 100644
--- a/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
+++ b/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
@@ -12,6 +12,7 @@ Global:
use_visualdl: False
eval_mode: "retrieval"
retrieval_feature_from: "features" # 'backbone' or 'features'
+ re_ranking: False
# used for static mode and model export
image_shape: [3, 256, 128]
save_inference_dir: "./inference"
@@ -23,7 +24,7 @@ Arch:
infer_add_softmax: False
Backbone:
name: "ResNet50_last_stage_stride1"
- pretrained: True
+ pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams
stem_act: null
BackboneStopLayer:
name: "flatten"
diff --git a/ppcls/data/__init__.py b/ppcls/data/__init__.py
index 9fc4d760be545ffa93652c80d285e17ad0c8ae57..80cf3bc9af826e935fe0fe6ccf8cad8d6924d370 100644
--- a/ppcls/data/__init__.py
+++ b/ppcls/data/__init__.py
@@ -30,6 +30,7 @@ from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset
from ppcls.data.dataloader.mix_dataset import MixDataset
from ppcls.data.dataloader.multi_scale_dataset import MultiScaleDataset
from ppcls.data.dataloader.person_dataset import Market1501, MSMT17
+from ppcls.data.dataloader.face_dataset import FiveValidationDataset, AdaFaceDataset
# sampler
@@ -88,7 +89,7 @@ def build_dataloader(config, mode, device, use_dali=False, seed=None):
# build sampler
config_sampler = config[mode]['sampler']
- if "name" not in config_sampler:
+ if config_sampler and "name" not in config_sampler:
batch_sampler = None
batch_size = config_sampler["batch_size"]
drop_last = config_sampler["drop_last"]
diff --git a/ppcls/data/dataloader/__init__.py b/ppcls/data/dataloader/__init__.py
index 2b1d92b76bd202e36086f21a3a092c3673277690..796f4b458410e5b4b8540b72dd663711c4ad9f46 100644
--- a/ppcls/data/dataloader/__init__.py
+++ b/ppcls/data/dataloader/__init__.py
@@ -10,3 +10,4 @@ from ppcls.data.dataloader.mix_sampler import MixSampler
from ppcls.data.dataloader.multi_scale_sampler import MultiScaleSampler
from ppcls.data.dataloader.pk_sampler import PKSampler
from ppcls.data.dataloader.person_dataset import Market1501, MSMT17
+from ppcls.data.dataloader.face_dataset import AdaFaceDataset, FiveValidationDataset
diff --git a/ppcls/data/dataloader/common_dataset.py b/ppcls/data/dataloader/common_dataset.py
index b7b03d8b9e06aa7aa190fb325c2221db3b666c5c..88bab0f1d059a53b5dc062a25e7286637086abb7 100644
--- a/ppcls/data/dataloader/common_dataset.py
+++ b/ppcls/data/dataloader/common_dataset.py
@@ -44,11 +44,11 @@ def create_operators(params):
class CommonDataset(Dataset):
- def __init__(
- self,
- image_root,
- cls_label_path,
- transform_ops=None, ):
+ def __init__(self,
+ image_root,
+ cls_label_path,
+ transform_ops=None,
+ label_ratio=False):
self._img_root = image_root
self._cls_path = cls_label_path
if transform_ops:
@@ -56,7 +56,10 @@ class CommonDataset(Dataset):
self.images = []
self.labels = []
- self._load_anno()
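+        # when label_ratio is set, _load_anno additionally returns the per-label
+        # ratio of positive samples (used to weight multi-label losses)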
+ if label_ratio:
+ self.label_ratio = self._load_anno(label_ratio=label_ratio)
+ else:
+ self._load_anno()
def _load_anno(self):
pass
diff --git a/ppcls/data/dataloader/face_dataset.py b/ppcls/data/dataloader/face_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..a32cc2c5f89aa8c8e4904e7decc6ec5fb996aab3
--- /dev/null
+++ b/ppcls/data/dataloader/face_dataset.py
@@ -0,0 +1,163 @@
+import os
+import json
+import numpy as np
+from PIL import Image
+import cv2
+import paddle
+import paddle.vision.datasets as datasets
+from paddle.vision import transforms
+from paddle.vision.transforms import functional as F
+from paddle.io import Dataset
+from .common_dataset import create_operators
+from ppcls.data.preprocess import transform as transform_func
+
+# code is based on AdaFace: https://github.com/mk-minchul/AdaFace
+
+
+class AdaFaceDataset(Dataset):
+ def __init__(self, root_dir, label_path, transform=None):
+ self.root_dir = root_dir
+ self.transform = create_operators(transform)
+
+ with open(label_path) as fd:
+ lines = fd.readlines()
+ self.samples = []
+        for line in lines:
+            item = line.strip().split()
+            self.samples.append([os.path.join(root_dir, item[0]), int(item[1])])
+
+ def __len__(self):
+ return len(self.samples)
+
+ def __getitem__(self, index):
+ """
+ Args:
+ index (int): Index
+
+ Returns:
+ tuple: (sample, target) where target is class_index of the target class.
+ """
+ [path, target] = self.samples[index]
+ with open(path, 'rb') as f:
+ img = Image.open(f)
+ sample = img.convert('RGB')
+
+ # if 'WebFace' in self.root:
+ # # swap rgb to bgr since image is in rgb for webface
+ # sample = Image.fromarray(np.asarray(sample)[:, :, ::-1]
+ if self.transform is not None:
+ sample = transform_func(sample, self.transform)
+ return sample, target
+
+
+class FiveValidationDataset(Dataset):
+ def __init__(self, val_data_path, concat_mem_file_name):
+ '''
+ concatenates all validation datasets from emore
+ val_data_dict = {
+ 'agedb_30': (agedb_30, agedb_30_issame),
+ "cfp_fp": (cfp_fp, cfp_fp_issame),
+ "lfw": (lfw, lfw_issame),
+ "cplfw": (cplfw, cplfw_issame),
+ "calfw": (calfw, calfw_issame),
+ }
+ agedb_30: 0
+ cfp_fp: 1
+ lfw: 2
+ cplfw: 3
+ calfw: 4
+ '''
+ val_data = get_val_data(val_data_path)
+ age_30, cfp_fp, lfw, age_30_issame, cfp_fp_issame, lfw_issame, cplfw, cplfw_issame, calfw, calfw_issame = val_data
+ val_data_dict = {
+ 'agedb_30': (age_30, age_30_issame),
+ "cfp_fp": (cfp_fp, cfp_fp_issame),
+ "lfw": (lfw, lfw_issame),
+ "cplfw": (cplfw, cplfw_issame),
+ "calfw": (calfw, calfw_issame),
+ }
+ self.dataname_to_idx = {
+ "agedb_30": 0,
+ "cfp_fp": 1,
+ "lfw": 2,
+ "cplfw": 3,
+ "calfw": 4
+ }
+
+ self.val_data_dict = val_data_dict
+ # concat all dataset
+ all_imgs = []
+ all_issame = []
+ all_dataname = []
+ key_orders = []
+ for key, (imgs, issame) in val_data_dict.items():
+ all_imgs.append(imgs)
+            # duplicate each flag so issame matches imgs in length: [1, 1, 0, 0, ...]
+            dup_issame = []
+ for same in issame:
+ dup_issame.append(same)
+ dup_issame.append(same)
+ all_issame.append(dup_issame)
+ all_dataname.append([self.dataname_to_idx[key]] * len(imgs))
+ key_orders.append(key)
+ assert key_orders == ['agedb_30', 'cfp_fp', 'lfw', 'cplfw', 'calfw']
+
+ if isinstance(all_imgs[0], np.memmap):
+ self.all_imgs = read_memmap(concat_mem_file_name)
+ else:
+ self.all_imgs = np.concatenate(all_imgs)
+
+ self.all_issame = np.concatenate(all_issame)
+ self.all_dataname = np.concatenate(all_dataname)
+
+ def __getitem__(self, index):
+ x_np = self.all_imgs[index].copy()
+ x = paddle.to_tensor(x_np)
+ y = self.all_issame[index]
+ dataname = self.all_dataname[index]
+ return x, y, dataname, index
+
+ def __len__(self):
+ return len(self.all_imgs)
+
+
+def read_memmap(mem_file_name):
+ # r+ mode: Open existing file for reading and writing
+ with open(mem_file_name + '.conf', 'r') as file:
+ memmap_configs = json.load(file)
+ return np.memmap(mem_file_name, mode='r+', \
+ shape=tuple(memmap_configs['shape']), \
+ dtype=memmap_configs['dtype'])
+
+
+def get_val_pair(path, name, use_memfile=True):
+ # installing bcolz should set proxy to access internet
+ import bcolz
+ if use_memfile:
+ mem_file_dir = os.path.join(path, name, 'memfile')
+ mem_file_name = os.path.join(mem_file_dir, 'mem_file.dat')
+ if os.path.isdir(mem_file_dir):
+            print('loading validation data memfile')
+ np_array = read_memmap(mem_file_name)
+        else:
+            os.makedirs(mem_file_dir)
+            carray = bcolz.carray(rootdir=os.path.join(path, name), mode='r')
+            # the memfile-creation step is disabled, so keep the in-memory
+            # array instead of re-reading a memmap that was never written
+            np_array = np.array(carray)
+ else:
+ np_array = bcolz.carray(rootdir=os.path.join(path, name), mode='r')
+
+ issame = np.load(os.path.join(path, '{}_list.npy'.format(name)))
+ return np_array, issame
+
+
+def get_val_data(data_path):
+ agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30')
+ cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp')
+ lfw, lfw_issame = get_val_pair(data_path, 'lfw')
+ cplfw, cplfw_issame = get_val_pair(data_path, 'cplfw')
+ calfw, calfw_issame = get_val_pair(data_path, 'calfw')
+ return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame, cplfw, cplfw_issame, calfw, calfw_issame
diff --git a/ppcls/data/dataloader/multilabel_dataset.py b/ppcls/data/dataloader/multilabel_dataset.py
index 2c1ed770388035d2a9fa5a670948d9e1623a0406..25dfc12b5730129dcb54bfd6eab95a440560b4aa 100644
--- a/ppcls/data/dataloader/multilabel_dataset.py
+++ b/ppcls/data/dataloader/multilabel_dataset.py
@@ -25,7 +25,7 @@ from .common_dataset import CommonDataset
class MultiLabelDataset(CommonDataset):
- def _load_anno(self):
+ def _load_anno(self, label_ratio=False):
assert os.path.exists(self._cls_path)
assert os.path.exists(self._img_root)
self.images = []
@@ -41,6 +41,8 @@ class MultiLabelDataset(CommonDataset):
self.labels.append(labels)
assert os.path.exists(self.images[-1])
+ if label_ratio:
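+            # mean over samples = fraction of positives per label, shape [num_labels]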
+ return np.array(self.labels).mean(0).astype("float32")
def __getitem__(self, idx):
try:
@@ -50,7 +52,10 @@ class MultiLabelDataset(CommonDataset):
img = transform(img, self._transform_ops)
img = img.transpose((2, 0, 1))
label = np.array(self.labels[idx]).astype("float32")
- return (img, label)
+        if getattr(self, "label_ratio", None) is not None:
+ return (img, np.array([label, self.label_ratio]))
+ else:
+ return (img, label)
except Exception as ex:
logger.error("Exception occured when parse line: {} with msg: {}".
diff --git a/ppcls/data/postprocess/__init__.py b/ppcls/data/postprocess/__init__.py
index 831a4da0008ba70824203be3a6f46c9700225457..54678dc443ebab5bf55d54d9284d328bbc4523b3 100644
--- a/ppcls/data/postprocess/__init__.py
+++ b/ppcls/data/postprocess/__init__.py
@@ -14,9 +14,10 @@
import copy
import importlib
-from . import topk
+from . import topk, threshoutput
from .topk import Topk, MultiLabelTopk
+from .threshoutput import ThreshOutput
def build_postprocess(config):
diff --git a/ppcls/data/postprocess/threshoutput.py b/ppcls/data/postprocess/threshoutput.py
new file mode 100644
index 0000000000000000000000000000000000000000..607aecbfdeae018a5334f723effd658fb480713a
--- /dev/null
+++ b/ppcls/data/postprocess/threshoutput.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.nn.functional as F
+
+
+class ThreshOutput(object):
+ def __init__(self, threshold, label_0="0", label_1="1"):
+ self.threshold = threshold
+ self.label_0 = label_0
+ self.label_1 = label_1
+
+ def __call__(self, x, file_names=None):
+ y = []
+ x = F.softmax(x, axis=-1).numpy()
+ for idx, probs in enumerate(x):
+ score = probs[1]
+ if score < self.threshold:
+ result = {"class_ids": [0], "scores": [1 - score], "label_names": [self.label_0]}
+ else:
+ result = {"class_ids": [1], "scores": [score], "label_names": [self.label_1]}
+ if file_names is not None:
+ result["file_name"] = file_names[idx]
+ y.append(result)
+ return y
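+
+
+# A minimal usage sketch (values hypothetical; assumes an extra "import paddle"):
+#   post = ThreshOutput(threshold=0.9, label_0="nobody", label_1="someone")
+#   logits = paddle.to_tensor([[2.0, 0.5]])  # softmax prob of class 1 is ~0.18 < 0.9
+#   post(logits)  # -> [{'class_ids': [0], 'scores': [~0.82], 'label_names': ['nobody']}]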
diff --git a/ppcls/data/preprocess/__init__.py b/ppcls/data/preprocess/__init__.py
index 62066016a47c8cef7bd31bc7d238f202ea6455f0..d0cfcf2409d2d890adcf03ef0e03b2475625ead8 100644
--- a/ppcls/data/preprocess/__init__.py
+++ b/ppcls/data/preprocess/__init__.py
@@ -33,11 +33,18 @@ from ppcls.data.preprocess.ops.operators import AugMix
from ppcls.data.preprocess.ops.operators import Pad
from ppcls.data.preprocess.ops.operators import ToTensor
from ppcls.data.preprocess.ops.operators import Normalize
+from ppcls.data.preprocess.ops.operators import RandomHorizontalFlip
+from ppcls.data.preprocess.ops.operators import CropWithPadding
+from ppcls.data.preprocess.ops.operators import RandomInterpolationAugment
+from ppcls.data.preprocess.ops.operators import ColorJitter
+from ppcls.data.preprocess.ops.operators import RandomCropImage
+from ppcls.data.preprocess.ops.operators import Padv2
from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator
import numpy as np
from PIL import Image
+import random
def transform(data, ops=[]):
@@ -88,16 +95,16 @@ class RandAugment(RawRandAugment):
class TimmAutoAugment(RawTimmAutoAugment):
""" TimmAutoAugment wrapper to auto fit different img tyeps. """
- def __init__(self, *args, **kwargs):
+ def __init__(self, prob=1.0, *args, **kwargs):
super().__init__(*args, **kwargs)
+ self.prob = prob
def __call__(self, img):
if not isinstance(img, Image.Image):
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
-
- img = super().__call__(img)
-
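+        # apply the policy stochastically; prob=0.0 disables it (used by the PULC strategy search)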
+ if random.random() < self.prob:
+ img = super().__call__(img)
if isinstance(img, Image.Image):
img = np.asarray(img)
diff --git a/ppcls/data/preprocess/ops/operators.py b/ppcls/data/preprocess/ops/operators.py
index 157f44f1ab15ffd1162aeada37dba9296ee0ca00..d31ec4b8c4f40dcaa4d53b864996725c7138a393 100644
--- a/ppcls/data/preprocess/ops/operators.py
+++ b/ppcls/data/preprocess/ops/operators.py
@@ -25,8 +25,8 @@ import cv2
import numpy as np
from PIL import Image, ImageOps, __version__ as PILLOW_VERSION
from paddle.vision.transforms import ColorJitter as RawColorJitter
-from paddle.vision.transforms import ToTensor, Normalize
-
+from paddle.vision.transforms import ToTensor, Normalize, RandomHorizontalFlip, RandomResizedCrop
+from paddle.vision.transforms import functional as F
from .autoaugment import ImageNetPolicy
from .functional import augmentations
from ppcls.utils import logger
@@ -93,6 +93,42 @@ class UnifiedResize(object):
return self.resize_func(src, size)
+class RandomInterpolationAugment(object):
+ def __init__(self, prob):
+ self.prob = prob
+
+ def _aug(self, img):
+ img_shape = img.shape
+ side_ratio = np.random.uniform(0.2, 1.0)
+ small_side = int(side_ratio * img_shape[0])
+ interpolation = np.random.choice([
+ cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA,
+ cv2.INTER_CUBIC, cv2.INTER_LANCZOS4
+ ])
+ small_img = cv2.resize(
+ img, (small_side, small_side), interpolation=interpolation)
+ interpolation = np.random.choice([
+ cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA,
+ cv2.INTER_CUBIC, cv2.INTER_LANCZOS4
+ ])
+ aug_img = cv2.resize(
+ small_img, (img_shape[1], img_shape[0]),
+ interpolation=interpolation)
+ return aug_img
+
+ def __call__(self, img):
+ if np.random.random() < self.prob:
+ if isinstance(img, np.ndarray):
+ return self._aug(img)
+ else:
+ pil_img = np.array(img)
+ aug_img = self._aug(pil_img)
+ img = Image.fromarray(aug_img.astype(np.uint8))
+ return img
+ else:
+ return img
+
+
class OperatorParamError(ValueError):
""" OperatorParamError
"""
@@ -170,6 +206,52 @@ class ResizeImage(object):
return self._resize_func(img, (w, h))
+class CropWithPadding(RandomResizedCrop):
+ """
+    crop a random region and pad it back onto a canvas of the original size
+ """
+
+ def __init__(self,
+ prob=1,
+ padding_num=0,
+ size=224,
+ scale=(0.08, 1.0),
+ ratio=(3. / 4, 4. / 3),
+ interpolation='bilinear',
+ key=None):
+ super().__init__(size, scale, ratio, interpolation, key)
+ self.prob = prob
+ self.padding_num = padding_num
+
+    def __call__(self, img):
+        # remember whether the input arrived as an OpenCV (numpy) image
+        is_cv2_img = isinstance(img, np.ndarray)
+        if np.random.random() < self.prob:
+            # RandomResizedCrop augmentation
+            new = np.zeros_like(np.array(img)) + self.padding_num
+            orig_W, orig_H = self._get_image_size(img)
+            i, j, h, w = self._get_param(img)
+            cropped = F.crop(img, i, j, h, w)
+            new[i:i + h, j:j + w, :] = np.array(cropped)
+            # only wrap back into a PIL image if the input was not numpy
+            if not is_cv2_img:
+                new = Image.fromarray(new.astype(np.uint8))
+            return new
+        else:
+            return img
+
+ def _get_image_size(self, img):
+ if F._is_pil_image(img):
+ return img.size
+ elif F._is_numpy_image(img):
+ return img.shape[:2][::-1]
+ elif F._is_tensor_image(img):
+ return img.shape[1:][::-1] # chw
+ else:
+ raise TypeError("Unexpected type {}".format(type(img)))
+
+
class CropImage(object):
""" crop image """
@@ -190,6 +272,105 @@ class CropImage(object):
return img[h_start:h_end, w_start:w_end, :]
+class Padv2(object):
+ def __init__(self,
+ size=None,
+ size_divisor=32,
+ pad_mode=0,
+ offsets=None,
+ fill_value=(127.5, 127.5, 127.5)):
+ """
+ Pad image to a specified size or multiple of size_divisor.
+ Args:
+ size (int, list): image target size, if None, pad to multiple of size_divisor, default None
+ size_divisor (int): size divisor, default 32
+            pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets;
+                if 0, only pad to right and bottom; if 1, pad according to center; if 2, only pad left and top
+            offsets (list): [offset_x, offset_y], specify offset while padding, only supported when pad_mode=-1
+            fill_value (tuple): RGB value of pad area, default (127.5, 127.5, 127.5)
+ """
+
+        if not isinstance(size, (int, list)):
+            raise TypeError(
+                "Type of size is invalid. Must be int or List, now is {}".
+                format(type(size)))
+
+ if isinstance(size, int):
+ size = [size, size]
+
+ assert pad_mode in [
+ -1, 0, 1, 2
+ ], 'currently only supports four modes [-1, 0, 1, 2]'
+ if pad_mode == -1:
+ assert offsets, 'if pad_mode is -1, offsets should not be None'
+
+ self.size = size
+ self.size_divisor = size_divisor
+ self.pad_mode = pad_mode
+ self.fill_value = fill_value
+ self.offsets = offsets
+
+ def apply_image(self, image, offsets, im_size, size):
+ x, y = offsets
+ im_h, im_w = im_size
+ h, w = size
+ canvas = np.ones((h, w, 3), dtype=np.float32)
+ canvas *= np.array(self.fill_value, dtype=np.float32)
+ canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
+ return canvas
+
+ def __call__(self, img):
+ im_h, im_w = img.shape[:2]
+ if self.size:
+ w, h = self.size
+ assert (
+ im_h <= h and im_w <= w
+ ), '(h, w) of target size should be greater than (im_h, im_w)'
+ else:
+ h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)
+ w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)
+
+ if h == im_h and w == im_w:
+ return img.astype(np.float32)
+
+ if self.pad_mode == -1:
+ offset_x, offset_y = self.offsets
+ elif self.pad_mode == 0:
+ offset_y, offset_x = 0, 0
+ elif self.pad_mode == 1:
+ offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2
+ else:
+ offset_y, offset_x = h - im_h, w - im_w
+
+ offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]
+
+ return self.apply_image(img, offsets, im_size, size)
+
+
+class RandomCropImage(object):
+ """Random crop image only
+ """
+
+ def __init__(self, size):
+ super(RandomCropImage, self).__init__()
+ if isinstance(size, int):
+ size = [size, size]
+ self.size = size
+
+ def __call__(self, img):
+
+ h, w = img.shape[:2]
+ tw, th = self.size
+ i = random.randint(0, h - th)
+ j = random.randint(0, w - tw)
+
+ img = img[i:i + th, j:j + tw, :]
+        if img.shape[0] != th or img.shape[1] != tw:
+            raise ValueError('invalid crop: ', h, w, i, j, th, tw, img.shape)
+
+ return img
+
+
class RandCropImage(object):
""" random crop image """
@@ -434,16 +615,18 @@ class ColorJitter(RawColorJitter):
"""ColorJitter.
"""
- def __init__(self, *args, **kwargs):
+ def __init__(self, prob=2, *args, **kwargs):
super().__init__(*args, **kwargs)
+ self.prob = prob
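+        # note: the default prob=2 (> 1) keeps the jitter always enabled, matching the old behavior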
def __call__(self, img):
- if not isinstance(img, Image.Image):
- img = np.ascontiguousarray(img)
- img = Image.fromarray(img)
- img = super()._apply_image(img)
- if isinstance(img, Image.Image):
- img = np.asarray(img)
+ if np.random.random() < self.prob:
+ if not isinstance(img, Image.Image):
+ img = np.ascontiguousarray(img)
+ img = Image.fromarray(img)
+ img = super()._apply_image(img)
+ if isinstance(img, Image.Image):
+ img = np.asarray(img)
return img
@@ -463,8 +646,8 @@ class Pad(object):
# Process fill color for affine transforms
major_found, minor_found = (int(v)
for v in PILLOW_VERSION.split('.')[:2])
- major_required, minor_required = (
- int(v) for v in min_pil_version.split('.')[:2])
+ major_required, minor_required = (int(v) for v in
+ min_pil_version.split('.')[:2])
if major_found < major_required or (major_found == major_required and
minor_found < minor_required):
if fill is None:
diff --git a/ppcls/engine/engine.py b/ppcls/engine/engine.py
index 5b5c4da8a6500ab90c31f33097075db5f8ee5f89..2c0ab83f4d4a875901b6655e9ccf91af1737cc73 100644
--- a/ppcls/engine/engine.py
+++ b/ppcls/engine/engine.py
@@ -75,8 +75,9 @@ class Engine(object):
print_config(config)
# init train_func and eval_func
- assert self.eval_mode in ["classification", "retrieval"], logger.error(
- "Invalid eval mode: {}".format(self.eval_mode))
+ assert self.eval_mode in [
+ "classification", "retrieval", "adaface"
+ ], logger.error("Invalid eval mode: {}".format(self.eval_mode))
self.train_epoch_func = train_epoch
self.eval_func = getattr(evaluation, self.eval_mode + "_eval")
@@ -115,7 +116,7 @@ class Engine(object):
self.config["DataLoader"], "Train", self.device, self.use_dali)
if self.mode == "eval" or (self.mode == "train" and
self.config["Global"]["eval_during_train"]):
- if self.eval_mode == "classification":
+ if self.eval_mode in ["classification", "adaface"]:
self.eval_dataloader = build_dataloader(
self.config["DataLoader"], "Eval", self.device,
self.use_dali)
@@ -189,7 +190,7 @@ class Engine(object):
self.eval_metric_func = None
# build model
- self.model = build_model(self.config)
+ self.model = build_model(self.config, self.mode)
# set @to_static for benchmark, skip this by default.
apply_to_static(self.config, self.model)
@@ -239,7 +240,7 @@ class Engine(object):
self.amp_eval = self.config["AMP"].get("use_fp16_test", False)
# TODO(gaotingquan): Paddle not yet support FP32 evaluation when training with AMPO2
- if self.config["Global"].get(
+ if self.mode == "train" and self.config["Global"].get(
"eval_during_train",
True) and self.amp_level == "O2" and self.amp_eval == False:
msg = "PaddlePaddle only support FP16 evaluation when training with AMP O2 now. "
@@ -269,10 +270,11 @@ class Engine(object):
save_dtype='float32')
# paddle version >= 2.3.0 or develop
else:
- self.model = paddle.amp.decorate(
- models=self.model,
- level=self.amp_level,
- save_dtype='float32')
+ if self.mode == "train" or self.amp_eval:
+ self.model = paddle.amp.decorate(
+ models=self.model,
+ level=self.amp_level,
+ save_dtype='float32')
if self.mode == "train" and len(self.train_loss_func.parameters(
)) > 0:
@@ -312,7 +314,7 @@ class Engine(object):
print_batch_step = self.config['Global']['print_batch_step']
save_interval = self.config["Global"]["save_interval"]
best_metric = {
- "metric": 0.0,
+ "metric": -1.0,
"epoch": 0,
}
# key:
@@ -344,18 +346,18 @@ class Engine(object):
if self.use_dali:
self.train_dataloader.reset()
- metric_msg = ", ".join([
- "{}: {:.5f}".format(key, self.output_info[key].avg)
- for key in self.output_info
- ])
+ metric_msg = ", ".join(
+ [self.output_info[key].avg_info for key in self.output_info])
logger.info("[Train][Epoch {}/{}][Avg]{}".format(
epoch_id, self.config["Global"]["epochs"], metric_msg))
self.output_info.clear()
# eval model and save model if possible
+ start_eval_epoch = self.config["Global"].get("start_eval_epoch",
+ 0) - 1
if self.config["Global"][
"eval_during_train"] and epoch_id % self.config["Global"][
- "eval_interval"] == 0:
+ "eval_interval"] == 0 and epoch_id > start_eval_epoch:
acc = self.eval(epoch_id)
if acc > best_metric["metric"]:
best_metric["metric"] = acc
@@ -367,7 +369,8 @@ class Engine(object):
self.output_dir,
model_name=self.config["Arch"]["name"],
prefix="best_model",
- loss=self.train_loss_func)
+ loss=self.train_loss_func,
+ save_student_model=True)
logger.info("[Eval][Epoch {}][best metric: {}]".format(
epoch_id, best_metric["metric"]))
logger.scaler(
@@ -431,7 +434,17 @@ class Engine(object):
image_file_list.append(image_file)
if len(batch_data) >= batch_size or idx == len(image_list) - 1:
batch_tensor = paddle.to_tensor(batch_data)
- out = self.model(batch_tensor)
+
+ if self.amp and self.amp_eval:
+ with paddle.amp.auto_cast(
+ custom_black_list={
+ "flatten_contiguous_range", "greater_than"
+ },
+ level=self.amp_level):
+ out = self.model(batch_tensor)
+ else:
+ out = self.model(batch_tensor)
+
if isinstance(out, list):
out = out[0]
if isinstance(out, dict) and "logits" in out:
@@ -445,33 +458,40 @@ class Engine(object):
def export(self):
assert self.mode == "export"
- use_multilabel = self.config["Global"].get("use_multilabel", False)
+ use_multilabel = self.config["Global"].get(
+ "use_multilabel",
+ False) and not "ATTRMetric" in self.config["Metric"]["Eval"][0]
model = ExportModel(self.config["Arch"], self.model, use_multilabel)
if self.config["Global"]["pretrained_model"] is not None:
load_dygraph_pretrain(model.base_model,
self.config["Global"]["pretrained_model"])
model.eval()
+
+        # for re-parameterizable ("rep") nets: fuse training-time branches before export
+ for layer in self.model.sublayers():
+ if hasattr(layer, "rep"):
+ layer.rep()
+
save_path = os.path.join(self.config["Global"]["save_inference_dir"],
"inference")
- if model.quanter:
- model.quanter.save_quantized_model(
- model.base_model,
- save_path,
- input_spec=[
- paddle.static.InputSpec(
- shape=[None] + self.config["Global"]["image_shape"],
- dtype='float32')
- ])
+
+ model = paddle.jit.to_static(
+ model,
+ input_spec=[
+ paddle.static.InputSpec(
+ shape=[None] + self.config["Global"]["image_shape"],
+ dtype='float32')
+ ])
+ if hasattr(model.base_model,
+ "quanter") and model.base_model.quanter is not None:
+ model.base_model.quanter.save_quantized_model(model,
+ save_path + "_int8")
else:
- model = paddle.jit.to_static(
- model,
- input_spec=[
- paddle.static.InputSpec(
- shape=[None] + self.config["Global"]["image_shape"],
- dtype='float32')
- ])
paddle.jit.save(model, save_path)
+ logger.info(
+ f"Export succeeded! The inference model exported has been saved in \"{self.config['Global']['save_inference_dir']}\"."
+ )
class ExportModel(TheseusLayer):
diff --git a/ppcls/engine/evaluation/__init__.py b/ppcls/engine/evaluation/__init__.py
index e0cd778887bf6f0e7ce05c18b587e5b54bcf6b3f..a301ad7fda34b87a959b59251b6dd0fffe9eb3e9 100644
--- a/ppcls/engine/evaluation/__init__.py
+++ b/ppcls/engine/evaluation/__init__.py
@@ -14,3 +14,4 @@
from ppcls.engine.evaluation.classification import classification_eval
from ppcls.engine.evaluation.retrieval import retrieval_eval
+from ppcls.engine.evaluation.adaface import adaface_eval
\ No newline at end of file
diff --git a/ppcls/engine/evaluation/adaface.py b/ppcls/engine/evaluation/adaface.py
new file mode 100644
index 0000000000000000000000000000000000000000..e62144b5cb374a14a93616c33e56ee74bef0eb01
--- /dev/null
+++ b/ppcls/engine/evaluation/adaface.py
@@ -0,0 +1,260 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import time
+import numpy as np
+import platform
+import paddle
+import sklearn
+from sklearn.model_selection import KFold
+from sklearn.decomposition import PCA
+
+from ppcls.utils.misc import AverageMeter
+from ppcls.utils import logger
+
+
+def fuse_features_with_norm(stacked_embeddings, stacked_norms):
+ assert stacked_embeddings.ndim == 3 # (n_features_to_fuse, batch_size, channel)
+ assert stacked_norms.ndim == 3 # (n_features_to_fuse, batch_size, 1)
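+    # norm-weighted fusion: undo each view's normalization, sum the views, then
+    # re-normalize, so views with larger feature norms contribute more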
+ pre_norm_embeddings = stacked_embeddings * stacked_norms
+ fused = pre_norm_embeddings.sum(axis=0)
+ norm = paddle.norm(fused, 2, 1, True)
+ fused = paddle.divide(fused, norm)
+ return fused, norm
+
+
+def adaface_eval(engine, epoch_id=0):
+ output_info = dict()
+ time_info = {
+ "batch_cost": AverageMeter(
+ "batch_cost", '.5f', postfix=" s,"),
+ "reader_cost": AverageMeter(
+ "reader_cost", ".5f", postfix=" s,"),
+ }
+ print_batch_step = engine.config["Global"]["print_batch_step"]
+
+ metric_key = None
+ tic = time.time()
+ unique_dict = {}
+ for iter_id, batch in enumerate(engine.eval_dataloader):
+ images, labels, dataname, image_index = batch
+ if iter_id == 5:
+ for key in time_info:
+ time_info[key].reset()
+ time_info["reader_cost"].update(time.time() - tic)
+ batch_size = images.shape[0]
+ batch[0] = paddle.to_tensor(images)
+ embeddings = engine.model(images, labels)['features']
+        # split features into a unit-length direction and its L2 norm
+        norms = paddle.norm(embeddings, 2, 1, True)
+        embeddings = paddle.divide(embeddings, norms)
+        flipped_images = paddle.flip(images, axis=[3])
+        flipped_embeddings = engine.model(flipped_images, labels)['features']
+        flipped_norms = paddle.norm(flipped_embeddings, 2, 1, True)
+        flipped_embeddings = paddle.divide(flipped_embeddings, flipped_norms)
+ stacked_embeddings = paddle.stack(
+ [embeddings, flipped_embeddings], axis=0)
+ stacked_norms = paddle.stack([norms, flipped_norms], axis=0)
+ embeddings, norms = fuse_features_with_norm(stacked_embeddings,
+ stacked_norms)
+
+ for out, nor, label, data, idx in zip(embeddings, norms, labels,
+ dataname, image_index):
+ unique_dict[int(idx.numpy())] = {
+ 'output': out,
+ 'norm': nor,
+ 'target': label,
+ 'dataname': data
+ }
+ # calc metric
+ time_info["batch_cost"].update(time.time() - tic)
+ if iter_id % print_batch_step == 0:
+ time_msg = "s, ".join([
+ "{}: {:.5f}".format(key, time_info[key].avg)
+ for key in time_info
+ ])
+
+ ips_msg = "ips: {:.5f} images/sec".format(
+ batch_size / time_info["batch_cost"].avg)
+
+ metric_msg = ", ".join([
+ "{}: {:.5f}".format(key, output_info[key].val)
+ for key in output_info
+ ])
+ logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format(
+ epoch_id, iter_id,
+ len(engine.eval_dataloader), metric_msg, time_msg, ips_msg))
+
+ tic = time.time()
+
+ unique_keys = sorted(unique_dict.keys())
+ all_output_tensor = paddle.stack(
+ [unique_dict[key]['output'] for key in unique_keys], axis=0)
+ all_norm_tensor = paddle.stack(
+ [unique_dict[key]['norm'] for key in unique_keys], axis=0)
+ all_target_tensor = paddle.stack(
+ [unique_dict[key]['target'] for key in unique_keys], axis=0)
+ all_dataname_tensor = paddle.stack(
+ [unique_dict[key]['dataname'] for key in unique_keys], axis=0)
+
+ eval_result = cal_metric(all_output_tensor, all_norm_tensor,
+ all_target_tensor, all_dataname_tensor)
+
+ metric_msg = ", ".join([
+ "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info
+ ])
+ face_msg = ", ".join([
+ "{}: {:.5f}".format(key, eval_result[key])
+ for key in eval_result.keys()
+ ])
+ logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg + ", " +
+ face_msg))
+
+ # return 1st metric in the dict
+ return eval_result['all_test_acc']
+
+
+def cal_metric(all_output_tensor, all_norm_tensor, all_target_tensor,
+ all_dataname_tensor):
+ all_target_tensor = all_target_tensor.reshape([-1])
+ all_dataname_tensor = all_dataname_tensor.reshape([-1])
+ dataname_to_idx = {
+ "agedb_30": 0,
+ "cfp_fp": 1,
+ "lfw": 2,
+ "cplfw": 3,
+ "calfw": 4
+ }
+ idx_to_dataname = {val: key for key, val in dataname_to_idx.items()}
+ test_logs = {}
+ # _, indices = paddle.unique(all_dataname_tensor, return_index=True, return_inverse=False, return_counts=False)
+ for dataname_idx in all_dataname_tensor.unique():
+ dataname = idx_to_dataname[dataname_idx.item()]
+ # per dataset evaluation
+ embeddings = all_output_tensor[all_dataname_tensor ==
+ dataname_idx].numpy()
+ labels = all_target_tensor[all_dataname_tensor == dataname_idx].numpy()
+ issame = labels[0::2]
+ tpr, fpr, accuracy, best_thresholds = evaluate_face(
+ embeddings, issame, nrof_folds=10)
+ acc, best_threshold = accuracy.mean(), best_thresholds.mean()
+
+ num_test_samples = len(embeddings)
+ test_logs[f'{dataname}_test_acc'] = acc
+ test_logs[f'{dataname}_test_best_threshold'] = best_threshold
+ test_logs[f'{dataname}_num_test_samples'] = num_test_samples
+
+ test_acc = np.mean([
+ test_logs[f'{dataname}_test_acc']
+ for dataname in dataname_to_idx.keys()
+ if f'{dataname}_test_acc' in test_logs
+ ])
+
+ test_logs['all_test_acc'] = test_acc
+ return test_logs
+
+
+def evaluate_face(embeddings, actual_issame, nrof_folds=10, pca=0):
+ # Calculate evaluation metrics
+ thresholds = np.arange(0, 4, 0.01)
+ embeddings1 = embeddings[0::2]
+ embeddings2 = embeddings[1::2]
+ tpr, fpr, accuracy, best_thresholds = calculate_roc(
+ thresholds,
+ embeddings1,
+ embeddings2,
+ np.asarray(actual_issame),
+ nrof_folds=nrof_folds,
+ pca=pca)
+ return tpr, fpr, accuracy, best_thresholds
+
+
+def calculate_roc(thresholds,
+ embeddings1,
+ embeddings2,
+ actual_issame,
+ nrof_folds=10,
+ pca=0):
+ assert (embeddings1.shape[0] == embeddings2.shape[0])
+ assert (embeddings1.shape[1] == embeddings2.shape[1])
+ nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
+ nrof_thresholds = len(thresholds)
+ k_fold = KFold(n_splits=nrof_folds, shuffle=False)
+
+ tprs = np.zeros((nrof_folds, nrof_thresholds))
+ fprs = np.zeros((nrof_folds, nrof_thresholds))
+ accuracy = np.zeros((nrof_folds))
+ best_thresholds = np.zeros((nrof_folds))
+ indices = np.arange(nrof_pairs)
+ # print('pca', pca)
+ dist = None
+
+ if pca == 0:
+ diff = np.subtract(embeddings1, embeddings2)
+ dist = np.sum(np.square(diff), 1)
+
+ for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
+ # print('train_set', train_set)
+ # print('test_set', test_set)
+ if pca > 0:
+ print('doing pca on', fold_idx)
+ embed1_train = embeddings1[train_set]
+ embed2_train = embeddings2[train_set]
+ _embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
+ # print(_embed_train.shape)
+ pca_model = PCA(n_components=pca)
+ pca_model.fit(_embed_train)
+ embed1 = pca_model.transform(embeddings1)
+ embed2 = pca_model.transform(embeddings2)
+ embed1 = sklearn.preprocessing.normalize(embed1)
+ embed2 = sklearn.preprocessing.normalize(embed2)
+ # print(embed1.shape, embed2.shape)
+ diff = np.subtract(embed1, embed2)
+ dist = np.sum(np.square(diff), 1)
+
+ # Find the best threshold for the fold
+ acc_train = np.zeros((nrof_thresholds))
+ for threshold_idx, threshold in enumerate(thresholds):
+ _, _, acc_train[threshold_idx] = calculate_accuracy(
+ threshold, dist[train_set], actual_issame[train_set])
+ best_threshold_index = np.argmax(acc_train)
+ best_thresholds[fold_idx] = thresholds[best_threshold_index]
+ for threshold_idx, threshold in enumerate(thresholds):
+ tprs[fold_idx, threshold_idx], fprs[
+ fold_idx, threshold_idx], _ = calculate_accuracy(
+ threshold, dist[test_set], actual_issame[test_set])
+ _, _, accuracy[fold_idx] = calculate_accuracy(
+ thresholds[best_threshold_index], dist[test_set],
+ actual_issame[test_set])
+
+ tpr = np.mean(tprs, 0)
+ fpr = np.mean(fprs, 0)
+ return tpr, fpr, accuracy, best_thresholds
+
+
+def calculate_accuracy(threshold, dist, actual_issame):
+ predict_issame = np.less(dist, threshold)
+ tp = np.sum(np.logical_and(predict_issame, actual_issame))
+ fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
+ tn = np.sum(
+ np.logical_and(
+ np.logical_not(predict_issame), np.logical_not(actual_issame)))
+ fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
+
+ tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
+ fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
+ acc = float(tp + tn) / dist.size
+ return tpr, fpr, acc
diff --git a/ppcls/engine/evaluation/classification.py b/ppcls/engine/evaluation/classification.py
index 60595e6a9014b4003ab8008b8144d92d628a2acd..1f9b55fc33ff6b49e9e7f7bd3e9bcebdbf3e0093 100644
--- a/ppcls/engine/evaluation/classification.py
+++ b/ppcls/engine/evaluation/classification.py
@@ -23,6 +23,8 @@ from ppcls.utils import logger
def classification_eval(engine, epoch_id=0):
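+    # clear any state a stateful metric accumulated during the previous eval pass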
+ if hasattr(engine.eval_metric_func, "reset"):
+ engine.eval_metric_func.reset()
output_info = dict()
time_info = {
"batch_cost": AverageMeter(
@@ -80,6 +82,7 @@ def classification_eval(engine, epoch_id=0):
# gather Tensor when distributed
if paddle.distributed.get_world_size() > 1:
label_list = []
+
paddle.distributed.all_gather(label_list, batch[1])
labels = paddle.concat(label_list, 0)
@@ -121,18 +124,10 @@ def classification_eval(engine, epoch_id=0):
output_info[key] = AverageMeter(key, '7.5f')
output_info[key].update(loss_dict[key].numpy()[0],
current_samples)
+
# calc metric
if engine.eval_metric_func is not None:
- metric_dict = engine.eval_metric_func(preds, labels)
- for key in metric_dict:
- if metric_key is None:
- metric_key = key
- if key not in output_info:
- output_info[key] = AverageMeter(key, '7.5f')
-
- output_info[key].update(metric_dict[key].numpy()[0],
- current_samples)
-
+ engine.eval_metric_func(preds, labels)
time_info["batch_cost"].update(time.time() - tic)
if iter_id % print_batch_step == 0:
@@ -144,10 +139,14 @@ def classification_eval(engine, epoch_id=0):
ips_msg = "ips: {:.5f} images/sec".format(
batch_size / time_info["batch_cost"].avg)
- metric_msg = ", ".join([
- "{}: {:.5f}".format(key, output_info[key].val)
- for key in output_info
- ])
+ if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
+ metric_msg = ""
+ else:
+ metric_msg = ", ".join([
+ "{}: {:.5f}".format(key, output_info[key].val)
+ for key in output_info
+ ])
+ metric_msg += ", {}".format(engine.eval_metric_func.avg_info)
logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format(
epoch_id, iter_id,
len(engine.eval_dataloader), metric_msg, time_msg, ips_msg))
@@ -155,13 +154,29 @@ def classification_eval(engine, epoch_id=0):
tic = time.time()
if engine.use_dali:
engine.eval_dataloader.reset()
- metric_msg = ", ".join([
- "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info
- ])
- logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))
-
- # do not try to save best eval.model
- if engine.eval_metric_func is None:
- return -1
- # return 1st metric in the dict
- return output_info[metric_key].avg
+
+ if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
+ metric_msg = ", ".join([
+ "evalres: ma: {:.5f} label_f1: {:.5f} label_pos_recall: {:.5f} label_neg_recall: {:.5f} instance_f1: {:.5f} instance_acc: {:.5f} instance_prec: {:.5f} instance_recall: {:.5f}".
+ format(*engine.eval_metric_func.attr_res())
+ ])
+ logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))
+
+ # do not try to save best eval.model
+ if engine.eval_metric_func is None:
+ return -1
+ # return 1st metric in the dict
+ return engine.eval_metric_func.attr_res()[0]
+ else:
+ metric_msg = ", ".join([
+ "{}: {:.5f}".format(key, output_info[key].avg)
+ for key in output_info
+ ])
+ metric_msg += ", {}".format(engine.eval_metric_func.avg_info)
+ logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))
+
+ # do not try to save best eval.model
+ if engine.eval_metric_func is None:
+ return -1
+ # return 1st metric in the dict
+ return engine.eval_metric_func.avg
diff --git a/ppcls/engine/evaluation/retrieval.py b/ppcls/engine/evaluation/retrieval.py
index 05c5d0c35d0f6fdfcd0a8f1dc1a8a121026ede99..f68902285cae9896f76eca30cbabbbacaf5a2b3f 100644
--- a/ppcls/engine/evaluation/retrieval.py
+++ b/ppcls/engine/evaluation/retrieval.py
@@ -16,6 +16,9 @@ from __future__ import division
from __future__ import print_function
import platform
+from typing import Optional
+
+import numpy as np
import paddle
from ppcls.utils import logger
@@ -48,34 +51,67 @@ def retrieval_eval(engine, epoch_id=0):
if engine.eval_loss_func is None:
metric_dict = {metric_key: 0.}
else:
+ reranking_flag = engine.config['Global'].get('re_ranking', False)
+ logger.info(f"re_ranking={reranking_flag}")
metric_dict = dict()
- for block_idx, block_fea in enumerate(fea_blocks):
- similarity_matrix = paddle.matmul(
- block_fea, gallery_feas, transpose_y=True)
- if query_query_id is not None:
- query_id_block = query_id_blocks[block_idx]
- query_id_mask = (query_id_block != gallery_unique_id.t())
-
- image_id_block = image_id_blocks[block_idx]
- image_id_mask = (image_id_block != gallery_img_id.t())
-
- keep_mask = paddle.logical_or(query_id_mask, image_id_mask)
- similarity_matrix = similarity_matrix * keep_mask.astype(
- "float32")
- else:
- keep_mask = None
-
- metric_tmp = engine.eval_metric_func(similarity_matrix,
- image_id_blocks[block_idx],
- gallery_img_id, keep_mask)
+ if reranking_flag:
+ # set the order from small to large
+ for i in range(len(engine.eval_metric_func.metric_func_list)):
+ if hasattr(engine.eval_metric_func.metric_func_list[i], 'descending') \
+ and engine.eval_metric_func.metric_func_list[i].descending is True:
+ engine.eval_metric_func.metric_func_list[
+ i].descending = False
+ logger.warning(
+ f"re_ranking=True,{engine.eval_metric_func.metric_func_list[i].__class__.__name__}.descending has been set to False"
+ )
+
+            # compute distance matrix (the smaller the value, the more similar)
+ distmat = re_ranking(
+ query_feas, gallery_feas, k1=20, k2=6, lambda_value=0.3)
+ # compute keep mask
+ query_id_mask = (query_query_id != gallery_unique_id.t())
+ image_id_mask = (query_img_id != gallery_img_id.t())
+ keep_mask = paddle.logical_or(query_id_mask, image_id_mask)
+
+            # push excluded (same-identity) pairs out of range with a huge (1e20) distance
+ distmat = distmat * keep_mask.astype("float32")
+ inf_mat = (paddle.logical_not(keep_mask).astype("float32")) * 1e20
+ distmat = distmat + inf_mat
+
+ # compute metric
+ metric_tmp = engine.eval_metric_func(distmat, query_img_id,
+ gallery_img_id, keep_mask)
for key in metric_tmp:
- if key not in metric_dict:
- metric_dict[key] = metric_tmp[key] * block_fea.shape[
- 0] / len(query_feas)
+ metric_dict[key] = metric_tmp[key]
+ else:
+ for block_idx, block_fea in enumerate(fea_blocks):
+ similarity_matrix = paddle.matmul(
+ block_fea, gallery_feas, transpose_y=True) # [n,m]
+ if query_query_id is not None:
+ query_id_block = query_id_blocks[block_idx]
+ query_id_mask = (query_id_block != gallery_unique_id.t())
+
+ image_id_block = image_id_blocks[block_idx]
+ image_id_mask = (image_id_block != gallery_img_id.t())
+
+ keep_mask = paddle.logical_or(query_id_mask, image_id_mask)
+ similarity_matrix = similarity_matrix * keep_mask.astype(
+ "float32")
else:
- metric_dict[key] += metric_tmp[key] * block_fea.shape[
- 0] / len(query_feas)
+ keep_mask = None
+
+ metric_tmp = engine.eval_metric_func(
+ similarity_matrix, image_id_blocks[block_idx],
+ gallery_img_id, keep_mask)
+
+ for key in metric_tmp:
+ if key not in metric_dict:
+ metric_dict[key] = metric_tmp[key] * block_fea.shape[
+ 0] / len(query_feas)
+ else:
+ metric_dict[key] += metric_tmp[key] * block_fea.shape[
+ 0] / len(query_feas)
metric_info_list = []
for key in metric_dict:
@@ -185,3 +221,109 @@ def cal_feature(engine, name='gallery'):
logger.info("Build {} done, all feat shape: {}, begin to eval..".format(
name, all_feas.shape))
return all_feas, all_img_id, all_unique_id
+
+
+def re_ranking(query_feas: paddle.Tensor,
+ gallery_feas: paddle.Tensor,
+ k1: int=20,
+ k2: int=6,
+               lambda_value: float=0.5,
+ local_distmat: Optional[np.ndarray]=None,
+ only_local: bool=False) -> paddle.Tensor:
+ """re-ranking, most computed with numpy
+
+ code heavily based on
+ https://github.com/michuanhaohao/reid-strong-baseline/blob/3da7e6f03164a92e696cb6da059b1cd771b0346d/utils/reid_metric.py
+
+ Args:
+ query_feas (paddle.Tensor): query features, [num_query, num_features]
+ gallery_feas (paddle.Tensor): gallery features, [num_gallery, num_features]
+ k1 (int, optional): k1. Defaults to 20.
+ k2 (int, optional): k2. Defaults to 6.
+        lambda_value (float, optional): weight of the original distance in the final blend. Defaults to 0.5.
+ local_distmat (Optional[np.ndarray], optional): local_distmat. Defaults to None.
+ only_local (bool, optional): only_local. Defaults to False.
+
+ Returns:
+ paddle.Tensor: final_dist matrix after re-ranking, [num_query, num_gallery]
+ """
+ query_num = query_feas.shape[0]
+ all_num = query_num + gallery_feas.shape[0]
+ if only_local:
+ original_dist = local_distmat
+ else:
+ feat = paddle.concat([query_feas, gallery_feas])
+        logger.info('computing original distance matrix')
+
+ # L2 distance
+ distmat = paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([all_num, all_num]) + \
+ paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([all_num, all_num]).t()
+ distmat = distmat.addmm(x=feat, y=feat.t(), alpha=-2.0, beta=1.0)
+
+ original_dist = distmat.cpu().numpy()
+ del feat
+ if local_distmat is not None:
+ original_dist = original_dist + local_distmat
+
+ gallery_num = original_dist.shape[0]
+ original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
+ V = np.zeros_like(original_dist).astype(np.float16)
+ initial_rank = np.argsort(original_dist).astype(np.int32)
+ logger.info('starting re_ranking')
+ for i in range(all_num):
+ # k-reciprocal neighbors
+ forward_k_neigh_index = initial_rank[i, :k1 + 1]
+ backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
+ fi = np.where(backward_k_neigh_index == i)[0]
+ k_reciprocal_index = forward_k_neigh_index[fi]
+ k_reciprocal_expansion_index = k_reciprocal_index
+ for j in range(len(k_reciprocal_index)):
+ candidate = k_reciprocal_index[j]
+ candidate_forward_k_neigh_index = initial_rank[candidate, :int(
+ np.around(k1 / 2)) + 1]
+ candidate_backward_k_neigh_index = initial_rank[
+ candidate_forward_k_neigh_index, :int(np.around(k1 / 2)) + 1]
+ fi_candidate = np.where(
+ candidate_backward_k_neigh_index == candidate)[0]
+ candidate_k_reciprocal_index = candidate_forward_k_neigh_index[
+ fi_candidate]
+ if len(
+ np.intersect1d(candidate_k_reciprocal_index,
+ k_reciprocal_index)) > 2 / 3 * len(
+ candidate_k_reciprocal_index):
+ k_reciprocal_expansion_index = np.append(
+ k_reciprocal_expansion_index, candidate_k_reciprocal_index)
+
+ k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
+ weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
+ V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
+ original_dist = original_dist[:query_num, ]
+ if k2 != 1:
+ V_qe = np.zeros_like(V, dtype=np.float16)
+ for i in range(all_num):
+ V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
+ V = V_qe
+ del V_qe
+ del initial_rank
+ invIndex = []
+ for i in range(gallery_num):
+ invIndex.append(np.where(V[:, i] != 0)[0])
+
+ jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
+ for i in range(query_num):
+ temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)
+ indNonZero = np.where(V[i, :] != 0)[0]
+ indImages = [invIndex[ind] for ind in indNonZero]
+ for j in range(len(indNonZero)):
+ temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(
+ V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])
+ jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
+
+ final_dist = jaccard_dist * (1 - lambda_value
+ ) + original_dist * lambda_value
+ del original_dist
+ del V
+ del jaccard_dist
+ final_dist = final_dist[:query_num, query_num:]
+ final_dist = paddle.to_tensor(final_dist)
+ return final_dist
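
For context, a minimal sketch of how the new `re_ranking` helper behaves in isolation, assuming it is importable from the patched retrieval module; the shapes and k1/k2/lambda values below are illustrative, not taken from any config in this patch.

```python
# a minimal sketch (assumes paddlepaddle is installed and the patched module
# path below is importable); shapes and hyper-parameters are illustrative only.
import paddle

from ppcls.engine.evaluation.retrieval import re_ranking  # assumed module path

num_query, num_gallery, dim = 8, 32, 128
query_feas = paddle.nn.functional.normalize(paddle.randn([num_query, dim]), axis=1)
gallery_feas = paddle.nn.functional.normalize(paddle.randn([num_gallery, dim]), axis=1)

# re_ranking returns a *distance* matrix (smaller = more similar), which is why
# the evaluation code above flips every metric's `descending` flag to False.
distmat = re_ranking(query_feas, gallery_feas, k1=20, k2=6, lambda_value=0.3)
assert list(distmat.shape) == [num_query, num_gallery]
ranked = paddle.argsort(distmat, axis=1, descending=False)  # best match first
```
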
diff --git a/ppcls/loss/multilabelloss.py b/ppcls/loss/multilabelloss.py
index d30d5b8d18083385567d0bcdffaa1fd2da4876f5..a88d8265a0c1fe9f21708ae27cabf6a5144f052d 100644
--- a/ppcls/loss/multilabelloss.py
+++ b/ppcls/loss/multilabelloss.py
@@ -3,16 +3,29 @@ import paddle.nn as nn
import paddle.nn.functional as F
+def ratio2weight(targets, ratio):
+ pos_weights = targets * (1. - ratio)
+ neg_weights = (1. - targets) * ratio
+ weights = paddle.exp(neg_weights + pos_weights)
+
+    # for the RAP dataset, a target element may be 2; with or without label smoothing, some elements can be greater than 1
+ weights = weights - weights * (targets > 1)
+
+ return weights
+
+
class MultiLabelLoss(nn.Layer):
"""
Multi-label loss
"""
- def __init__(self, epsilon=None):
+ def __init__(self, epsilon=None, size_sum=False, weight_ratio=False):
super().__init__()
if epsilon is not None and (epsilon <= 0 or epsilon >= 1):
epsilon = None
self.epsilon = epsilon
+ self.weight_ratio = weight_ratio
+ self.size_sum = size_sum
def _labelsmoothing(self, target, class_num):
if target.ndim == 1 or target.shape[-1] != class_num:
@@ -24,13 +37,21 @@ class MultiLabelLoss(nn.Layer):
return soft_target
def _binary_crossentropy(self, input, target, class_num):
+ if self.weight_ratio:
+ target, label_ratio = target[:, 0, :], target[:, 1, :]
if self.epsilon is not None:
target = self._labelsmoothing(target, class_num)
- cost = F.binary_cross_entropy_with_logits(
- logit=input, label=target)
- else:
- cost = F.binary_cross_entropy_with_logits(
- logit=input, label=target)
+ cost = F.binary_cross_entropy_with_logits(
+ logit=input, label=target, reduction='none')
+
+ if self.weight_ratio:
+ targets_mask = paddle.cast(target > 0.5, 'float32')
+ weight = ratio2weight(targets_mask, paddle.to_tensor(label_ratio))
+ weight = weight * (target > -1)
+ cost = cost * weight
+
+        cost = cost.sum(1).mean() if self.size_sum else cost.mean()
return cost
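
To make the new `weight_ratio` path concrete: `ratio2weight` up-weights positives of rare attributes and down-weights positives of frequent ones before the per-element BCE is reduced. A small sketch on made-up numbers, assuming the patched module is importable:

```python
# toy illustration of ratio2weight; the label ratios below are made up.
import paddle

from ppcls.loss.multilabelloss import ratio2weight

targets = paddle.to_tensor([[1., 0., 1.],
                            [0., 0., 1.]])   # multi-hot labels, batch of 2
ratio = paddle.to_tensor([0.1, 0.5, 0.9])    # positive ratio of each attribute

# a positive of a rare attribute (ratio 0.1) gets weight exp(0.9) ~= 2.46,
# a positive of a common attribute (ratio 0.9) gets weight exp(0.1) ~= 1.11.
print(ratio2weight(targets, ratio).numpy())
```
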
diff --git a/ppcls/metric/__init__.py b/ppcls/metric/__init__.py
index 94721235bca5ab4c27ddba36dd265a01cea003ad..1f49cc2d9c4e8a70287b416447c0d1d98a582113 100644
--- a/ppcls/metric/__init__.py
+++ b/ppcls/metric/__init__.py
@@ -12,17 +12,19 @@
#See the License for the specific language governing permissions and
#limitations under the License.
-from paddle import nn
import copy
from collections import OrderedDict
+from .avg_metrics import AvgMetrics
from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk
from .metrics import DistillationTopkAcc
from .metrics import GoogLeNetTopkAcc
from .metrics import HammingDistance, AccuracyScore
+from .metrics import ATTRMetric
+from .metrics import TprAtFpr
-class CombinedMetrics(nn.Layer):
+class CombinedMetrics(AvgMetrics):
def __init__(self, config_list):
super().__init__()
self.metric_func_list = []
@@ -38,13 +40,30 @@ class CombinedMetrics(nn.Layer):
eval(metric_name)(**metric_params))
else:
self.metric_func_list.append(eval(metric_name)())
+ self.reset()
- def __call__(self, *args, **kwargs):
+ def forward(self, *args, **kwargs):
metric_dict = OrderedDict()
for idx, metric_func in enumerate(self.metric_func_list):
metric_dict.update(metric_func(*args, **kwargs))
return metric_dict
+ @property
+ def avg_info(self):
+ return ", ".join([metric.avg_info for metric in self.metric_func_list])
+
+ @property
+ def avg(self):
+ return self.metric_func_list[0].avg
+
+ def attr_res(self):
+ return self.metric_func_list[0].attrmeter.res()
+
+ def reset(self):
+ for metric in self.metric_func_list:
+ if hasattr(metric, "reset"):
+ metric.reset()
+
def build_metrics(config):
metrics_list = CombinedMetrics(copy.deepcopy(config))
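
The list-of-single-key-dicts shape that `build_metrics` consumes mirrors the `Metric` section of the training configs; a minimal sketch (the metric choices here are illustrative):

```python
# a minimal sketch of config-driven metric construction, assuming a PaddleClas
# environment where ppcls is importable; the metric list is illustrative only.
from ppcls.metric import build_metrics

metric_config = [
    {"TopkAcc": {"topk": [1, 5]}},  # becomes TopkAcc(topk=[1, 5])
    {"TprAtFpr": None},             # None -> instantiated with default arguments
]
metrics = build_metrics(metric_config)
metrics.reset()  # forwarded to every metric that defines reset()
```
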
diff --git a/ppcls/metric/avg_metrics.py b/ppcls/metric/avg_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f4b62290b3d03879f8910b197b59b5448cb7247
--- /dev/null
+++ b/ppcls/metric/avg_metrics.py
@@ -0,0 +1,20 @@
+from paddle import nn
+
+
+class AvgMetrics(nn.Layer):
+ def __init__(self):
+ super().__init__()
+ self.avg_meters = {}
+
+ def reset(self):
+ self.avg_meters = {}
+
+ @property
+ def avg(self):
+ if self.avg_meters:
+ for metric_key in self.avg_meters:
+ return self.avg_meters[metric_key].avg
+
+ @property
+ def avg_info(self):
+ return ", ".join([self.avg_meters[key].avg_info for key in self.avg_meters])
diff --git a/ppcls/metric/metrics.py b/ppcls/metric/metrics.py
index 03e742082b57439227746d21695379b498e7f1d8..2161ca86ae51c1c1aa551dd08c1924adc3d9c59b 100644
--- a/ppcls/metric/metrics.py
+++ b/ppcls/metric/metrics.py
@@ -22,14 +22,26 @@ from sklearn.metrics import accuracy_score as accuracy_metric
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.preprocessing import binarize
+from easydict import EasyDict
-class TopkAcc(nn.Layer):
+from ppcls.metric.avg_metrics import AvgMetrics
+from ppcls.utils.misc import AverageMeter, AttrMeter
+
+
+class TopkAcc(AvgMetrics):
def __init__(self, topk=(1, 5)):
super().__init__()
assert isinstance(topk, (int, list, tuple))
if isinstance(topk, int):
topk = [topk]
self.topk = topk
+ self.reset()
+
+ def reset(self):
+ self.avg_meters = {
+ "top{}".format(k): AverageMeter("top{}".format(k))
+ for k in self.topk
+ }
def forward(self, x, label):
if isinstance(x, dict):
@@ -39,19 +51,21 @@ class TopkAcc(nn.Layer):
for k in self.topk:
metric_dict["top{}".format(k)] = paddle.metric.accuracy(
x, label, k=k)
+ self.avg_meters["top{}".format(k)].update(metric_dict["top{}".format(k)], x.shape[0])
return metric_dict
class mAP(nn.Layer):
- def __init__(self):
+ def __init__(self, descending=True):
super().__init__()
+ self.descending = descending
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
keep_mask):
metric_dict = dict()
choosen_indices = paddle.argsort(
- similarities_matrix, axis=1, descending=True)
+ similarities_matrix, axis=1, descending=self.descending)
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
gallery_labels_transpose = paddle.broadcast_to(
gallery_labels_transpose,
@@ -87,15 +101,16 @@ class mAP(nn.Layer):
class mINP(nn.Layer):
- def __init__(self):
+ def __init__(self, descending=True):
super().__init__()
+ self.descending = descending
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
keep_mask):
metric_dict = dict()
choosen_indices = paddle.argsort(
- similarities_matrix, axis=1, descending=True)
+ similarities_matrix, axis=1, descending=self.descending)
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
gallery_labels_transpose = paddle.broadcast_to(
gallery_labels_transpose,
@@ -106,7 +121,7 @@ class mINP(nn.Layer):
choosen_indices)
equal_flag = paddle.equal(choosen_label, query_img_id)
if keep_mask is not None:
- keep_mask = paddle.index_sample(
+        keep_mask = paddle.index_sample(
keep_mask.astype('float32'), choosen_indices)
equal_flag = paddle.logical_and(equal_flag,
keep_mask.astype('bool'))
@@ -129,13 +144,69 @@ class mINP(nn.Layer):
return metric_dict
+class TprAtFpr(nn.Layer):
+ def __init__(self, max_fpr=1 / 1000.):
+ super().__init__()
+ self.gt_pos_score_list = []
+ self.gt_neg_score_list = []
+ self.softmax = nn.Softmax(axis=-1)
+ self.max_fpr = max_fpr
+ self.max_tpr = 0.
+
+ def forward(self, x, label):
+ if isinstance(x, dict):
+ x = x["logits"]
+ x = self.softmax(x)
+ for i, label_i in enumerate(label):
+ if label_i[0] == 0:
+ self.gt_neg_score_list.append(x[i][1].numpy())
+ else:
+ self.gt_pos_score_list.append(x[i][1].numpy())
+ return {}
+
+ def reset(self):
+ self.gt_pos_score_list = []
+ self.gt_neg_score_list = []
+ self.max_tpr = 0.
+
+ @property
+ def avg(self):
+ return self.max_tpr
+
+ @property
+ def avg_info(self):
+ max_tpr = 0.
+ result = ""
+ gt_pos_score_list = np.array(self.gt_pos_score_list)
+ gt_neg_score_list = np.array(self.gt_neg_score_list)
+ for i in range(0, 10000):
+ threshold = i / 10000.
+ if len(gt_pos_score_list) == 0:
+ continue
+ tpr = np.sum(
+ gt_pos_score_list > threshold) / len(gt_pos_score_list)
+            if len(gt_neg_score_list) == 0:
+                if tpr > max_tpr:
+                    max_tpr = tpr
+                    result = "threshold: {}, fpr: {}, tpr: {:.5f}".format(
+                        threshold, 0., tpr)
+                continue
+            fpr = np.sum(
+                gt_neg_score_list > threshold) / len(gt_neg_score_list)
+ if fpr <= self.max_fpr and tpr > max_tpr:
+ max_tpr = tpr
+ result = "threshold: {}, fpr: {}, tpr: {:.5f}".format(
+ threshold, fpr, tpr)
+ self.max_tpr = max_tpr
+ return result
+
+
class Recallk(nn.Layer):
- def __init__(self, topk=(1, 5)):
+ def __init__(self, topk=(1, 5), descending=True):
super().__init__()
assert isinstance(topk, (int, list, tuple))
if isinstance(topk, int):
topk = [topk]
self.topk = topk
+ self.descending = descending
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
keep_mask):
@@ -143,7 +214,7 @@ class Recallk(nn.Layer):
#get cmc
choosen_indices = paddle.argsort(
- similarities_matrix, axis=1, descending=True)
+ similarities_matrix, axis=1, descending=self.descending)
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
gallery_labels_transpose = paddle.broadcast_to(
gallery_labels_transpose,
@@ -175,12 +246,13 @@ class Recallk(nn.Layer):
class Precisionk(nn.Layer):
- def __init__(self, topk=(1, 5)):
+ def __init__(self, topk=(1, 5), descending=True):
super().__init__()
assert isinstance(topk, (int, list, tuple))
if isinstance(topk, int):
topk = [topk]
self.topk = topk
+ self.descending = descending
def forward(self, similarities_matrix, query_img_id, gallery_img_id,
keep_mask):
@@ -188,7 +260,7 @@ class Precisionk(nn.Layer):
#get cmc
choosen_indices = paddle.argsort(
- similarities_matrix, axis=1, descending=True)
+ similarities_matrix, axis=1, descending=self.descending)
gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
gallery_labels_transpose = paddle.broadcast_to(
gallery_labels_transpose,
@@ -241,20 +313,17 @@ class GoogLeNetTopkAcc(TopkAcc):
return super().forward(x[0], label)
-class MutiLabelMetric(object):
- def __init__(self):
- pass
-
- def _multi_hot_encode(self, logits, threshold=0.5):
- return binarize(logits, threshold=threshold)
+class MultiLabelMetric(AvgMetrics):
+ def __init__(self, bi_threshold=0.5):
+ super().__init__()
+ self.bi_threshold = bi_threshold
- def __call__(self, output):
- output = F.sigmoid(output)
- preds = self._multi_hot_encode(logits=output.numpy(), threshold=0.5)
- return preds
+ def _multi_hot_encode(self, output):
+ logits = F.sigmoid(output).numpy()
+ return binarize(logits, threshold=self.bi_threshold)
-class HammingDistance(MutiLabelMetric):
+class HammingDistance(MultiLabelMetric):
"""
Soft metric based label for multilabel classification
Returns:
@@ -263,16 +332,22 @@ class HammingDistance(MutiLabelMetric):
def __init__(self):
super().__init__()
+ self.reset()
- def __call__(self, output, target):
- preds = super().__call__(output)
+ def reset(self):
+ self.avg_meters = {"HammingDistance": AverageMeter("HammingDistance")}
+
+ def forward(self, output, target):
+ preds = super()._multi_hot_encode(output)
metric_dict = dict()
metric_dict["HammingDistance"] = paddle.to_tensor(
hamming_loss(target, preds))
+ self.avg_meters["HammingDistance"].update(
+ metric_dict["HammingDistance"].numpy()[0], output.shape[0])
return metric_dict
-class AccuracyScore(MutiLabelMetric):
+class AccuracyScore(MultiLabelMetric):
"""
Hard metric for multilabel classification
Args:
@@ -288,9 +363,13 @@ class AccuracyScore(MutiLabelMetric):
assert base in ["sample", "label"
], 'must be one of ["sample", "label"]'
self.base = base
+ self.reset()
+
+ def reset(self):
+ self.avg_meters = {"AccuracyScore": AverageMeter("AccuracyScore")}
- def __call__(self, output, target):
- preds = super().__call__(output)
+ def forward(self, output, target):
+ preds = super()._multi_hot_encode(output)
metric_dict = dict()
if self.base == "sample":
accuracy = accuracy_metric(target, preds)
@@ -303,4 +382,67 @@ class AccuracyScore(MutiLabelMetric):
accuracy = (sum(tps) + sum(tns)) / (
sum(tps) + sum(tns) + sum(fns) + sum(fps))
metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy)
+ self.avg_meters["AccuracyScore"].update(
+ metric_dict["AccuracyScore"].numpy()[0], output.shape[0])
+ return metric_dict
+
+
+def get_attr_metrics(gt_label, preds_probs, threshold):
+ """
+    compute label-level and instance-level attribute statistics
+ adapted from "https://github.com/valencebond/Rethinking_of_PAR/blob/master/metrics/pedestrian_metrics.py"
+ """
+ pred_label = (preds_probs > threshold).astype(int)
+
+ eps = 1e-20
+ result = EasyDict()
+
+    # positions labeled -1 are "unknown" and are excluded from the statistics below
+    unknown_mask = gt_label == -1
+    pred_label[unknown_mask] = -1
+
+ ###############################
+ # label metrics
+ # TP + FN
+ result.gt_pos = np.sum((gt_label == 1), axis=0).astype(float)
+ # TN + FP
+ result.gt_neg = np.sum((gt_label == 0), axis=0).astype(float)
+ # TP
+ result.true_pos = np.sum((gt_label == 1) * (pred_label == 1),
+ axis=0).astype(float)
+ # TN
+ result.true_neg = np.sum((gt_label == 0) * (pred_label == 0),
+ axis=0).astype(float)
+ # FP
+ result.false_pos = np.sum(((gt_label == 0) * (pred_label == 1)),
+ axis=0).astype(float)
+ # FN
+ result.false_neg = np.sum(((gt_label == 1) * (pred_label == 0)),
+ axis=0).astype(float)
+
+ ################
+ # instance metrics
+ result.gt_pos_ins = np.sum((gt_label == 1), axis=1).astype(float)
+ result.true_pos_ins = np.sum((pred_label == 1), axis=1).astype(float)
+ # true positive
+ result.intersect_pos = np.sum((gt_label == 1) * (pred_label == 1),
+ axis=1).astype(float)
+    # union of positives (denominator of the IoU-style instance accuracy)
+ result.union_pos = np.sum(((gt_label == 1) + (pred_label == 1)),
+ axis=1).astype(float)
+
+ return result
+
+
+class ATTRMetric(nn.Layer):
+    def __init__(self, threshold=0.5):
+        super().__init__()
+        self.threshold = threshold
+        self.reset()
+
+    def reset(self):
+        self.attrmeter = AttrMeter(threshold=self.threshold)
+
+ def forward(self, output, target):
+ metric_dict = get_attr_metrics(target[:, 0, :].numpy(),
+ output.numpy(), self.threshold)
+ self.attrmeter.update(metric_dict)
return metric_dict
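
The `TprAtFpr` metric sweeps 10000 score thresholds and reports the best TPR whose FPR stays at or below `max_fpr`. A self-contained numpy sketch of the same sweep on made-up scores:

```python
# a self-contained numpy sketch of the TprAtFpr threshold sweep on toy scores;
# the score lists are made up for illustration.
import numpy as np

pos_scores = np.array([0.95, 0.90, 0.70, 0.40])  # scores of true "someone" samples
neg_scores = np.array([0.30, 0.20, 0.10, 0.05])  # scores of true "nobody" samples
max_fpr = 1 / 1000.

best = (0., "")
for i in range(10000):
    t = i / 10000.
    tpr = np.sum(pos_scores > t) / len(pos_scores)
    fpr = np.sum(neg_scores > t) / len(neg_scores)
    if fpr <= max_fpr and tpr > best[0]:
        best = (tpr, "threshold: {}, fpr: {}, tpr: {:.5f}".format(t, fpr, tpr))
print(best[1])  # highest TPR whose FPR stays under 1/1000
```
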
diff --git a/ppcls/static/program.py b/ppcls/static/program.py
index 29107c9c1c1d8f571f0f8cf1cf0b7357ae3100ea..7f2313a58f45bcf05de3c8c92fd205eeabcb4c3e 100644
--- a/ppcls/static/program.py
+++ b/ppcls/static/program.py
@@ -439,8 +439,7 @@ def run(dataloader,
logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info))
else:
end_epoch_str = "END epoch:{:<3d}".format(epoch)
- logger.info("{:s} {:s} {:s} {:s}".format(end_epoch_str, mode, end_str,
- ips_info))
+ logger.info("{:s} {:s} {:s}".format(end_epoch_str, mode, end_str))
if use_dali:
dataloader.reset()
diff --git a/ppcls/utils/cls_demo/person_label_list.txt b/ppcls/utils/cls_demo/person_label_list.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8eea2b6dc2433abf303a0ea508021698559b749b
--- /dev/null
+++ b/ppcls/utils/cls_demo/person_label_list.txt
@@ -0,0 +1,2 @@
+0 nobody
+1 someone
diff --git a/ppcls/utils/misc.py b/ppcls/utils/misc.py
index 08ab7b6f77cb85b0a822713ee7d573d561762d14..8015552437998264322661518ba3ce40c7cd7db5 100644
--- a/ppcls/utils/misc.py
+++ b/ppcls/utils/misc.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import numpy as np
+import paddle
+
__all__ = ['AverageMeter']
@@ -42,6 +44,12 @@ class AverageMeter(object):
self.count += n
self.avg = self.sum / self.count
+ @property
+ def avg_info(self):
+ if isinstance(self.avg, paddle.Tensor):
+ self.avg = self.avg.numpy()[0]
+ return "{}: {:.5f}".format(self.name, self.avg)
+
@property
def total(self):
return '{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'.format(
@@ -61,3 +69,87 @@ class AverageMeter(object):
def value(self):
return '{self.name}: {self.val:{self.fmt}}{self.postfix}'.format(
self=self)
+
+
+class AttrMeter(object):
+ """
+ Computes and stores the average and current value
+ Code was based on https://github.com/pytorch/examples/blob/master/imagenet/main.py
+ """
+
+ def __init__(self, threshold=0.5):
+ self.threshold = threshold
+ self.reset()
+
+ def reset(self):
+ self.gt_pos = 0
+ self.gt_neg = 0
+ self.true_pos = 0
+ self.true_neg = 0
+ self.false_pos = 0
+ self.false_neg = 0
+
+ self.gt_pos_ins = []
+ self.true_pos_ins = []
+ self.intersect_pos = []
+ self.union_pos = []
+
+ def update(self, metric_dict):
+ self.gt_pos += metric_dict['gt_pos']
+ self.gt_neg += metric_dict['gt_neg']
+ self.true_pos += metric_dict['true_pos']
+ self.true_neg += metric_dict['true_neg']
+ self.false_pos += metric_dict['false_pos']
+ self.false_neg += metric_dict['false_neg']
+
+ self.gt_pos_ins += metric_dict['gt_pos_ins'].tolist()
+ self.true_pos_ins += metric_dict['true_pos_ins'].tolist()
+ self.intersect_pos += metric_dict['intersect_pos'].tolist()
+ self.union_pos += metric_dict['union_pos'].tolist()
+
+ def res(self):
+ eps = 1e-20
+ label_pos_recall = 1.0 * self.true_pos / (
+ self.gt_pos + eps) # true positive
+ label_neg_recall = 1.0 * self.true_neg / (
+ self.gt_neg + eps) # true negative
+ # mean accuracy
+ label_ma = (label_pos_recall + label_neg_recall) / 2
+
+ label_pos_recall = np.mean(label_pos_recall)
+ label_neg_recall = np.mean(label_neg_recall)
+ label_prec = (self.true_pos / (self.true_pos + self.false_pos + eps))
+ label_acc = (self.true_pos /
+ (self.true_pos + self.false_pos + self.false_neg + eps))
+ label_f1 = np.mean(2 * label_prec * label_pos_recall /
+ (label_prec + label_pos_recall + eps))
+
+ ma = (np.mean(label_ma))
+
+ self.gt_pos_ins = np.array(self.gt_pos_ins)
+ self.true_pos_ins = np.array(self.true_pos_ins)
+ self.intersect_pos = np.array(self.intersect_pos)
+ self.union_pos = np.array(self.union_pos)
+        instance_acc = np.mean(self.intersect_pos / (self.union_pos + eps))
+        instance_prec = np.mean(self.intersect_pos / (self.true_pos_ins + eps))
+        instance_recall = np.mean(self.intersect_pos / (self.gt_pos_ins + eps))
+        instance_f1 = 2 * instance_prec * instance_recall / (
+            instance_prec + instance_recall + eps)
+
+ res = [
+ ma, label_f1, label_pos_recall, label_neg_recall, instance_f1,
+ instance_acc, instance_prec, instance_recall
+ ]
+ return res
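
Putting `get_attr_metrics` and `AttrMeter` together, evaluation accumulates per-batch counts and reduces them once at the end; a toy sketch with made-up labels, assuming ppcls is importable:

```python
# a small sketch of the AttrMeter accumulation loop on toy batches;
# the labels and probabilities below are made up.
import numpy as np

from ppcls.metric.metrics import get_attr_metrics
from ppcls.utils.misc import AttrMeter

meter = AttrMeter(threshold=0.5)
for _ in range(2):  # pretend two evaluation batches
    gt = np.array([[1, 0, 1], [0, 1, -1]])              # -1 marks unknown labels
    probs = np.array([[0.9, 0.2, 0.6], [0.4, 0.8, 0.7]])
    meter.update(get_attr_metrics(gt, probs, threshold=0.5))

(ma, label_f1, pos_recall, neg_recall,
 ins_f1, ins_acc, ins_prec, ins_recall) = meter.res()
print("mA={:.4f} instance_f1={:.4f}".format(ma, ins_f1))
```
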
diff --git a/ppcls/utils/save_load.py b/ppcls/utils/save_load.py
index 4e27f12c1d4830f2f16580bfa976cf3ace78d934..04486cc273bbfe9e3d9863b4c4ded6a8d283eee3 100644
--- a/ppcls/utils/save_load.py
+++ b/ppcls/utils/save_load.py
@@ -42,6 +42,14 @@ def _mkdir_if_not_exist(path):
raise OSError('Failed to mkdir {}'.format(path))
+def _extract_student_weights(all_params, student_prefix="Student."):
+ s_params = {
+ key[len(student_prefix):]: all_params[key]
+ for key in all_params if student_prefix in key
+ }
+ return s_params
+
+
def load_dygraph_pretrain(model, path=None):
if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
raise ValueError("Model pretrain path {}.pdparams does not "
@@ -105,7 +113,8 @@ def init_model(config, net, optimizer=None, loss: paddle.nn.Layer=None):
net.set_state_dict(para_dict)
loss.set_state_dict(para_dict)
for i in range(len(optimizer)):
- optimizer[i].set_state_dict(opti_dict)
+ optimizer[i].set_state_dict(opti_dict[i] if isinstance(
+ opti_dict, list) else opti_dict)
logger.info("Finish load checkpoints from {}".format(checkpoints))
return metric_dict
@@ -117,7 +126,7 @@ def init_model(config, net, optimizer=None, loss: paddle.nn.Layer=None):
else: # common load
load_dygraph_pretrain(net, path=pretrained_model)
logger.info("Finish load pretrained model from {}".format(
- pretrained_model))
+ pretrained_model))
def save_model(net,
@@ -126,7 +135,8 @@ def save_model(net,
model_path,
model_name="",
prefix='ppcls',
- loss: paddle.nn.Layer=None):
+ loss: paddle.nn.Layer=None,
+ save_student_model=False):
"""
save model to the target path
"""
@@ -137,11 +147,18 @@ def save_model(net,
model_path = os.path.join(model_path, prefix)
params_state_dict = net.state_dict()
- loss_state_dict = loss.state_dict()
- keys_inter = set(params_state_dict.keys()) & set(loss_state_dict.keys())
- assert len(keys_inter) == 0, \
- f"keys in model and loss state_dict must be unique, but got intersection {keys_inter}"
- params_state_dict.update(loss_state_dict)
+ if loss is not None:
+ loss_state_dict = loss.state_dict()
+ keys_inter = set(params_state_dict.keys()) & set(loss_state_dict.keys(
+ ))
+ assert len(keys_inter) == 0, \
+ f"keys in model and loss state_dict must be unique, but got intersection {keys_inter}"
+ params_state_dict.update(loss_state_dict)
+
+ if save_student_model:
+ s_params = _extract_student_weights(params_state_dict)
+ if len(s_params) > 0:
+ paddle.save(s_params, model_path + "_student.pdparams")
paddle.save(params_state_dict, model_path + ".pdparams")
paddle.save([opt.state_dict() for opt in optimizer], model_path + ".pdopt")
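
`_extract_student_weights` simply strips the `Student.` prefix so the distilled student can later be loaded as a plain model; a quick illustration with a made-up state_dict:

```python
# a quick illustration of _extract_student_weights on a toy state_dict;
# keys are made up to mimic a distillation Arch with Teacher/Student branches.
from ppcls.utils.save_load import _extract_student_weights

all_params = {
    "Teacher.backbone.conv.weight": "t_w",
    "Student.backbone.conv.weight": "s_w",
    "Student.head.fc.bias": "s_b",
}
s_params = _extract_student_weights(all_params)
print(s_params)  # {'backbone.conv.weight': 's_w', 'head.fc.bias': 's_b'}
```
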
diff --git a/requirements.txt b/requirements.txt
index 79f548c2232dc0af5e77390afd23bfb938a2b103..4787aa84805e84c26a1030f773fbd89826e1aa56 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,8 +4,9 @@ opencv-python==4.4.0.46
pillow
tqdm
PyYAML
-visualdl >= 2.2.0
+visualdl>=2.2.0
scipy
-scikit-learn==0.23.2
+scikit-learn>=0.21.0
gast==0.3.3
faiss-cpu==1.7.1.post2
+easydict
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_train_infer_python.txt b/test_tipc/config/PPHGNet/PPHGNet_small_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e787bb0521500ac257a94ed30e892eb4a016a738
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
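
In these test_tipc config files, `|`-separated values enumerate the variants the test driver sweeps (e.g. GPU on/off, batch size 1 and 16). A toy parse of the convention, not the actual test_tipc parser:

```python
# a toy illustration of the "key:val1|val2" convention used in test_tipc config
# files; this is NOT the real test_tipc parser, just a sketch of the format.
line = "-o Global.use_gpu:True|False"
key, _, values = line.partition(":")
variants = values.split("|")
print(key, variants)  # -o Global.use_gpu ['True', 'False']
```
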
diff --git a/test_tipc/config/PPHGNet/PPHGNet_tiny_train_infer_python.txt b/test_tipc/config/PPHGNet/PPHGNet_tiny_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..546b9fa1ef5de70730e9e4a6425c23bf729ef017
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_tiny_train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=232
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/tools/run.sh b/tools/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..accf628f4bdc87142279e774abfa25634f1e243a
--- /dev/null
+++ b/tools/run.sh
@@ -0,0 +1,302 @@
+#!/usr/bin/env bash
+GPU_IDS="0,1,2,3"
+
+# Basic Config
+CONFIG="ppcls/configs/cls_demo/person/PPLCNet/PPLCNet_x1_0.yaml"
+EPOCHS=1
+OUTPUT="output_debug4"
+STATUS_LOG="${OUTPUT}/status_result.log"
+RESULT="${OUTPUT}/result.log"
+
+
+# Search Options
+LR_LIST=( 0.0075 0.01 0.0125 )
+RESOLUTION_LIST=( 176 192 224 )
+RA_PROB_LIST=( 0.0 0.1 0.5 )
+RE_PROB_LIST=( 0.0 0.1 0.5 )
+LR_MULT_LIST=( [0.0,0.2,0.4,0.6,0.8,1.0] [0.0,0.4,0.4,0.8,0.8,1.0] )
+TEACHER_LIST=( "ResNet101_vd" "ResNet50_vd" )
+
+
+# Train Mode
+declare -A MODE_MAP
+MODE_MAP=(["search_lr"]=1 ["search_resolution"]=1 ["search_ra_prob"]=1 ["search_re_prob"]=1 ["search_lr_mult_list"]=1 ["search_teacher"]=1 ["train_distillation_model"]=1)
+
+export CUDA_VISIBLE_DEVICES=${GPU_IDS}
+
+
+function status_check(){
+ last_status=$1 # the exit code
+ run_command=$2
+ run_log=$3
+ if [ $last_status -eq 0 ]; then
+ echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
+ else
+ echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
+ fi
+}
+
+
+function get_max_value(){
+ array=($*)
+ max=${array[0]}
+ index=0
+    for (( i=0; i<${#array[*]}; i++ )); do
+ if [[ $(echo "${array[$i]} > $max"|bc) -eq 1 ]]; then
+ max=${array[$i]}
+ index=${i}
+ else
+ continue
+ fi
+ done
+ echo ${max}
+ echo ${index}
+}
+
+function get_best_info(){
+ _parameter=$1
+ params_index=2
+ if [[ ${_parameter} == "TEACHER" ]]; then
+ params_index=3
+ fi
+ parameters_list=$(find ${OUTPUT}/${_parameter}* -name train.log | awk -v params_index=${params_index} -F "/" '{print $params_index}')
+ metric_list=$(find ${OUTPUT}/${_parameter}* -name train.log | xargs cat | grep "best" | grep "Epoch ${EPOCHS}" | awk -F " " '{print substr($NF,0,7)}')
+ best_info=$(get_max_value ${metric_list[*]})
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ best_index=$(echo $best_info | awk -F " " '{print $2}')
+ best_parameter=$(echo $parameters_list | awk -v best=$(($best_index+1)) '{print $best}' | awk -F "_" '{print $2}')
+ echo ${best_metric}
+ echo ${best_parameter}
+}
+
+
+function search_lr(){
+ for lr in ${LR_LIST[*]}; do
+ cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
+ -c ${CONFIG} \
+ -o Global.output_dir=${OUTPUT}/LR_${lr} \
+ -o Optimizer.lr.learning_rate=${lr} \
+ -o Global.epochs=${EPOCHS}"
+ eval ${cmd_train}
+ status_check $? "${cmd_train}" "${STATUS_LOG}"
+ cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
+ eval ${cmd}
+ done
+}
+
+
+function search_resolution(){
+ _lr=$1
+ for resolution in ${RESOLUTION_LIST[*]}; do
+ cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
+ -c ${CONFIG} \
+ -o Global.output_dir=${OUTPUT}/RESOLUTION_${resolution} \
+ -o Optimizer.lr.learning_rate=${_lr} \
+ -o Global.epochs=${EPOCHS} \
+ -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${resolution}"
+ eval ${cmd_train}
+ status_check $? "${cmd_train}" "${STATUS_LOG}"
+ cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
+ eval ${cmd}
+ done
+}
+
+
+
+function search_ra_prob(){
+ _lr=$1
+ _resolution=$2
+ for ra_prob in ${RA_PROB_LIST[*]}; do
+ cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
+ -c ${CONFIG} \
+ -o Global.output_dir=${OUTPUT}/RA_${ra_prob} \
+ -o Optimizer.lr.learning_rate=${_lr} \
+ -o Global.epochs=${EPOCHS} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${ra_prob} \
+ -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution}"
+ eval ${cmd_train}
+ status_check $? "${cmd_train}" "${STATUS_LOG}"
+ cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
+ eval ${cmd}
+ done
+}
+
+
+
+function search_re_prob(){
+ _lr=$1
+ _resolution=$2
+ _ra_prob=$3
+ for re_prob in ${RE_PROB_LIST[*]}; do
+ cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
+ -c ${CONFIG} \
+ -o Global.output_dir=${OUTPUT}/RE_${re_prob} \
+ -o Optimizer.lr.learning_rate=${_lr} \
+ -o Global.epochs=${EPOCHS} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
+ -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${re_prob} \
+ -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution}"
+ eval ${cmd_train}
+ status_check $? "${cmd_train}" "${STATUS_LOG}"
+ cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
+ eval ${cmd}
+ done
+}
+
+
+function search_lr_mult_list(){
+ _lr=$1
+ _resolution=$2
+ _ra_prob=$3
+ _re_prob=$4
+
+ for lr_mult in ${LR_MULT_LIST[*]}; do
+ cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
+ -c ${CONFIG} \
+ -o Global.output_dir=${OUTPUT}/LR_MULT_${lr_mult} \
+ -o Optimizer.lr.learning_rate=${_lr} \
+ -o Global.epochs=${EPOCHS} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
+ -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \
+ -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \
+ -o Arch.lr_mult_list=${lr_mult}"
+ eval ${cmd_train}
+ status_check $? "${cmd_train}" "${STATUS_LOG}"
+ cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
+ eval ${cmd}
+ done
+
+}
+
+
+function search_teacher(){
+ _lr=$1
+ _resolution=$2
+ _ra_prob=$3
+ _re_prob=$4
+
+ for teacher in ${TEACHER_LIST[*]}; do
+ cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \
+ -c ${CONFIG} \
+ -o Global.output_dir=${OUTPUT}/TEACHER_${teacher} \
+ -o Optimizer.lr.learning_rate=${_lr} \
+ -o Global.epochs=${EPOCHS} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
+ -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \
+ -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \
+ -o Arch.name=${teacher}"
+ eval ${cmd_train}
+ status_check $? "${cmd_train}" "${STATUS_LOG}"
+ cmd="find ${OUTPUT}/* -name epoch* | xargs rm -rf"
+ eval ${cmd}
+ done
+}
+
+
+# train the model for knowledge distillation
+function train_distillation_model(){
+ _lr=$1
+ _resolution=$2
+ _ra_prob=$3
+ _re_prob=$4
+ _lr_mult=$5
+ teacher=$6
+ t_pretrained_model="${OUTPUT}/TEACHER_${teacher}/${teacher}/best_model"
+ config="ppcls/configs/cls_demo/person/Distillation/PPLCNet_x1_0_distillation.yaml"
+ combined_label_list="./dataset/person/train_list_for_distill.txt"
+
+ cmd_train="python3.7 -m paddle.distributed.launch \
+ --gpus=${GPU_IDS} \
+ tools/train.py -c ${config} \
+ -o Global.output_dir=${OUTPUT}/kd_teacher \
+ -o Optimizer.lr.learning_rate=${_lr} \
+ -o Global.epochs=${EPOCHS} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \
+ -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \
+ -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \
+ -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \
+ -o DataLoader.Train.dataset.cls_label_path=${combined_label_list} \
+ -o Arch.models.0.Teacher.name="${teacher}" \
+ -o Arch.models.0.Teacher.pretrained="${t_pretrained_model}" \
+ -o Arch.models.1.Student.lr_mult_list=${_lr_mult}"
+ eval ${cmd_train}
+ status_check $? "${cmd_train}" "${STATUS_LOG}"
+ cmd="find ${OUTPUT} -name epoch* | xargs rm -rf"
+ eval ${cmd}
+}
+
+######## Train PaddleClas ########
+rm -rf ${OUTPUT}
+
+# Train and get best lr
+best_lr=0.01
+if [[ ${MODE_MAP["search_lr"]} -eq 1 ]]; then
+ search_lr
+ best_info=$(get_best_info "LR_[0-9]")
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ best_lr=$(echo $best_info | awk -F " " '{print $2}')
+ echo "The best lr is ${best_lr}, and the best metric is ${best_metric}" >> ${RESULT}
+fi
+
+# Train and get best resolution
+best_resolution=192
+if [[ ${MODE_MAP["search_resolution"]} -eq 1 ]]; then
+ search_resolution "${best_lr}"
+ best_info=$(get_best_info "RESOLUTION")
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ best_resolution=$(echo $best_info | awk -F " " '{print $2}')
+ echo "The best resolution is ${best_resolution}, and the best metric is ${best_metric}" >> ${RESULT}
+fi
+
+# Train and get best ra_prob
+best_ra_prob=0.0
+if [[ ${MODE_MAP["search_ra_prob"]} -eq 1 ]]; then
+ search_ra_prob "${best_lr}" "${best_resolution}"
+ best_info=$(get_best_info "RA")
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ best_ra_prob=$(echo $best_info | awk -F " " '{print $2}')
+ echo "The best ra_prob is ${best_ra_prob}, and the best metric is ${best_metric}" >> ${RESULT}
+fi
+
+# Train and get best re_prob
+best_re_prob=0.1
+if [[ ${MODE_MAP["search_re_prob"]} -eq 1 ]]; then
+ search_re_prob "${best_lr}" "${best_resolution}" "${best_ra_prob}"
+ best_info=$(get_best_info "RE")
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ best_re_prob=$(echo $best_info | awk -F " " '{print $2}')
+ echo "The best re_prob is ${best_re_prob}, and the best metric is ${best_metric}" >> ${RESULT}
+fi
+
+# Train and get best lr_mult_list
+best_lr_mult_list=[1.0,1.0,1.0,1.0,1.0,1.0]
+if [[ ${MODE_MAP["search_lr_mult_list"]} -eq 1 ]]; then
+ search_lr_mult_list "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}"
+ best_info=$(get_best_info "LR_MULT")
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ best_lr_mult_list=$(echo $best_info | awk -F " " '{print $2}')
+ echo "The best lr_mult_list is ${best_lr_mult_list}, and the best metric is ${best_metric}" >> ${RESULT}
+fi
+
+# train and get best teacher
+best_teacher="ResNet101_vd"
+if [[ ${MODE_MAP["search_teacher"]} -eq 1 ]]; then
+ search_teacher "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}"
+ best_info=$(get_best_info "TEACHER")
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ best_teacher=$(echo $best_info | awk -F " " '{print $2}')
+ echo "The best teacher is ${best_teacher}, and the best metric is ${best_metric}" >> ${RESULT}
+fi
+
+# train the distillation model
+if [[ ${MODE_MAP["train_distillation_model"]} -eq 1 ]]; then
+ train_distillation_model "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}" "${best_lr_mult_list}" ${best_teacher}
+ best_info=$(get_best_info "kd_teacher/DistillationModel")
+ best_metric=$(echo $best_info | awk -F " " '{print $1}')
+ echo "the distillation best metric is ${best_metric}, it is global best metric!" >> ${RESULT}
+fi
+
diff --git a/tools/search_strategy.py b/tools/search_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..15f4aa71be67bbd0f5ec92d240bbc53896684d91
--- /dev/null
+++ b/tools/search_strategy.py
@@ -0,0 +1,112 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import os
+import sys
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import subprocess
+import numpy as np
+
+from ppcls.utils import config
+
+
+def get_result(log_dir):
+ log_file = "{}/train.log".format(log_dir)
+ with open(log_file, "r") as f:
+ raw = f.read()
+ res = float(raw.split("best metric: ")[-1].split("]")[0])
+ return res
+
+
+def search_train(search_list, base_program, base_output_dir, search_key,
+ config_replace_value, model_name, search_times=1):
+ best_res = 0.
+ best = search_list[0]
+ all_result = {}
+ for search_i in search_list:
+ program = base_program.copy()
+ for v in config_replace_value:
+ program += ["-o", "{}={}".format(v, search_i)]
+ if v == "Arch.name":
+ model_name = search_i
+ res_list = []
+ for j in range(search_times):
+ output_dir = "{}/{}_{}_{}".format(base_output_dir, search_key, search_i, j).replace(".", "_")
+ program += ["-o", "Global.output_dir={}".format(output_dir)]
+ process = subprocess.Popen(program)
+ process.communicate()
+ res = get_result("{}/{}".format(output_dir, model_name))
+ res_list.append(res)
+ all_result[str(search_i)] = res_list
+
+ if np.mean(res_list) > best_res:
+ best = search_i
+ best_res = np.mean(res_list)
+ all_result["best"] = best
+ return all_result
+
+
+def search_strategy():
+ args = config.parse_args()
+ configs = config.get_config(args.config, overrides=args.override, show=False)
+ base_config_file = configs["base_config_file"]
+ distill_config_file = configs["distill_config_file"]
+ model_name = config.get_config(base_config_file)["Arch"]["name"]
+ gpus = configs["gpus"]
+ gpus = ",".join([str(i) for i in gpus])
+ base_program = ["python3.7", "-m", "paddle.distributed.launch", "--gpus={}".format(gpus),
+ "tools/train.py", "-c", base_config_file]
+ base_output_dir = configs["output_dir"]
+ search_times = configs["search_times"]
+ search_dict = configs.get("search_dict")
+ all_results = {}
+ for search_i in search_dict:
+ search_key = search_i["search_key"]
+ search_values = search_i["search_values"]
+ replace_config = search_i["replace_config"]
+ res = search_train(search_values, base_program, base_output_dir,
+ search_key, replace_config, model_name, search_times)
+ all_results[search_key] = res
+ best = res.get("best")
+ for v in replace_config:
+ base_program += ["-o", "{}={}".format(v, best)]
+
+ teacher_configs = configs.get("teacher", None)
+ if teacher_configs is not None:
+ teacher_program = base_program.copy()
+ # remove incompatible keys
+ teacher_rm_keys = teacher_configs["rm_keys"]
+ rm_indices = []
+ for rm_k in teacher_rm_keys:
+ for ind, ki in enumerate(base_program):
+ if rm_k in ki:
+ rm_indices.append(ind)
+ for rm_index in rm_indices[::-1]:
+ teacher_program.pop(rm_index)
+ teacher_program.pop(rm_index-1)
+ replace_config = ["Arch.name"]
+ teacher_list = teacher_configs["search_values"]
+ res = search_train(teacher_list, teacher_program, base_output_dir, "teacher", replace_config, model_name)
+ all_results["teacher"] = res
+ best = res.get("best")
+ t_pretrained = "{}/{}_{}_0/{}/best_model".format(base_output_dir, "teacher", best, best)
+ base_program += ["-o", "Arch.models.0.Teacher.name={}".format(best),
+ "-o", "Arch.models.0.Teacher.pretrained={}".format(t_pretrained)]
+ output_dir = "{}/search_res".format(base_output_dir)
+ base_program += ["-o", "Global.output_dir={}".format(output_dir)]
+ final_replace = configs.get('final_replace')
+ for i in range(len(base_program)):
+ base_program[i] = base_program[i].replace(base_config_file, distill_config_file)
+ for k in final_replace:
+ v = final_replace[k]
+ base_program[i] = base_program[i].replace(k, v)
+
+ process = subprocess.Popen(base_program)
+ process.communicate()
+ print(all_results, base_program)
+
+
+if __name__ == '__main__':
+ search_strategy()
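
Each search stage in `search_strategy.py` freezes its best value by appending another `-o key=value` override to the launch command before the next stage runs. A minimal sketch of that accumulation (the keys and best values are illustrative):

```python
# a minimal sketch of how the training command accumulates "-o" overrides
# across search stages; the best values below are illustrative only.
base_program = ["python3.7", "-m", "paddle.distributed.launch", "--gpus=0,1",
                "tools/train.py", "-c",
                "ppcls/configs/cls_demo/person/PPLCNet/PPLCNet_x1_0.yaml"]

best_per_stage = [
    ("Optimizer.lr.learning_rate", 0.01),
    ("DataLoader.Train.dataset.transform_ops.1.RandCropImage.size", 192),
]
for key, best_value in best_per_stage:
    base_program += ["-o", "{}={}".format(key, best_value)]

print(" ".join(base_program))
```
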