diff --git a/.gitmodules b/.gitmodules
index 464b36ae3542d12aee39d1a421350fcbf80912f9..3986f972f89287a4ac41852170d2d8a240fa3c9f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -13,3 +13,7 @@
[submodule "PaddleSpeech/DeepSpeech"]
path = PaddleSpeech/DeepSpeech
url = https://github.com/PaddlePaddle/DeepSpeech.git
+[submodule "PaddleCV/PaddleDetection"]
+ path = PaddleCV/PaddleDetection
+ url = https://github.com/PaddlePaddle/PaddleDetection.git
+ branch = release/0.2
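Note: the new submodule entry above pins branch = release/0.2. As a rough sketch (standard git commands, nothing repository-specific assumed), a checkout would pick it up like this:

    # fetch the submodule at the commit recorded by this repository
    git submodule update --init PaddleCV/PaddleDetection

    # or, honoring the branch = release/0.2 setting in .gitmodules,
    # move the submodule to the tip of that branch instead
    git submodule update --remote PaddleCV/PaddleDetection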
diff --git a/PaddleCV/Paddle3D/PointNet++/.gitignore b/PaddleCV/3d_vision/PointNet++/.gitignore
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/.gitignore
rename to PaddleCV/3d_vision/PointNet++/.gitignore
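Note: every entry of this shape below is a pure rename (similarity index 100%): the file contents are unchanged and only the path moves, here from PaddleCV/Paddle3D to PaddleCV/3d_vision (and similarly for the gan and video directories further down). A brief sketch of working with such entries, using standard git and one of the paths renamed above:

    # rename detection: -M makes diff report moves as renames, not add/delete pairs
    git diff -M --summary HEAD~1 HEAD

    # follow a file's history across the Paddle3D -> 3d_vision move
    git log --follow -- PaddleCV/3d_vision/PointNet++/README.md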
diff --git a/PaddleCV/Paddle3D/PointNet++/README.md b/PaddleCV/3d_vision/PointNet++/README.md
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/README.md
rename to PaddleCV/3d_vision/PointNet++/README.md
diff --git a/PaddleCV/Paddle3D/PointNet++/README_en.md b/PaddleCV/3d_vision/PointNet++/README_en.md
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/README_en.md
rename to PaddleCV/3d_vision/PointNet++/README_en.md
diff --git a/PaddleCV/Paddle3D/PointNet++/data/__init__.py b/PaddleCV/3d_vision/PointNet++/data/__init__.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/data/__init__.py
rename to PaddleCV/3d_vision/PointNet++/data/__init__.py
diff --git a/PaddleCV/Paddle3D/PointNet++/data/data_utils.py b/PaddleCV/3d_vision/PointNet++/data/data_utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/data/data_utils.py
rename to PaddleCV/3d_vision/PointNet++/data/data_utils.py
diff --git a/PaddleCV/Paddle3D/PointNet++/data/indoor3d_reader.py b/PaddleCV/3d_vision/PointNet++/data/indoor3d_reader.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/data/indoor3d_reader.py
rename to PaddleCV/3d_vision/PointNet++/data/indoor3d_reader.py
diff --git a/PaddleCV/Paddle3D/PointNet++/data/modelnet40_reader.py b/PaddleCV/3d_vision/PointNet++/data/modelnet40_reader.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/data/modelnet40_reader.py
rename to PaddleCV/3d_vision/PointNet++/data/modelnet40_reader.py
diff --git a/PaddleCV/Paddle3D/PointNet++/dataset/Indoor3DSemSeg/download.sh b/PaddleCV/3d_vision/PointNet++/dataset/Indoor3DSemSeg/download.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/dataset/Indoor3DSemSeg/download.sh
rename to PaddleCV/3d_vision/PointNet++/dataset/Indoor3DSemSeg/download.sh
diff --git a/PaddleCV/Paddle3D/PointNet++/dataset/ModelNet40/download.sh b/PaddleCV/3d_vision/PointNet++/dataset/ModelNet40/download.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/dataset/ModelNet40/download.sh
rename to PaddleCV/3d_vision/PointNet++/dataset/ModelNet40/download.sh
diff --git a/PaddleCV/Paddle3D/PointNet++/eval_cls.py b/PaddleCV/3d_vision/PointNet++/eval_cls.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/eval_cls.py
rename to PaddleCV/3d_vision/PointNet++/eval_cls.py
diff --git a/PaddleCV/Paddle3D/PointNet++/eval_seg.py b/PaddleCV/3d_vision/PointNet++/eval_seg.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/eval_seg.py
rename to PaddleCV/3d_vision/PointNet++/eval_seg.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/README.md b/PaddleCV/3d_vision/PointNet++/ext_op/README.md
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/README.md
rename to PaddleCV/3d_vision/PointNet++/ext_op/README.md
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/__init__.py b/PaddleCV/3d_vision/PointNet++/ext_op/__init__.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/__init__.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/__init__.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/pointnet_lib.py b/PaddleCV/3d_vision/PointNet++/ext_op/pointnet_lib.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/pointnet_lib.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/pointnet_lib.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/farthest_point_sampling_op.cc b/PaddleCV/3d_vision/PointNet++/ext_op/src/farthest_point_sampling_op.cc
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/farthest_point_sampling_op.cc
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/farthest_point_sampling_op.cc
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/farthest_point_sampling_op.cu b/PaddleCV/3d_vision/PointNet++/ext_op/src/farthest_point_sampling_op.cu
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/farthest_point_sampling_op.cu
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/farthest_point_sampling_op.cu
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/gather_point_op.cc b/PaddleCV/3d_vision/PointNet++/ext_op/src/gather_point_op.cc
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/gather_point_op.cc
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/gather_point_op.cc
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/gather_point_op.cu b/PaddleCV/3d_vision/PointNet++/ext_op/src/gather_point_op.cu
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/gather_point_op.cu
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/gather_point_op.cu
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/group_points_op.cc b/PaddleCV/3d_vision/PointNet++/ext_op/src/group_points_op.cc
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/group_points_op.cc
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/group_points_op.cc
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/group_points_op.cu b/PaddleCV/3d_vision/PointNet++/ext_op/src/group_points_op.cu
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/group_points_op.cu
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/group_points_op.cu
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/make.sh b/PaddleCV/3d_vision/PointNet++/ext_op/src/make.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/make.sh
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/make.sh
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/query_ball_op.cc b/PaddleCV/3d_vision/PointNet++/ext_op/src/query_ball_op.cc
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/query_ball_op.cc
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/query_ball_op.cc
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/query_ball_op.cu b/PaddleCV/3d_vision/PointNet++/ext_op/src/query_ball_op.cu
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/query_ball_op.cu
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/query_ball_op.cu
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/three_interp_op.cc b/PaddleCV/3d_vision/PointNet++/ext_op/src/three_interp_op.cc
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/three_interp_op.cc
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/three_interp_op.cc
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/three_interp_op.cu b/PaddleCV/3d_vision/PointNet++/ext_op/src/three_interp_op.cu
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/three_interp_op.cu
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/three_interp_op.cu
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/three_nn_op.cc b/PaddleCV/3d_vision/PointNet++/ext_op/src/three_nn_op.cc
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/three_nn_op.cc
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/three_nn_op.cc
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/three_nn_op.cu b/PaddleCV/3d_vision/PointNet++/ext_op/src/three_nn_op.cu
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/three_nn_op.cu
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/three_nn_op.cu
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/src/util.cu.h b/PaddleCV/3d_vision/PointNet++/ext_op/src/util.cu.h
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/src/util.cu.h
rename to PaddleCV/3d_vision/PointNet++/ext_op/src/util.cu.h
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_farthest_point_sampling_op.py b/PaddleCV/3d_vision/PointNet++/ext_op/tests/test_farthest_point_sampling_op.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_farthest_point_sampling_op.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/tests/test_farthest_point_sampling_op.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_gather_point_op.py b/PaddleCV/3d_vision/PointNet++/ext_op/tests/test_gather_point_op.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_gather_point_op.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/tests/test_gather_point_op.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_group_points_op.py b/PaddleCV/3d_vision/PointNet++/ext_op/tests/test_group_points_op.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_group_points_op.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/tests/test_group_points_op.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_query_ball_op.py b/PaddleCV/3d_vision/PointNet++/ext_op/tests/test_query_ball_op.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_query_ball_op.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/tests/test_query_ball_op.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_three_interp_op.py b/PaddleCV/3d_vision/PointNet++/ext_op/tests/test_three_interp_op.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_three_interp_op.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/tests/test_three_interp_op.py
diff --git a/PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_three_nn_op.py b/PaddleCV/3d_vision/PointNet++/ext_op/tests/test_three_nn_op.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/ext_op/tests/test_three_nn_op.py
rename to PaddleCV/3d_vision/PointNet++/ext_op/tests/test_three_nn_op.py
diff --git a/PaddleCV/Paddle3D/PointNet++/image/pointnet2.jpg b/PaddleCV/3d_vision/PointNet++/image/pointnet2.jpg
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/image/pointnet2.jpg
rename to PaddleCV/3d_vision/PointNet++/image/pointnet2.jpg
diff --git a/PaddleCV/Paddle3D/PointNet++/models/__init__.py b/PaddleCV/3d_vision/PointNet++/models/__init__.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/models/__init__.py
rename to PaddleCV/3d_vision/PointNet++/models/__init__.py
diff --git a/PaddleCV/Paddle3D/PointNet++/models/pointnet2_cls.py b/PaddleCV/3d_vision/PointNet++/models/pointnet2_cls.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/models/pointnet2_cls.py
rename to PaddleCV/3d_vision/PointNet++/models/pointnet2_cls.py
diff --git a/PaddleCV/Paddle3D/PointNet++/models/pointnet2_modules.py b/PaddleCV/3d_vision/PointNet++/models/pointnet2_modules.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/models/pointnet2_modules.py
rename to PaddleCV/3d_vision/PointNet++/models/pointnet2_modules.py
diff --git a/PaddleCV/Paddle3D/PointNet++/models/pointnet2_seg.py b/PaddleCV/3d_vision/PointNet++/models/pointnet2_seg.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/models/pointnet2_seg.py
rename to PaddleCV/3d_vision/PointNet++/models/pointnet2_seg.py
diff --git a/PaddleCV/Paddle3D/PointNet++/scripts/eval_cls.sh b/PaddleCV/3d_vision/PointNet++/scripts/eval_cls.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/scripts/eval_cls.sh
rename to PaddleCV/3d_vision/PointNet++/scripts/eval_cls.sh
diff --git a/PaddleCV/Paddle3D/PointNet++/scripts/eval_seg.sh b/PaddleCV/3d_vision/PointNet++/scripts/eval_seg.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/scripts/eval_seg.sh
rename to PaddleCV/3d_vision/PointNet++/scripts/eval_seg.sh
diff --git a/PaddleCV/Paddle3D/PointNet++/scripts/train_cls.sh b/PaddleCV/3d_vision/PointNet++/scripts/train_cls.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/scripts/train_cls.sh
rename to PaddleCV/3d_vision/PointNet++/scripts/train_cls.sh
diff --git a/PaddleCV/Paddle3D/PointNet++/scripts/train_seg.sh b/PaddleCV/3d_vision/PointNet++/scripts/train_seg.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/scripts/train_seg.sh
rename to PaddleCV/3d_vision/PointNet++/scripts/train_seg.sh
diff --git a/PaddleCV/Paddle3D/PointNet++/train_cls.py b/PaddleCV/3d_vision/PointNet++/train_cls.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/train_cls.py
rename to PaddleCV/3d_vision/PointNet++/train_cls.py
diff --git a/PaddleCV/Paddle3D/PointNet++/train_seg.py b/PaddleCV/3d_vision/PointNet++/train_seg.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/train_seg.py
rename to PaddleCV/3d_vision/PointNet++/train_seg.py
diff --git a/PaddleCV/Paddle3D/PointNet++/utils.py b/PaddleCV/3d_vision/PointNet++/utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointNet++/utils.py
rename to PaddleCV/3d_vision/PointNet++/utils.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/.gitignore b/PaddleCV/3d_vision/PointRCNN/.gitignore
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/.gitignore
rename to PaddleCV/3d_vision/PointRCNN/.gitignore
diff --git a/PaddleCV/Paddle3D/PointRCNN/README.md b/PaddleCV/3d_vision/PointRCNN/README.md
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/README.md
rename to PaddleCV/3d_vision/PointRCNN/README.md
diff --git a/PaddleCV/Paddle3D/PointRCNN/build_and_install.sh b/PaddleCV/3d_vision/PointRCNN/build_and_install.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/build_and_install.sh
rename to PaddleCV/3d_vision/PointRCNN/build_and_install.sh
diff --git a/PaddleCV/Paddle3D/PointRCNN/cfgs/default.yml b/PaddleCV/3d_vision/PointRCNN/cfgs/default.yml
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/cfgs/default.yml
rename to PaddleCV/3d_vision/PointRCNN/cfgs/default.yml
diff --git a/PaddleCV/Paddle3D/PointRCNN/data/KITTI/object/download.sh b/PaddleCV/3d_vision/PointRCNN/data/KITTI/object/download.sh
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/data/KITTI/object/download.sh
rename to PaddleCV/3d_vision/PointRCNN/data/KITTI/object/download.sh
diff --git a/PaddleCV/Paddle3D/PointRCNN/data/__init__.py b/PaddleCV/3d_vision/PointRCNN/data/__init__.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/data/__init__.py
rename to PaddleCV/3d_vision/PointRCNN/data/__init__.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/data/kitti_dataset.py b/PaddleCV/3d_vision/PointRCNN/data/kitti_dataset.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/data/kitti_dataset.py
rename to PaddleCV/3d_vision/PointRCNN/data/kitti_dataset.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/data/kitti_rcnn_reader.py b/PaddleCV/3d_vision/PointRCNN/data/kitti_rcnn_reader.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/data/kitti_rcnn_reader.py
rename to PaddleCV/3d_vision/PointRCNN/data/kitti_rcnn_reader.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/eval.py b/PaddleCV/3d_vision/PointRCNN/eval.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/eval.py
rename to PaddleCV/3d_vision/PointRCNN/eval.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/ext_op b/PaddleCV/3d_vision/PointRCNN/ext_op
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/ext_op
rename to PaddleCV/3d_vision/PointRCNN/ext_op
diff --git a/PaddleCV/Paddle3D/PointRCNN/images/teaser.png b/PaddleCV/3d_vision/PointRCNN/images/teaser.png
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/images/teaser.png
rename to PaddleCV/3d_vision/PointRCNN/images/teaser.png
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/__init__.py b/PaddleCV/3d_vision/PointRCNN/models/__init__.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/models/__init__.py
rename to PaddleCV/3d_vision/PointRCNN/models/__init__.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/loss_utils.py b/PaddleCV/3d_vision/PointRCNN/models/loss_utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/models/loss_utils.py
rename to PaddleCV/3d_vision/PointRCNN/models/loss_utils.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/point_rcnn.py b/PaddleCV/3d_vision/PointRCNN/models/point_rcnn.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/models/point_rcnn.py
rename to PaddleCV/3d_vision/PointRCNN/models/point_rcnn.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/pointnet2_modules.py b/PaddleCV/3d_vision/PointRCNN/models/pointnet2_modules.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/models/pointnet2_modules.py
rename to PaddleCV/3d_vision/PointRCNN/models/pointnet2_modules.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/pointnet2_msg.py b/PaddleCV/3d_vision/PointRCNN/models/pointnet2_msg.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/models/pointnet2_msg.py
rename to PaddleCV/3d_vision/PointRCNN/models/pointnet2_msg.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/rcnn.py b/PaddleCV/3d_vision/PointRCNN/models/rcnn.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/models/rcnn.py
rename to PaddleCV/3d_vision/PointRCNN/models/rcnn.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/rpn.py b/PaddleCV/3d_vision/PointRCNN/models/rpn.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/models/rpn.py
rename to PaddleCV/3d_vision/PointRCNN/models/rpn.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/requirement.txt b/PaddleCV/3d_vision/PointRCNN/requirement.txt
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/requirement.txt
rename to PaddleCV/3d_vision/PointRCNN/requirement.txt
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/generate_aug_scene.py b/PaddleCV/3d_vision/PointRCNN/tools/generate_aug_scene.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/generate_aug_scene.py
rename to PaddleCV/3d_vision/PointRCNN/tools/generate_aug_scene.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/generate_gt_database.py b/PaddleCV/3d_vision/PointRCNN/tools/generate_gt_database.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/generate_gt_database.py
rename to PaddleCV/3d_vision/PointRCNN/tools/generate_gt_database.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/kitti_eval.py b/PaddleCV/3d_vision/PointRCNN/tools/kitti_eval.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/kitti_eval.py
rename to PaddleCV/3d_vision/PointRCNN/tools/kitti_eval.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/LICENSE b/PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/LICENSE
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/LICENSE
rename to PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/LICENSE
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/README.md b/PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/README.md
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/README.md
rename to PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/README.md
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/eval.py b/PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/eval.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/eval.py
rename to PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/eval.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/evaluate.py b/PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/evaluate.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/evaluate.py
rename to PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/evaluate.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/kitti_common.py b/PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/kitti_common.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/kitti_common.py
rename to PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/kitti_common.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/rotate_iou.py b/PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/rotate_iou.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/tools/kitti_object_eval_python/rotate_iou.py
rename to PaddleCV/3d_vision/PointRCNN/tools/kitti_object_eval_python/rotate_iou.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/train.py b/PaddleCV/3d_vision/PointRCNN/train.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/train.py
rename to PaddleCV/3d_vision/PointRCNN/train.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/__init__.py b/PaddleCV/3d_vision/PointRCNN/utils/__init__.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/__init__.py
rename to PaddleCV/3d_vision/PointRCNN/utils/__init__.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/box_utils.py b/PaddleCV/3d_vision/PointRCNN/utils/box_utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/box_utils.py
rename to PaddleCV/3d_vision/PointRCNN/utils/box_utils.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/calibration.py b/PaddleCV/3d_vision/PointRCNN/utils/calibration.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/calibration.py
rename to PaddleCV/3d_vision/PointRCNN/utils/calibration.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/config.py b/PaddleCV/3d_vision/PointRCNN/utils/config.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/config.py
rename to PaddleCV/3d_vision/PointRCNN/utils/config.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/cyops/__init__.py b/PaddleCV/3d_vision/PointRCNN/utils/cyops/__init__.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/cyops/__init__.py
rename to PaddleCV/3d_vision/PointRCNN/utils/cyops/__init__.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/cyops/iou3d_utils.pyx b/PaddleCV/3d_vision/PointRCNN/utils/cyops/iou3d_utils.pyx
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/cyops/iou3d_utils.pyx
rename to PaddleCV/3d_vision/PointRCNN/utils/cyops/iou3d_utils.pyx
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/cyops/kitti_utils.pyx b/PaddleCV/3d_vision/PointRCNN/utils/cyops/kitti_utils.pyx
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/cyops/kitti_utils.pyx
rename to PaddleCV/3d_vision/PointRCNN/utils/cyops/kitti_utils.pyx
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/cyops/object3d.py b/PaddleCV/3d_vision/PointRCNN/utils/cyops/object3d.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/cyops/object3d.py
rename to PaddleCV/3d_vision/PointRCNN/utils/cyops/object3d.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/cyops/roipool3d_utils.pyx b/PaddleCV/3d_vision/PointRCNN/utils/cyops/roipool3d_utils.pyx
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/cyops/roipool3d_utils.pyx
rename to PaddleCV/3d_vision/PointRCNN/utils/cyops/roipool3d_utils.pyx
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/cyops/setup.py b/PaddleCV/3d_vision/PointRCNN/utils/cyops/setup.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/cyops/setup.py
rename to PaddleCV/3d_vision/PointRCNN/utils/cyops/setup.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/metric_utils.py b/PaddleCV/3d_vision/PointRCNN/utils/metric_utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/metric_utils.py
rename to PaddleCV/3d_vision/PointRCNN/utils/metric_utils.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/object3d.py b/PaddleCV/3d_vision/PointRCNN/utils/object3d.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/object3d.py
rename to PaddleCV/3d_vision/PointRCNN/utils/object3d.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/optimizer.py b/PaddleCV/3d_vision/PointRCNN/utils/optimizer.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/optimizer.py
rename to PaddleCV/3d_vision/PointRCNN/utils/optimizer.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/proposal_target.py b/PaddleCV/3d_vision/PointRCNN/utils/proposal_target.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/proposal_target.py
rename to PaddleCV/3d_vision/PointRCNN/utils/proposal_target.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/proposal_utils.py b/PaddleCV/3d_vision/PointRCNN/utils/proposal_utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/proposal_utils.py
rename to PaddleCV/3d_vision/PointRCNN/utils/proposal_utils.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/CMakeLists.txt b/PaddleCV/3d_vision/PointRCNN/utils/pts_utils/CMakeLists.txt
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/CMakeLists.txt
rename to PaddleCV/3d_vision/PointRCNN/utils/pts_utils/CMakeLists.txt
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/pts_utils.cpp b/PaddleCV/3d_vision/PointRCNN/utils/pts_utils/pts_utils.cpp
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/pts_utils.cpp
rename to PaddleCV/3d_vision/PointRCNN/utils/pts_utils/pts_utils.cpp
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/setup.py b/PaddleCV/3d_vision/PointRCNN/utils/pts_utils/setup.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/setup.py
rename to PaddleCV/3d_vision/PointRCNN/utils/pts_utils/setup.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/test.py b/PaddleCV/3d_vision/PointRCNN/utils/pts_utils/test.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/pts_utils/test.py
rename to PaddleCV/3d_vision/PointRCNN/utils/pts_utils/test.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/run_utils.py b/PaddleCV/3d_vision/PointRCNN/utils/run_utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/run_utils.py
rename to PaddleCV/3d_vision/PointRCNN/utils/run_utils.py
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/save_utils.py b/PaddleCV/3d_vision/PointRCNN/utils/save_utils.py
similarity index 100%
rename from PaddleCV/Paddle3D/PointRCNN/utils/save_utils.py
rename to PaddleCV/3d_vision/PointRCNN/utils/save_utils.py
diff --git a/PaddleCV/PaddleDetection b/PaddleCV/PaddleDetection
new file mode 160000
index 0000000000000000000000000000000000000000..f24275a46f225e6111e8650d70baece90a37f324
--- /dev/null
+++ b/PaddleCV/PaddleDetection
@@ -0,0 +1 @@
+Subproject commit f24275a46f225e6111e8650d70baece90a37f324
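Note: the 160000 mode above marks a gitlink: the superproject stores no file contents for PaddleCV/PaddleDetection, only the pinned commit f24275a46f225e6111e8650d70baece90a37f324. A minimal sketch of inspecting and advancing such a pin (standard git commands; the commit message is illustrative):

    # show the gitlink: mode 160000 plus the pinned commit hash
    git ls-tree HEAD PaddleCV/PaddleDetection

    # advance the pin: check out a newer commit inside the submodule,
    # then record it in the superproject
    cd PaddleCV/PaddleDetection && git fetch && git checkout release/0.2
    cd ../.. && git add PaddleCV/PaddleDetection && git commit -m "Update PaddleDetection to release/0.2"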
diff --git a/PaddleCV/README.md b/PaddleCV/README.md
index b519951e660254aeabaea9d3b6381bb9be79d3bc..c3e199affcc55e9441ac4abb0fceef7890d5419e 100644
--- a/PaddleCV/README.md
+++ b/PaddleCV/README.md
@@ -6,23 +6,23 @@ PaddleCV
Image classification assigns images to categories based on their semantic content. It is a fundamental problem in computer vision and underlies higher-level vision tasks such as object detection, image segmentation, object tracking, behavior analysis, and face recognition, with wide applications: face recognition and intelligent video analysis in security, traffic scene recognition in transportation, content-based image retrieval and automatic photo-album categorization on the internet, and image recognition in medicine.
-In the deep learning era, image classification accuracy has risen sharply. For this task we show how to train commonly used models on the classic ImageNet dataset, covering AlexNet, the VGG series, ResNet series, ResNeXt series, Inception series, MobileNet series, SENet series, DarkNet, SqueezeNet, the ShuffleNet series, and more, and we have open-sourced the [trained models](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README.md#已有模型及其性能) for users to download. We also provide a tool that converts Caffe models into PaddlePaddle
+In the deep learning era, image classification accuracy has risen sharply. For this task we show how to train commonly used models on the classic ImageNet dataset, covering AlexNet, the VGG series, ResNet series, ResNeXt series, Inception series, MobileNet series, SENet series, DarkNet, SqueezeNet, the ShuffleNet series, and more, and we have open-sourced the [trained models](https://github.com/PaddlePaddle/models/blob/release/1.7/PaddleCV/image_classification/README.md#已有模型及其性能) for users to download. We also provide a tool that converts Caffe models into PaddlePaddle
Fluid model configuration and parameter files.
-- [AlexNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [SqueezeNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [VGG Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [GoogleNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [ResNet Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [ResNeXt Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [ShuffleNet Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [DenseNet Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [Inception Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [MobileNet Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [SENet Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [DarkNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [ResNeXt101_wsl Series](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)
-- [Tool for converting Caffe models to Paddle Fluid configs and model files](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/caffe2fluid)
+- [AlexNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [SqueezeNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [VGG Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [GoogleNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [ResNet Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [ResNeXt Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [ShuffleNet Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [DenseNet Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [Inception Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [MobileNet Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [SENet Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [DarkNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [ResNeXt101_wsl Series](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification/models)
+- [Tool for converting Caffe models to Paddle Fluid configs and model files](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/caffe2fluid)
Object Detection
--------
@@ -37,10 +37,10 @@ Faster RCNN模型是典型的两阶段目标检测器,相较于传统提取区
Mask RCNN is a classic instance segmentation model built on Faster RCNN: it adds a segmentation branch to the original Faster RCNN to produce masks, decoupling mask prediction from class prediction.
-- [Single Shot MultiBox Detector](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/ssd/README_cn.md)
-- [Face Detector: PyramidBox](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/face_detection/README_cn.md)
-- [Faster RCNN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/rcnn/README_cn.md)
-- [Mask RCNN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/rcnn/README_cn.md)
+- [Single Shot MultiBox Detector](https://github.com/PaddlePaddle/PaddleDetection)
+- [Face Detector: PyramidBox](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/face_detection/README_cn.md)
+- [Faster RCNN](https://github.com/PaddlePaddle/PaddleDetection)
+- [Mask RCNN](https://github.com/PaddlePaddle/PaddleDetection)
Semantic Image Segmentation
------------
@@ -50,7 +50,7 @@ Mask RCNN模型是基于Faster RCNN模型的经典实例分割模型,在原有
In semantic image segmentation, we show how to segment images with the Image Cascade
Network (ICNet); compared with other segmentation algorithms, ICNet balances accuracy and speed.
-- [ICNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/icnet)
+- [ICNet](https://github.com/PaddlePaddle/PaddleSeg)
Image Generation
-----------
@@ -60,8 +60,8 @@ Network,ICNet)进行语义分割,相比其他分割算法,ICNet兼顾了准
In image generation, we show how to generate handwritten digits with DCGAN and ConditionalGAN, and introduce CycleGAN for style transfer.
-- [DCGAN & ConditionalGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN/c_gan)
-- [CycleGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN/cycle_gan)
+- [DCGAN & ConditionalGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan/c_gan)
+- [CycleGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan/cycle_gan)
Scene Text Recognition
------------
@@ -70,8 +70,8 @@ Network,ICNet)进行语义分割,相比其他分割算法,ICNet兼顾了准
In scene text recognition, we show how to combine CNN-based image feature extraction with RNN-based sequence translation, removing hand-engineered features and character segmentation: recognition runs on automatically learned image features. We currently cover the CRNN-CTC model and an attention-based sequence-to-sequence model.
-- [CRNN-CTC model](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ocr_recognition)
-- [Attention model](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ocr_recognition)
+- [CRNN-CTC model](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/ocr_recognition)
+- [Attention model](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/ocr_recognition)
Metric Learning
@@ -80,7 +80,7 @@ Network,ICNet)进行语义分割,相比其他分割算法,ICNet兼顾了准
Metric learning, also called distance metric learning or similarity learning, learns distances between objects and can be used to analyze the relations and comparisons between them. It is widely applicable in practice, assisting classification and clustering and serving fields such as image retrieval and face recognition. Previously, each task required choosing suitable features and hand-crafting a distance function; metric learning instead learns a task-specific distance metric on its own. Combined with deep learning, it has achieved strong results in face recognition/verification, person re-identification (Re-ID), image retrieval, and more. For this task we present Fluid-based deep metric learning models, including triplet, quadruplet, and other loss functions.
-- [Metric Learning](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning)
+- [Metric Learning](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/metric_learning)
Video Classification
@@ -89,4 +89,4 @@ Network,ICNet)进行语义分割,相比其他分割算法,ICNet兼顾了准
Video classification is the foundation of video understanding. Unlike image classification, the object being classified is no longer a still image but a video made up of many frames, carrying audio and motion information, so understanding a video requires more context: not only what each frame is and contains, but also how frames relate to one another. Video classification methods are mainly based on convolutional neural networks, recurrent neural networks, or combinations of the two. For this task we present Fluid-based video classification models, currently including the Temporal Segment Network (TSN) model, with more to follow.
-- [TSN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/video_classification)
+- [TSN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video)
diff --git a/PaddleCV/PaddleGAN/README.md b/PaddleCV/gan/README.md
similarity index 100%
rename from PaddleCV/PaddleGAN/README.md
rename to PaddleCV/gan/README.md
diff --git a/PaddleCV/PaddleGAN/c_gan/.run_ce.sh b/PaddleCV/gan/c_gan/.run_ce.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/.run_ce.sh
rename to PaddleCV/gan/c_gan/.run_ce.sh
diff --git a/PaddleCV/PaddleGAN/c_gan/README.md b/PaddleCV/gan/c_gan/README.md
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/README.md
rename to PaddleCV/gan/c_gan/README.md
diff --git a/PaddleCV/PaddleGAN/c_gan/_ce.py b/PaddleCV/gan/c_gan/_ce.py
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/_ce.py
rename to PaddleCV/gan/c_gan/_ce.py
diff --git a/PaddleCV/PaddleGAN/c_gan/c_gan.py b/PaddleCV/gan/c_gan/c_gan.py
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/c_gan.py
rename to PaddleCV/gan/c_gan/c_gan.py
diff --git a/PaddleCV/PaddleGAN/c_gan/dc_gan.py b/PaddleCV/gan/c_gan/dc_gan.py
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/dc_gan.py
rename to PaddleCV/gan/c_gan/dc_gan.py
diff --git a/PaddleCV/PaddleGAN/c_gan/images/DCGAN_demo.png b/PaddleCV/gan/c_gan/images/DCGAN_demo.png
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/images/DCGAN_demo.png
rename to PaddleCV/gan/c_gan/images/DCGAN_demo.png
diff --git a/PaddleCV/PaddleGAN/c_gan/images/conditionalGAN_demo.png b/PaddleCV/gan/c_gan/images/conditionalGAN_demo.png
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/images/conditionalGAN_demo.png
rename to PaddleCV/gan/c_gan/images/conditionalGAN_demo.png
diff --git a/PaddleCV/PaddleGAN/c_gan/images/conditionalGAN_loss.png b/PaddleCV/gan/c_gan/images/conditionalGAN_loss.png
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/images/conditionalGAN_loss.png
rename to PaddleCV/gan/c_gan/images/conditionalGAN_loss.png
diff --git a/PaddleCV/PaddleGAN/c_gan/network.py b/PaddleCV/gan/c_gan/network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/network.py
rename to PaddleCV/gan/c_gan/network.py
diff --git a/PaddleCV/PaddleGAN/c_gan/utility.py b/PaddleCV/gan/c_gan/utility.py
similarity index 100%
rename from PaddleCV/PaddleGAN/c_gan/utility.py
rename to PaddleCV/gan/c_gan/utility.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/.run_ce.sh b/PaddleCV/gan/cycle_gan/.run_ce.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/.run_ce.sh
rename to PaddleCV/gan/cycle_gan/.run_ce.sh
diff --git a/PaddleCV/PaddleGAN/cycle_gan/README.md b/PaddleCV/gan/cycle_gan/README.md
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/README.md
rename to PaddleCV/gan/cycle_gan/README.md
diff --git a/PaddleCV/PaddleGAN/cycle_gan/_ce.py b/PaddleCV/gan/cycle_gan/_ce.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/_ce.py
rename to PaddleCV/gan/cycle_gan/_ce.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainA.txt b/PaddleCV/gan/cycle_gan/data/horse2zebra/trainA.txt
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainA.txt
rename to PaddleCV/gan/cycle_gan/data/horse2zebra/trainA.txt
diff --git a/PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainA/n02381460_1001.jpg b/PaddleCV/gan/cycle_gan/data/horse2zebra/trainA/n02381460_1001.jpg
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainA/n02381460_1001.jpg
rename to PaddleCV/gan/cycle_gan/data/horse2zebra/trainA/n02381460_1001.jpg
diff --git a/PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainB.txt b/PaddleCV/gan/cycle_gan/data/horse2zebra/trainB.txt
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainB.txt
rename to PaddleCV/gan/cycle_gan/data/horse2zebra/trainB.txt
diff --git a/PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainB/n02391049_10007.jpg b/PaddleCV/gan/cycle_gan/data/horse2zebra/trainB/n02391049_10007.jpg
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/data/horse2zebra/trainB/n02391049_10007.jpg
rename to PaddleCV/gan/cycle_gan/data/horse2zebra/trainB/n02391049_10007.jpg
diff --git a/PaddleCV/PaddleGAN/cycle_gan/data_reader.py b/PaddleCV/gan/cycle_gan/data_reader.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/data_reader.py
rename to PaddleCV/gan/cycle_gan/data_reader.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/images/A2B.jpg b/PaddleCV/gan/cycle_gan/images/A2B.jpg
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/images/A2B.jpg
rename to PaddleCV/gan/cycle_gan/images/A2B.jpg
diff --git a/PaddleCV/PaddleGAN/cycle_gan/images/B2A.jpg b/PaddleCV/gan/cycle_gan/images/B2A.jpg
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/images/B2A.jpg
rename to PaddleCV/gan/cycle_gan/images/B2A.jpg
diff --git a/PaddleCV/PaddleGAN/cycle_gan/images/cycleGAN_loss.png b/PaddleCV/gan/cycle_gan/images/cycleGAN_loss.png
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/images/cycleGAN_loss.png
rename to PaddleCV/gan/cycle_gan/images/cycleGAN_loss.png
diff --git a/PaddleCV/PaddleGAN/cycle_gan/infer.py b/PaddleCV/gan/cycle_gan/infer.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/infer.py
rename to PaddleCV/gan/cycle_gan/infer.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/layers.py b/PaddleCV/gan/cycle_gan/layers.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/layers.py
rename to PaddleCV/gan/cycle_gan/layers.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/model.py b/PaddleCV/gan/cycle_gan/model.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/model.py
rename to PaddleCV/gan/cycle_gan/model.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/train.py b/PaddleCV/gan/cycle_gan/train.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/train.py
rename to PaddleCV/gan/cycle_gan/train.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/trainer.py b/PaddleCV/gan/cycle_gan/trainer.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/trainer.py
rename to PaddleCV/gan/cycle_gan/trainer.py
diff --git a/PaddleCV/PaddleGAN/cycle_gan/utility.py b/PaddleCV/gan/cycle_gan/utility.py
similarity index 100%
rename from PaddleCV/PaddleGAN/cycle_gan/utility.py
rename to PaddleCV/gan/cycle_gan/utility.py
diff --git a/PaddleCV/PaddleGAN/data_reader.py b/PaddleCV/gan/data_reader.py
similarity index 100%
rename from PaddleCV/PaddleGAN/data_reader.py
rename to PaddleCV/gan/data_reader.py
diff --git a/PaddleCV/PaddleGAN/download.py b/PaddleCV/gan/download.py
similarity index 100%
rename from PaddleCV/PaddleGAN/download.py
rename to PaddleCV/gan/download.py
diff --git a/PaddleCV/PaddleGAN/images/attgan.jpg b/PaddleCV/gan/images/attgan.jpg
similarity index 100%
rename from PaddleCV/PaddleGAN/images/attgan.jpg
rename to PaddleCV/gan/images/attgan.jpg
diff --git a/PaddleCV/PaddleGAN/images/attgan_net.png b/PaddleCV/gan/images/attgan_net.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/attgan_net.png
rename to PaddleCV/gan/images/attgan_net.png
diff --git a/PaddleCV/PaddleGAN/images/female_stargan_attgan_stgan.png b/PaddleCV/gan/images/female_stargan_attgan_stgan.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/female_stargan_attgan_stgan.png
rename to PaddleCV/gan/images/female_stargan_attgan_stgan.png
diff --git a/PaddleCV/PaddleGAN/images/pix2pix_cyclegan.png b/PaddleCV/gan/images/pix2pix_cyclegan.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/pix2pix_cyclegan.png
rename to PaddleCV/gan/images/pix2pix_cyclegan.png
diff --git a/PaddleCV/PaddleGAN/images/pix2pix_gen.png b/PaddleCV/gan/images/pix2pix_gen.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/pix2pix_gen.png
rename to PaddleCV/gan/images/pix2pix_gen.png
diff --git a/PaddleCV/PaddleGAN/images/spade_net.png b/PaddleCV/gan/images/spade_net.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/spade_net.png
rename to PaddleCV/gan/images/spade_net.png
diff --git a/PaddleCV/PaddleGAN/images/stargan.jpg b/PaddleCV/gan/images/stargan.jpg
similarity index 100%
rename from PaddleCV/PaddleGAN/images/stargan.jpg
rename to PaddleCV/gan/images/stargan.jpg
diff --git a/PaddleCV/PaddleGAN/images/stargan_dis.png b/PaddleCV/gan/images/stargan_dis.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/stargan_dis.png
rename to PaddleCV/gan/images/stargan_dis.png
diff --git a/PaddleCV/PaddleGAN/images/stargan_gen.png b/PaddleCV/gan/images/stargan_gen.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/stargan_gen.png
rename to PaddleCV/gan/images/stargan_gen.png
diff --git a/PaddleCV/PaddleGAN/images/stgan.jpg b/PaddleCV/gan/images/stgan.jpg
similarity index 100%
rename from PaddleCV/PaddleGAN/images/stgan.jpg
rename to PaddleCV/gan/images/stgan.jpg
diff --git a/PaddleCV/PaddleGAN/images/stgan_net.png b/PaddleCV/gan/images/stgan_net.png
similarity index 100%
rename from PaddleCV/PaddleGAN/images/stgan_net.png
rename to PaddleCV/gan/images/stgan_net.png
diff --git a/PaddleCV/PaddleGAN/infer.py b/PaddleCV/gan/infer.py
similarity index 100%
rename from PaddleCV/PaddleGAN/infer.py
rename to PaddleCV/gan/infer.py
diff --git a/PaddleCV/PaddleGAN/network/AttGAN_network.py b/PaddleCV/gan/network/AttGAN_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/AttGAN_network.py
rename to PaddleCV/gan/network/AttGAN_network.py
diff --git a/PaddleCV/PaddleGAN/network/CGAN_network.py b/PaddleCV/gan/network/CGAN_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/CGAN_network.py
rename to PaddleCV/gan/network/CGAN_network.py
diff --git a/PaddleCV/PaddleGAN/network/CycleGAN_network.py b/PaddleCV/gan/network/CycleGAN_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/CycleGAN_network.py
rename to PaddleCV/gan/network/CycleGAN_network.py
diff --git a/PaddleCV/PaddleGAN/network/DCGAN_network.py b/PaddleCV/gan/network/DCGAN_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/DCGAN_network.py
rename to PaddleCV/gan/network/DCGAN_network.py
diff --git a/PaddleCV/PaddleGAN/network/Pix2pix_network.py b/PaddleCV/gan/network/Pix2pix_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/Pix2pix_network.py
rename to PaddleCV/gan/network/Pix2pix_network.py
diff --git a/PaddleCV/PaddleGAN/network/SPADE_network.py b/PaddleCV/gan/network/SPADE_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/SPADE_network.py
rename to PaddleCV/gan/network/SPADE_network.py
diff --git a/PaddleCV/PaddleGAN/network/STGAN_network.py b/PaddleCV/gan/network/STGAN_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/STGAN_network.py
rename to PaddleCV/gan/network/STGAN_network.py
diff --git a/PaddleCV/PaddleGAN/network/StarGAN_network.py b/PaddleCV/gan/network/StarGAN_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/StarGAN_network.py
rename to PaddleCV/gan/network/StarGAN_network.py
diff --git a/PaddleCV/PaddleGAN/network/__init__.py b/PaddleCV/gan/network/__init__.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/__init__.py
rename to PaddleCV/gan/network/__init__.py
diff --git a/PaddleCV/PaddleGAN/network/base_network.py b/PaddleCV/gan/network/base_network.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/base_network.py
rename to PaddleCV/gan/network/base_network.py
diff --git a/PaddleCV/PaddleGAN/network/vgg.py b/PaddleCV/gan/network/vgg.py
similarity index 100%
rename from PaddleCV/PaddleGAN/network/vgg.py
rename to PaddleCV/gan/network/vgg.py
diff --git a/PaddleCV/PaddleGAN/requirements.txt b/PaddleCV/gan/requirements.txt
similarity index 100%
rename from PaddleCV/PaddleGAN/requirements.txt
rename to PaddleCV/gan/requirements.txt
diff --git a/PaddleCV/PaddleGAN/scripts/infer_SPADE.sh b/PaddleCV/gan/scripts/infer_SPADE.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_SPADE.sh
rename to PaddleCV/gan/scripts/infer_SPADE.sh
diff --git a/PaddleCV/PaddleGAN/scripts/infer_attgan.sh b/PaddleCV/gan/scripts/infer_attgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_attgan.sh
rename to PaddleCV/gan/scripts/infer_attgan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/infer_cgan.sh b/PaddleCV/gan/scripts/infer_cgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_cgan.sh
rename to PaddleCV/gan/scripts/infer_cgan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/infer_cyclegan.sh b/PaddleCV/gan/scripts/infer_cyclegan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_cyclegan.sh
rename to PaddleCV/gan/scripts/infer_cyclegan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/infer_dcgan.sh b/PaddleCV/gan/scripts/infer_dcgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_dcgan.sh
rename to PaddleCV/gan/scripts/infer_dcgan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/infer_pix2pix.sh b/PaddleCV/gan/scripts/infer_pix2pix.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_pix2pix.sh
rename to PaddleCV/gan/scripts/infer_pix2pix.sh
diff --git a/PaddleCV/PaddleGAN/scripts/infer_stargan.sh b/PaddleCV/gan/scripts/infer_stargan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_stargan.sh
rename to PaddleCV/gan/scripts/infer_stargan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/infer_stgan.sh b/PaddleCV/gan/scripts/infer_stgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/infer_stgan.sh
rename to PaddleCV/gan/scripts/infer_stgan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/make_pair_data.py b/PaddleCV/gan/scripts/make_pair_data.py
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/make_pair_data.py
rename to PaddleCV/gan/scripts/make_pair_data.py
diff --git a/PaddleCV/PaddleGAN/scripts/prepare_cityscapes_dataset.py b/PaddleCV/gan/scripts/prepare_cityscapes_dataset.py
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/prepare_cityscapes_dataset.py
rename to PaddleCV/gan/scripts/prepare_cityscapes_dataset.py
diff --git a/PaddleCV/PaddleGAN/scripts/run_SPADE.sh b/PaddleCV/gan/scripts/run_SPADE.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_SPADE.sh
rename to PaddleCV/gan/scripts/run_SPADE.sh
diff --git a/PaddleCV/PaddleGAN/scripts/run_attgan.sh b/PaddleCV/gan/scripts/run_attgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_attgan.sh
rename to PaddleCV/gan/scripts/run_attgan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/run_cgan.sh b/PaddleCV/gan/scripts/run_cgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_cgan.sh
rename to PaddleCV/gan/scripts/run_cgan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/run_cyclegan.sh b/PaddleCV/gan/scripts/run_cyclegan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_cyclegan.sh
rename to PaddleCV/gan/scripts/run_cyclegan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/run_dcgan.sh b/PaddleCV/gan/scripts/run_dcgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_dcgan.sh
rename to PaddleCV/gan/scripts/run_dcgan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/run_pix2pix.sh b/PaddleCV/gan/scripts/run_pix2pix.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_pix2pix.sh
rename to PaddleCV/gan/scripts/run_pix2pix.sh
diff --git a/PaddleCV/PaddleGAN/scripts/run_stargan.sh b/PaddleCV/gan/scripts/run_stargan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_stargan.sh
rename to PaddleCV/gan/scripts/run_stargan.sh
diff --git a/PaddleCV/PaddleGAN/scripts/run_stgan.sh b/PaddleCV/gan/scripts/run_stgan.sh
similarity index 100%
rename from PaddleCV/PaddleGAN/scripts/run_stgan.sh
rename to PaddleCV/gan/scripts/run_stgan.sh
diff --git a/PaddleCV/PaddleGAN/train.py b/PaddleCV/gan/train.py
similarity index 100%
rename from PaddleCV/PaddleGAN/train.py
rename to PaddleCV/gan/train.py
diff --git a/PaddleCV/PaddleGAN/trainer/AttGAN.py b/PaddleCV/gan/trainer/AttGAN.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/AttGAN.py
rename to PaddleCV/gan/trainer/AttGAN.py
diff --git a/PaddleCV/PaddleGAN/trainer/CGAN.py b/PaddleCV/gan/trainer/CGAN.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/CGAN.py
rename to PaddleCV/gan/trainer/CGAN.py
diff --git a/PaddleCV/PaddleGAN/trainer/CycleGAN.py b/PaddleCV/gan/trainer/CycleGAN.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/CycleGAN.py
rename to PaddleCV/gan/trainer/CycleGAN.py
diff --git a/PaddleCV/PaddleGAN/trainer/DCGAN.py b/PaddleCV/gan/trainer/DCGAN.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/DCGAN.py
rename to PaddleCV/gan/trainer/DCGAN.py
diff --git a/PaddleCV/PaddleGAN/trainer/Pix2pix.py b/PaddleCV/gan/trainer/Pix2pix.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/Pix2pix.py
rename to PaddleCV/gan/trainer/Pix2pix.py
diff --git a/PaddleCV/PaddleGAN/trainer/SPADE.py b/PaddleCV/gan/trainer/SPADE.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/SPADE.py
rename to PaddleCV/gan/trainer/SPADE.py
diff --git a/PaddleCV/PaddleGAN/trainer/STGAN.py b/PaddleCV/gan/trainer/STGAN.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/STGAN.py
rename to PaddleCV/gan/trainer/STGAN.py
diff --git a/PaddleCV/PaddleGAN/trainer/StarGAN.py b/PaddleCV/gan/trainer/StarGAN.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/StarGAN.py
rename to PaddleCV/gan/trainer/StarGAN.py
diff --git a/PaddleCV/PaddleGAN/trainer/__init__.py b/PaddleCV/gan/trainer/__init__.py
similarity index 100%
rename from PaddleCV/PaddleGAN/trainer/__init__.py
rename to PaddleCV/gan/trainer/__init__.py
diff --git a/PaddleCV/PaddleGAN/util/__init__.py b/PaddleCV/gan/util/__init__.py
similarity index 100%
rename from PaddleCV/PaddleGAN/util/__init__.py
rename to PaddleCV/gan/util/__init__.py
diff --git a/PaddleCV/PaddleGAN/util/config.py b/PaddleCV/gan/util/config.py
similarity index 100%
rename from PaddleCV/PaddleGAN/util/config.py
rename to PaddleCV/gan/util/config.py
diff --git a/PaddleCV/PaddleGAN/util/utility.py b/PaddleCV/gan/util/utility.py
similarity index 100%
rename from PaddleCV/PaddleGAN/util/utility.py
rename to PaddleCV/gan/util/utility.py
diff --git a/PaddleCV/PaddleVideo/.gitignore b/PaddleCV/video/.gitignore
similarity index 100%
rename from PaddleCV/PaddleVideo/.gitignore
rename to PaddleCV/video/.gitignore
diff --git a/PaddleCV/PaddleVideo/README.md b/PaddleCV/video/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/README.md
rename to PaddleCV/video/README.md
diff --git a/PaddleCV/PaddleVideo/_ce.py b/PaddleCV/video/_ce.py
similarity index 100%
rename from PaddleCV/PaddleVideo/_ce.py
rename to PaddleCV/video/_ce.py
diff --git a/PaddleCV/PaddleVideo/configs/attention_cluster.yaml b/PaddleCV/video/configs/attention_cluster.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/attention_cluster.yaml
rename to PaddleCV/video/configs/attention_cluster.yaml
diff --git a/PaddleCV/PaddleVideo/configs/attention_lstm.yaml b/PaddleCV/video/configs/attention_lstm.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/attention_lstm.yaml
rename to PaddleCV/video/configs/attention_lstm.yaml
diff --git a/PaddleCV/PaddleVideo/configs/bmn.yaml b/PaddleCV/video/configs/bmn.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/bmn.yaml
rename to PaddleCV/video/configs/bmn.yaml
diff --git a/PaddleCV/PaddleVideo/configs/bsn_pem.yaml b/PaddleCV/video/configs/bsn_pem.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/bsn_pem.yaml
rename to PaddleCV/video/configs/bsn_pem.yaml
diff --git a/PaddleCV/PaddleVideo/configs/bsn_tem.yaml b/PaddleCV/video/configs/bsn_tem.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/bsn_tem.yaml
rename to PaddleCV/video/configs/bsn_tem.yaml
diff --git a/PaddleCV/PaddleVideo/configs/ctcn.yaml b/PaddleCV/video/configs/ctcn.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/ctcn.yaml
rename to PaddleCV/video/configs/ctcn.yaml
diff --git a/PaddleCV/PaddleVideo/configs/ets.yaml b/PaddleCV/video/configs/ets.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/ets.yaml
rename to PaddleCV/video/configs/ets.yaml
diff --git a/PaddleCV/PaddleVideo/configs/nextvlad.yaml b/PaddleCV/video/configs/nextvlad.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/nextvlad.yaml
rename to PaddleCV/video/configs/nextvlad.yaml
diff --git a/PaddleCV/PaddleVideo/configs/nonlocal.yaml b/PaddleCV/video/configs/nonlocal.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/nonlocal.yaml
rename to PaddleCV/video/configs/nonlocal.yaml
diff --git a/PaddleCV/PaddleVideo/configs/stnet.yaml b/PaddleCV/video/configs/stnet.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/stnet.yaml
rename to PaddleCV/video/configs/stnet.yaml
diff --git a/PaddleCV/PaddleVideo/configs/tall.yaml b/PaddleCV/video/configs/tall.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/tall.yaml
rename to PaddleCV/video/configs/tall.yaml
diff --git a/PaddleCV/PaddleVideo/configs/tsm.yaml b/PaddleCV/video/configs/tsm.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/tsm.yaml
rename to PaddleCV/video/configs/tsm.yaml
diff --git a/PaddleCV/PaddleVideo/configs/tsn.yaml b/PaddleCV/video/configs/tsn.yaml
similarity index 100%
rename from PaddleCV/PaddleVideo/configs/tsn.yaml
rename to PaddleCV/video/configs/tsn.yaml
diff --git a/PaddleCV/PaddleVideo/data/dataset/README.md b/PaddleCV/video/data/dataset/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/README.md
rename to PaddleCV/video/data/dataset/README.md
diff --git a/PaddleCV/PaddleVideo/data/dataset/bmn/README.md b/PaddleCV/video/data/dataset/bmn/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/bmn/README.md
rename to PaddleCV/video/data/dataset/bmn/README.md
diff --git a/PaddleCV/PaddleVideo/data/dataset/bmn/activitynet_1.3_annotations.json b/PaddleCV/video/data/dataset/bmn/activitynet_1.3_annotations.json
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/bmn/activitynet_1.3_annotations.json
rename to PaddleCV/video/data/dataset/bmn/activitynet_1.3_annotations.json
diff --git a/PaddleCV/PaddleVideo/data/dataset/bmn/gen_infer_list.py b/PaddleCV/video/data/dataset/bmn/gen_infer_list.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/bmn/gen_infer_list.py
rename to PaddleCV/video/data/dataset/bmn/gen_infer_list.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/bmn/infer.list b/PaddleCV/video/data/dataset/bmn/infer.list
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/bmn/infer.list
rename to PaddleCV/video/data/dataset/bmn/infer.list
diff --git a/PaddleCV/PaddleVideo/data/dataset/ctcn/README.md b/PaddleCV/video/data/dataset/ctcn/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/ctcn/README.md
rename to PaddleCV/video/data/dataset/ctcn/README.md
diff --git a/PaddleCV/PaddleVideo/data/dataset/ets/README.md b/PaddleCV/video/data/dataset/ets/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/ets/README.md
rename to PaddleCV/video/data/dataset/ets/README.md
diff --git a/PaddleCV/PaddleVideo/data/dataset/ets/generate_data.py b/PaddleCV/video/data/dataset/ets/generate_data.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/ets/generate_data.py
rename to PaddleCV/video/data/dataset/ets/generate_data.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/ets/generate_infer_data.py b/PaddleCV/video/data/dataset/ets/generate_infer_data.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/ets/generate_infer_data.py
rename to PaddleCV/video/data/dataset/ets/generate_infer_data.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/ets/generate_train_pickle.py b/PaddleCV/video/data/dataset/ets/generate_train_pickle.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/ets/generate_train_pickle.py
rename to PaddleCV/video/data/dataset/ets/generate_train_pickle.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/kinetics/generate_label.py b/PaddleCV/video/data/dataset/kinetics/generate_label.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/kinetics/generate_label.py
rename to PaddleCV/video/data/dataset/kinetics/generate_label.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/kinetics/video2pkl.py b/PaddleCV/video/data/dataset/kinetics/video2pkl.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/kinetics/video2pkl.py
rename to PaddleCV/video/data/dataset/kinetics/video2pkl.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/kinetics_labels.json b/PaddleCV/video/data/dataset/kinetics_labels.json
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/kinetics_labels.json
rename to PaddleCV/video/data/dataset/kinetics_labels.json
diff --git a/PaddleCV/PaddleVideo/data/dataset/nonlocal/README.md b/PaddleCV/video/data/dataset/nonlocal/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/nonlocal/README.md
rename to PaddleCV/video/data/dataset/nonlocal/README.md
diff --git a/PaddleCV/PaddleVideo/data/dataset/nonlocal/change_filelist.py b/PaddleCV/video/data/dataset/nonlocal/change_filelist.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/nonlocal/change_filelist.py
rename to PaddleCV/video/data/dataset/nonlocal/change_filelist.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/nonlocal/generate_filelist.py b/PaddleCV/video/data/dataset/nonlocal/generate_filelist.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/nonlocal/generate_filelist.py
rename to PaddleCV/video/data/dataset/nonlocal/generate_filelist.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/tall/README.md b/PaddleCV/video/data/dataset/tall/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/tall/README.md
rename to PaddleCV/video/data/dataset/tall/README.md
diff --git a/PaddleCV/PaddleVideo/data/dataset/tall/gen_infer.py b/PaddleCV/video/data/dataset/tall/gen_infer.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/tall/gen_infer.py
rename to PaddleCV/video/data/dataset/tall/gen_infer.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/youtube8m/tf2pkl.py b/PaddleCV/video/data/dataset/youtube8m/tf2pkl.py
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/youtube8m/tf2pkl.py
rename to PaddleCV/video/data/dataset/youtube8m/tf2pkl.py
diff --git a/PaddleCV/PaddleVideo/data/dataset/youtube8m/yt8m_pca/eigenvals.npy b/PaddleCV/video/data/dataset/youtube8m/yt8m_pca/eigenvals.npy
similarity index 100%
rename from PaddleCV/PaddleVideo/data/dataset/youtube8m/yt8m_pca/eigenvals.npy
rename to PaddleCV/video/data/dataset/youtube8m/yt8m_pca/eigenvals.npy
diff --git a/PaddleCV/PaddleVideo/eval.py b/PaddleCV/video/eval.py
similarity index 100%
rename from PaddleCV/PaddleVideo/eval.py
rename to PaddleCV/video/eval.py
diff --git a/PaddleCV/PaddleVideo/images/BMN.png b/PaddleCV/video/images/BMN.png
similarity index 100%
rename from PaddleCV/PaddleVideo/images/BMN.png
rename to PaddleCV/video/images/BMN.png
diff --git a/PaddleCV/PaddleVideo/images/BSN.png b/PaddleCV/video/images/BSN.png
similarity index 100%
rename from PaddleCV/PaddleVideo/images/BSN.png
rename to PaddleCV/video/images/BSN.png
diff --git a/PaddleCV/PaddleVideo/images/StNet.png b/PaddleCV/video/images/StNet.png
similarity index 100%
rename from PaddleCV/PaddleVideo/images/StNet.png
rename to PaddleCV/video/images/StNet.png
diff --git a/PaddleCV/PaddleVideo/images/attention_cluster.png b/PaddleCV/video/images/attention_cluster.png
similarity index 100%
rename from PaddleCV/PaddleVideo/images/attention_cluster.png
rename to PaddleCV/video/images/attention_cluster.png
diff --git a/PaddleCV/PaddleVideo/images/nonlocal_instantiation.png b/PaddleCV/video/images/nonlocal_instantiation.png
similarity index 100%
rename from PaddleCV/PaddleVideo/images/nonlocal_instantiation.png
rename to PaddleCV/video/images/nonlocal_instantiation.png
diff --git a/PaddleCV/PaddleVideo/images/temporal_shift.png b/PaddleCV/video/images/temporal_shift.png
similarity index 100%
rename from PaddleCV/PaddleVideo/images/temporal_shift.png
rename to PaddleCV/video/images/temporal_shift.png
diff --git a/PaddleCV/PaddleVideo/inference_model.py b/PaddleCV/video/inference_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/inference_model.py
rename to PaddleCV/video/inference_model.py
diff --git a/PaddleCV/PaddleVideo/metrics/__init__.py b/PaddleCV/video/metrics/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/__init__.py
rename to PaddleCV/video/metrics/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/bmn_metrics/README.md b/PaddleCV/video/metrics/bmn_metrics/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/bmn_metrics/README.md
rename to PaddleCV/video/metrics/bmn_metrics/README.md
diff --git a/PaddleCV/PaddleVideo/metrics/bmn_metrics/__init__.py b/PaddleCV/video/metrics/bmn_metrics/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/bmn_metrics/__init__.py
rename to PaddleCV/video/metrics/bmn_metrics/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/bmn_metrics/bmn_proposal_metrics.py b/PaddleCV/video/metrics/bmn_metrics/bmn_proposal_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/bmn_metrics/bmn_proposal_metrics.py
rename to PaddleCV/video/metrics/bmn_metrics/bmn_proposal_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/bmn_metrics/eval_anet_prop.py b/PaddleCV/video/metrics/bmn_metrics/eval_anet_prop.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/bmn_metrics/eval_anet_prop.py
rename to PaddleCV/video/metrics/bmn_metrics/eval_anet_prop.py
diff --git a/PaddleCV/PaddleVideo/metrics/bsn_metrics/__init__.py b/PaddleCV/video/metrics/bsn_metrics/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/bsn_metrics/__init__.py
rename to PaddleCV/video/metrics/bsn_metrics/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/bsn_metrics/bsn_pem_metrics.py b/PaddleCV/video/metrics/bsn_metrics/bsn_pem_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/bsn_metrics/bsn_pem_metrics.py
rename to PaddleCV/video/metrics/bsn_metrics/bsn_pem_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/bsn_metrics/bsn_tem_metrics.py b/PaddleCV/video/metrics/bsn_metrics/bsn_tem_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/bsn_metrics/bsn_tem_metrics.py
rename to PaddleCV/video/metrics/bsn_metrics/bsn_tem_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/detections/README.md b/PaddleCV/video/metrics/detections/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/detections/README.md
rename to PaddleCV/video/metrics/detections/README.md
diff --git a/PaddleCV/PaddleVideo/metrics/detections/__init__.py b/PaddleCV/video/metrics/detections/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/detections/__init__.py
rename to PaddleCV/video/metrics/detections/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/detections/detection_metrics.py b/PaddleCV/video/metrics/detections/detection_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/detections/detection_metrics.py
rename to PaddleCV/video/metrics/detections/detection_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/ets_metrics/README.md b/PaddleCV/video/metrics/ets_metrics/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/ets_metrics/README.md
rename to PaddleCV/video/metrics/ets_metrics/README.md
diff --git a/PaddleCV/PaddleVideo/metrics/ets_metrics/__init__.py b/PaddleCV/video/metrics/ets_metrics/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/ets_metrics/__init__.py
rename to PaddleCV/video/metrics/ets_metrics/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/ets_metrics/ets_metrics.py b/PaddleCV/video/metrics/ets_metrics/ets_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/ets_metrics/ets_metrics.py
rename to PaddleCV/video/metrics/ets_metrics/ets_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/kinetics/__init__.py b/PaddleCV/video/metrics/kinetics/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/kinetics/__init__.py
rename to PaddleCV/video/metrics/kinetics/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/kinetics/accuracy_metrics.py b/PaddleCV/video/metrics/kinetics/accuracy_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/kinetics/accuracy_metrics.py
rename to PaddleCV/video/metrics/kinetics/accuracy_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/metrics_util.py b/PaddleCV/video/metrics/metrics_util.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/metrics_util.py
rename to PaddleCV/video/metrics/metrics_util.py
diff --git a/PaddleCV/PaddleVideo/metrics/multicrop_test/__init__.py b/PaddleCV/video/metrics/multicrop_test/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/multicrop_test/__init__.py
rename to PaddleCV/video/metrics/multicrop_test/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/multicrop_test/multicrop_test_metrics.py b/PaddleCV/video/metrics/multicrop_test/multicrop_test_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/multicrop_test/multicrop_test_metrics.py
rename to PaddleCV/video/metrics/multicrop_test/multicrop_test_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/tall_metrics/__init__.py b/PaddleCV/video/metrics/tall_metrics/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/tall_metrics/__init__.py
rename to PaddleCV/video/metrics/tall_metrics/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/tall_metrics/tall_metrics.py b/PaddleCV/video/metrics/tall_metrics/tall_metrics.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/tall_metrics/tall_metrics.py
rename to PaddleCV/video/metrics/tall_metrics/tall_metrics.py
diff --git a/PaddleCV/PaddleVideo/metrics/youtube8m/__init__.py b/PaddleCV/video/metrics/youtube8m/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/youtube8m/__init__.py
rename to PaddleCV/video/metrics/youtube8m/__init__.py
diff --git a/PaddleCV/PaddleVideo/metrics/youtube8m/average_precision_calculator.py b/PaddleCV/video/metrics/youtube8m/average_precision_calculator.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/youtube8m/average_precision_calculator.py
rename to PaddleCV/video/metrics/youtube8m/average_precision_calculator.py
diff --git a/PaddleCV/PaddleVideo/metrics/youtube8m/eval_util.py b/PaddleCV/video/metrics/youtube8m/eval_util.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/youtube8m/eval_util.py
rename to PaddleCV/video/metrics/youtube8m/eval_util.py
diff --git a/PaddleCV/PaddleVideo/metrics/youtube8m/mean_average_precision_calculator.py b/PaddleCV/video/metrics/youtube8m/mean_average_precision_calculator.py
similarity index 100%
rename from PaddleCV/PaddleVideo/metrics/youtube8m/mean_average_precision_calculator.py
rename to PaddleCV/video/metrics/youtube8m/mean_average_precision_calculator.py
diff --git a/PaddleCV/PaddleVideo/models/__init__.py b/PaddleCV/video/models/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/__init__.py
rename to PaddleCV/video/models/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/attention_cluster/README.md b/PaddleCV/video/models/attention_cluster/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_cluster/README.md
rename to PaddleCV/video/models/attention_cluster/README.md
diff --git a/PaddleCV/PaddleVideo/models/attention_cluster/__init__.py b/PaddleCV/video/models/attention_cluster/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_cluster/__init__.py
rename to PaddleCV/video/models/attention_cluster/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/attention_cluster/attention_cluster.py b/PaddleCV/video/models/attention_cluster/attention_cluster.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_cluster/attention_cluster.py
rename to PaddleCV/video/models/attention_cluster/attention_cluster.py
diff --git a/PaddleCV/PaddleVideo/models/attention_cluster/logistic_model.py b/PaddleCV/video/models/attention_cluster/logistic_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_cluster/logistic_model.py
rename to PaddleCV/video/models/attention_cluster/logistic_model.py
diff --git a/PaddleCV/PaddleVideo/models/attention_cluster/shifting_attention.py b/PaddleCV/video/models/attention_cluster/shifting_attention.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_cluster/shifting_attention.py
rename to PaddleCV/video/models/attention_cluster/shifting_attention.py
diff --git a/PaddleCV/PaddleVideo/models/attention_lstm/README.md b/PaddleCV/video/models/attention_lstm/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_lstm/README.md
rename to PaddleCV/video/models/attention_lstm/README.md
diff --git a/PaddleCV/PaddleVideo/models/attention_lstm/__init__.py b/PaddleCV/video/models/attention_lstm/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_lstm/__init__.py
rename to PaddleCV/video/models/attention_lstm/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/attention_lstm/attention_lstm.py b/PaddleCV/video/models/attention_lstm/attention_lstm.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_lstm/attention_lstm.py
rename to PaddleCV/video/models/attention_lstm/attention_lstm.py
diff --git a/PaddleCV/PaddleVideo/models/attention_lstm/lstm_attention.py b/PaddleCV/video/models/attention_lstm/lstm_attention.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/attention_lstm/lstm_attention.py
rename to PaddleCV/video/models/attention_lstm/lstm_attention.py
diff --git a/PaddleCV/PaddleVideo/models/bmn/README.md b/PaddleCV/video/models/bmn/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bmn/README.md
rename to PaddleCV/video/models/bmn/README.md
diff --git a/PaddleCV/PaddleVideo/models/bmn/__init__.py b/PaddleCV/video/models/bmn/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bmn/__init__.py
rename to PaddleCV/video/models/bmn/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/bmn/bmn.py b/PaddleCV/video/models/bmn/bmn.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bmn/bmn.py
rename to PaddleCV/video/models/bmn/bmn.py
diff --git a/PaddleCV/PaddleVideo/models/bmn/bmn_net.py b/PaddleCV/video/models/bmn/bmn_net.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bmn/bmn_net.py
rename to PaddleCV/video/models/bmn/bmn_net.py
diff --git a/PaddleCV/PaddleVideo/models/bmn/bmn_utils.py b/PaddleCV/video/models/bmn/bmn_utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bmn/bmn_utils.py
rename to PaddleCV/video/models/bmn/bmn_utils.py
diff --git a/PaddleCV/PaddleVideo/models/bsn/README.md b/PaddleCV/video/models/bsn/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bsn/README.md
rename to PaddleCV/video/models/bsn/README.md
diff --git a/PaddleCV/PaddleVideo/models/bsn/__init__.py b/PaddleCV/video/models/bsn/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bsn/__init__.py
rename to PaddleCV/video/models/bsn/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/bsn/bsn.py b/PaddleCV/video/models/bsn/bsn.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bsn/bsn.py
rename to PaddleCV/video/models/bsn/bsn.py
diff --git a/PaddleCV/PaddleVideo/models/bsn/bsn_net.py b/PaddleCV/video/models/bsn/bsn_net.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bsn/bsn_net.py
rename to PaddleCV/video/models/bsn/bsn_net.py
diff --git a/PaddleCV/PaddleVideo/models/bsn/bsn_utils.py b/PaddleCV/video/models/bsn/bsn_utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/bsn/bsn_utils.py
rename to PaddleCV/video/models/bsn/bsn_utils.py
diff --git a/PaddleCV/PaddleVideo/models/ctcn/README.md b/PaddleCV/video/models/ctcn/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ctcn/README.md
rename to PaddleCV/video/models/ctcn/README.md
diff --git a/PaddleCV/PaddleVideo/models/ctcn/__init__.py b/PaddleCV/video/models/ctcn/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ctcn/__init__.py
rename to PaddleCV/video/models/ctcn/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/ctcn/ctcn.py b/PaddleCV/video/models/ctcn/ctcn.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ctcn/ctcn.py
rename to PaddleCV/video/models/ctcn/ctcn.py
diff --git a/PaddleCV/PaddleVideo/models/ctcn/ctcn_utils.py b/PaddleCV/video/models/ctcn/ctcn_utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ctcn/ctcn_utils.py
rename to PaddleCV/video/models/ctcn/ctcn_utils.py
diff --git a/PaddleCV/PaddleVideo/models/ctcn/fpn_ctcn.py b/PaddleCV/video/models/ctcn/fpn_ctcn.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ctcn/fpn_ctcn.py
rename to PaddleCV/video/models/ctcn/fpn_ctcn.py
diff --git a/PaddleCV/PaddleVideo/models/ets/README.md b/PaddleCV/video/models/ets/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ets/README.md
rename to PaddleCV/video/models/ets/README.md
diff --git a/PaddleCV/PaddleVideo/models/ets/__init__.py b/PaddleCV/video/models/ets/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ets/__init__.py
rename to PaddleCV/video/models/ets/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/ets/ets.py b/PaddleCV/video/models/ets/ets.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ets/ets.py
rename to PaddleCV/video/models/ets/ets.py
diff --git a/PaddleCV/PaddleVideo/models/ets/ets_net.py b/PaddleCV/video/models/ets/ets_net.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/ets/ets_net.py
rename to PaddleCV/video/models/ets/ets_net.py
diff --git a/PaddleCV/PaddleVideo/models/model.py b/PaddleCV/video/models/model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/model.py
rename to PaddleCV/video/models/model.py
diff --git a/PaddleCV/PaddleVideo/models/nextvlad/README.md b/PaddleCV/video/models/nextvlad/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nextvlad/README.md
rename to PaddleCV/video/models/nextvlad/README.md
diff --git a/PaddleCV/PaddleVideo/models/nextvlad/__init__.py b/PaddleCV/video/models/nextvlad/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nextvlad/__init__.py
rename to PaddleCV/video/models/nextvlad/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/nextvlad/clf_model.py b/PaddleCV/video/models/nextvlad/clf_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nextvlad/clf_model.py
rename to PaddleCV/video/models/nextvlad/clf_model.py
diff --git a/PaddleCV/PaddleVideo/models/nextvlad/nextvlad.py b/PaddleCV/video/models/nextvlad/nextvlad.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nextvlad/nextvlad.py
rename to PaddleCV/video/models/nextvlad/nextvlad.py
diff --git a/PaddleCV/PaddleVideo/models/nextvlad/nextvlad_model.py b/PaddleCV/video/models/nextvlad/nextvlad_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nextvlad/nextvlad_model.py
rename to PaddleCV/video/models/nextvlad/nextvlad_model.py
diff --git a/PaddleCV/PaddleVideo/models/nonlocal_model/README.md b/PaddleCV/video/models/nonlocal_model/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nonlocal_model/README.md
rename to PaddleCV/video/models/nonlocal_model/README.md
diff --git a/PaddleCV/PaddleVideo/models/nonlocal_model/__init__.py b/PaddleCV/video/models/nonlocal_model/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nonlocal_model/__init__.py
rename to PaddleCV/video/models/nonlocal_model/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/nonlocal_model/nonlocal_helper.py b/PaddleCV/video/models/nonlocal_model/nonlocal_helper.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nonlocal_model/nonlocal_helper.py
rename to PaddleCV/video/models/nonlocal_model/nonlocal_helper.py
diff --git a/PaddleCV/PaddleVideo/models/nonlocal_model/nonlocal_model.py b/PaddleCV/video/models/nonlocal_model/nonlocal_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nonlocal_model/nonlocal_model.py
rename to PaddleCV/video/models/nonlocal_model/nonlocal_model.py
diff --git a/PaddleCV/PaddleVideo/models/nonlocal_model/nonlocal_utils.py b/PaddleCV/video/models/nonlocal_model/nonlocal_utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nonlocal_model/nonlocal_utils.py
rename to PaddleCV/video/models/nonlocal_model/nonlocal_utils.py
diff --git a/PaddleCV/PaddleVideo/models/nonlocal_model/resnet_helper.py b/PaddleCV/video/models/nonlocal_model/resnet_helper.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nonlocal_model/resnet_helper.py
rename to PaddleCV/video/models/nonlocal_model/resnet_helper.py
diff --git a/PaddleCV/PaddleVideo/models/nonlocal_model/resnet_video.py b/PaddleCV/video/models/nonlocal_model/resnet_video.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/nonlocal_model/resnet_video.py
rename to PaddleCV/video/models/nonlocal_model/resnet_video.py
diff --git a/PaddleCV/PaddleVideo/models/stnet/README.md b/PaddleCV/video/models/stnet/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/stnet/README.md
rename to PaddleCV/video/models/stnet/README.md
diff --git a/PaddleCV/PaddleVideo/models/stnet/__init__.py b/PaddleCV/video/models/stnet/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/stnet/__init__.py
rename to PaddleCV/video/models/stnet/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/stnet/stnet.py b/PaddleCV/video/models/stnet/stnet.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/stnet/stnet.py
rename to PaddleCV/video/models/stnet/stnet.py
diff --git a/PaddleCV/PaddleVideo/models/stnet/stnet_res_model.py b/PaddleCV/video/models/stnet/stnet_res_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/stnet/stnet_res_model.py
rename to PaddleCV/video/models/stnet/stnet_res_model.py
diff --git a/PaddleCV/PaddleVideo/models/tall/README.md b/PaddleCV/video/models/tall/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tall/README.md
rename to PaddleCV/video/models/tall/README.md
diff --git a/PaddleCV/PaddleVideo/models/tall/__init__.py b/PaddleCV/video/models/tall/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tall/__init__.py
rename to PaddleCV/video/models/tall/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/tall/tall.py b/PaddleCV/video/models/tall/tall.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tall/tall.py
rename to PaddleCV/video/models/tall/tall.py
diff --git a/PaddleCV/PaddleVideo/models/tall/tall_net.py b/PaddleCV/video/models/tall/tall_net.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tall/tall_net.py
rename to PaddleCV/video/models/tall/tall_net.py
diff --git a/PaddleCV/PaddleVideo/models/tsm/README.md b/PaddleCV/video/models/tsm/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsm/README.md
rename to PaddleCV/video/models/tsm/README.md
diff --git a/PaddleCV/PaddleVideo/models/tsm/__init__.py b/PaddleCV/video/models/tsm/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsm/__init__.py
rename to PaddleCV/video/models/tsm/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/tsm/tsm.py b/PaddleCV/video/models/tsm/tsm.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsm/tsm.py
rename to PaddleCV/video/models/tsm/tsm.py
diff --git a/PaddleCV/PaddleVideo/models/tsm/tsm_res_model.py b/PaddleCV/video/models/tsm/tsm_res_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsm/tsm_res_model.py
rename to PaddleCV/video/models/tsm/tsm_res_model.py
diff --git a/PaddleCV/PaddleVideo/models/tsn/README.md b/PaddleCV/video/models/tsn/README.md
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsn/README.md
rename to PaddleCV/video/models/tsn/README.md
diff --git a/PaddleCV/PaddleVideo/models/tsn/__init__.py b/PaddleCV/video/models/tsn/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsn/__init__.py
rename to PaddleCV/video/models/tsn/__init__.py
diff --git a/PaddleCV/PaddleVideo/models/tsn/tsn.py b/PaddleCV/video/models/tsn/tsn.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsn/tsn.py
rename to PaddleCV/video/models/tsn/tsn.py
diff --git a/PaddleCV/PaddleVideo/models/tsn/tsn_res_model.py b/PaddleCV/video/models/tsn/tsn_res_model.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/tsn/tsn_res_model.py
rename to PaddleCV/video/models/tsn/tsn_res_model.py
diff --git a/PaddleCV/PaddleVideo/models/utils.py b/PaddleCV/video/models/utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/models/utils.py
rename to PaddleCV/video/models/utils.py
diff --git a/PaddleCV/PaddleVideo/predict.py b/PaddleCV/video/predict.py
similarity index 100%
rename from PaddleCV/PaddleVideo/predict.py
rename to PaddleCV/video/predict.py
diff --git a/PaddleCV/PaddleVideo/reader/__init__.py b/PaddleCV/video/reader/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/__init__.py
rename to PaddleCV/video/reader/__init__.py
diff --git a/PaddleCV/PaddleVideo/reader/bmn_reader.py b/PaddleCV/video/reader/bmn_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/bmn_reader.py
rename to PaddleCV/video/reader/bmn_reader.py
diff --git a/PaddleCV/PaddleVideo/reader/bsn_reader.py b/PaddleCV/video/reader/bsn_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/bsn_reader.py
rename to PaddleCV/video/reader/bsn_reader.py
diff --git a/PaddleCV/PaddleVideo/reader/ctcn_reader.py b/PaddleCV/video/reader/ctcn_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/ctcn_reader.py
rename to PaddleCV/video/reader/ctcn_reader.py
diff --git a/PaddleCV/PaddleVideo/reader/ets_reader.py b/PaddleCV/video/reader/ets_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/ets_reader.py
rename to PaddleCV/video/reader/ets_reader.py
diff --git a/PaddleCV/PaddleVideo/reader/feature_reader.py b/PaddleCV/video/reader/feature_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/feature_reader.py
rename to PaddleCV/video/reader/feature_reader.py
diff --git a/PaddleCV/PaddleVideo/reader/kinetics_reader.py b/PaddleCV/video/reader/kinetics_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/kinetics_reader.py
rename to PaddleCV/video/reader/kinetics_reader.py
diff --git a/PaddleCV/PaddleVideo/reader/nonlocal_reader.py b/PaddleCV/video/reader/nonlocal_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/nonlocal_reader.py
rename to PaddleCV/video/reader/nonlocal_reader.py
diff --git a/PaddleCV/PaddleVideo/reader/reader_utils.py b/PaddleCV/video/reader/reader_utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/reader_utils.py
rename to PaddleCV/video/reader/reader_utils.py
diff --git a/PaddleCV/PaddleVideo/reader/tall_reader.py b/PaddleCV/video/reader/tall_reader.py
similarity index 100%
rename from PaddleCV/PaddleVideo/reader/tall_reader.py
rename to PaddleCV/video/reader/tall_reader.py
diff --git a/PaddleCV/PaddleVideo/run.sh b/PaddleCV/video/run.sh
similarity index 100%
rename from PaddleCV/PaddleVideo/run.sh
rename to PaddleCV/video/run.sh
diff --git a/PaddleCV/PaddleVideo/run_ce.sh b/PaddleCV/video/run_ce.sh
similarity index 100%
rename from PaddleCV/PaddleVideo/run_ce.sh
rename to PaddleCV/video/run_ce.sh
diff --git a/PaddleCV/PaddleVideo/train.py b/PaddleCV/video/train.py
similarity index 100%
rename from PaddleCV/PaddleVideo/train.py
rename to PaddleCV/video/train.py
diff --git a/PaddleCV/PaddleVideo/utils/__init__.py b/PaddleCV/video/utils/__init__.py
similarity index 100%
rename from PaddleCV/PaddleVideo/utils/__init__.py
rename to PaddleCV/video/utils/__init__.py
diff --git a/PaddleCV/PaddleVideo/utils/config_utils.py b/PaddleCV/video/utils/config_utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/utils/config_utils.py
rename to PaddleCV/video/utils/config_utils.py
diff --git a/PaddleCV/PaddleVideo/utils/train_utils.py b/PaddleCV/video/utils/train_utils.py
similarity index 100%
rename from PaddleCV/PaddleVideo/utils/train_utils.py
rename to PaddleCV/video/utils/train_utils.py
diff --git a/PaddleCV/PaddleVideo/utils/utility.py b/PaddleCV/video/utils/utility.py
similarity index 100%
rename from PaddleCV/PaddleVideo/utils/utility.py
rename to PaddleCV/video/utils/utility.py
diff --git a/PaddleSlim/.run_ce.sh b/PaddleSlim/.run_ce.sh
deleted file mode 100755
index 3e20348057d61996ef7044aad753a26ac9eecaf2..0000000000000000000000000000000000000000
--- a/PaddleSlim/.run_ce.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-# This file is only used for continuous evaluation.
-
-export FLAGS_eager_delete_tensor_gb=0.0
-export CUDA_VISIBLE_DEVICES=3
-
-if [ ! -d 'pretrain' ]; then
- ln -s ${PRETRAINED_MODEL_PATH} ./pretrain
-fi
-
-if [ ! -d 'data' ]; then
- ln -s ${ILSVRC2012_DATA_PATH} ./data
-fi
-
-if [ -d 'checkpoints' ]; then
- rm -rf checkpoints
-fi
-
-sed -i "s/epoch: 200/epoch: 1/g" configs/filter_pruning_uniform.yaml
-
-python compress.py \
- --model "MobileNet" \
- --pretrained_model ./pretrain/MobileNetV1_pretrained \
- --compress_config ./configs/filter_pruning_uniform.yaml 2>&1 | tee run.log | python _ce.py
-
-
-
diff --git a/PaddleSlim/README.md b/PaddleSlim/README.md
deleted file mode 100644
index 1b3cc97115d3e2da369f93baadc2ced0143db192..0000000000000000000000000000000000000000
--- a/PaddleSlim/README.md
+++ /dev/null
@@ -1,217 +0,0 @@
-# A new version of PaddleSlim has been released; the project has moved to: https://github.com/PaddlePaddle/PaddleSlim
-
-
-
-
-
-
-
----
-# PaddleSlim: a model compression toolkit
-
-PaddleSlim is a submodule of the PaddlePaddle framework for compressing models, mainly in computer vision. It implements the three mainstream compression strategies (network pruning, quantization, and distillation) and also provides hyperparameter search and small-network architecture search. Later versions will add more compression strategies and improve support for NLP models.
-
-## Contents
-- [Features](#features)
-- [Architecture](#architecture)
-- [Feature list](#feature-list)
-- [Experimental results and ModelZoo](#experimental-results)
-- [Model export formats](#model-export-formats)
-
-## Features
-
-The PaddleSlim toolkit has the following features:
-
-### Simple interface
-
-- Configurable parameters are managed centrally in configuration files, which keeps experiments easy to manage
-- Adding very little code to an ordinary training script is enough to run model compression
-
-See: [usage examples](docs/demo.md)
-
-### Good results
-
-- Even on MobileNetV1, a model with little redundancy, the channel pruning strategy still shrinks the model while keeping the accuracy loss small.
-- The distillation strategy clearly improves the accuracy of the original model.
-- Combining quantization-aware training with distillation shrinks the model and improves its accuracy at the same time.
-
-See: [results and ModelZoo](docs/model_zoo.md)
-
-### More capable and flexible
-
-- The pruning-based compression process is automated
-- The pruning strategy supports more kinds of network structures
-- Distillation supports several modes, and users can define their own loss combinations
-- Multiple compression strategies can be combined through simple configuration
-
-See: [usage documentation](docs/usage.md)
-
-## Architecture
-
-This section briefly explains how the compression toolkit works, to make the usage flow easier to follow.
-**Figure 1** shows the architecture of the toolkit; API dependencies run from top to bottom. The distillation, quantization, and pruning modules all depend indirectly on the underlying Paddle framework. The compression toolkit currently ships as part of the PaddlePaddle framework, so users who installed a regular Paddle build need to reinstall a build with compression support before they can use it.
-
-
-
-Figure 1
-
-
-As shown in **Figure 1**, the yellow modules at the top form the user interface. Invoking model compression from a Python script only requires constructing a Compressor object; the [usage documentation](docs/usage.md) explains this in detail.
-
-We call each compression algorithm a compression strategy. While the model is trained iteratively, the toolkit invokes the strategies registered by the user to compress the model, as shown in **Figure 2**. The toolkit encapsulates the training logic, so the user only needs to provide the network definition, the data, and the optimizer; the [usage documentation](docs/usage.md) covers this as well.
-
-
-
-Figure 2
-
-
-## Feature list
-
-
-### Model pruning
-
-- Supports three modes: uniform channel pruning, sensitivity-based pruning, and automatic pruning driven by an evolutionary algorithm
-- Supports many kinds of networks, such as VGG, ResNet, and MobileNet
-- Supports user-defined pruning ranges
-
-### Quantization-aware training
-
-- Supports two quantization modes, dynamic and static
-  - Dynamic: quantization parameters for the activations are computed on the fly at inference time.
-  - Static: at inference time, every input uses the same quantization parameters, estimated from the training data.
-- Supports whole-tensor and channel-wise quantization of weights
-- Supports saving models in a format compatible with Paddle Mobile
-
-### Distillation
-
-- Supports adding a combined loss between any layers of the teacher and student networks (two of these losses are sketched right after this list)
-  - FSP loss
-  - L2 loss
-  - softmax with cross-entropy loss
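-
-As a rough illustration of what the last two losses compute, here are simplified NumPy sketches (illustrative only, not the toolkit's implementation):
-
-```python
-import numpy as np
-
-def l2_distill_loss(teacher_feat, student_feat):
-    # L2 loss: mean squared distance between teacher and student feature maps
-    return np.mean((teacher_feat - student_feat) ** 2)
-
-def softmax_ce_distill_loss(teacher_logits, student_logits, eps=1e-8):
-    # softmax with cross-entropy loss: the student is trained to match
-    # the teacher's softmax output distribution
-    def softmax(z):
-        e = np.exp(z - z.max(axis=-1, keepdims=True))
-        return e / e.sum(axis=-1, keepdims=True)
-    t, s = softmax(teacher_logits), softmax(student_logits)
-    return -np.mean(np.sum(t * np.log(s + eps), axis=-1))
-```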
-
-### Light neural architecture search (Light-NAS)
-
-- Supports lightweight architecture search based on an evolutionary algorithm (Light-NAS)
-  - Supports distributed search
-  - Supports FLOPS / hardware-latency constraints
-  - Supports latency estimation on multiple platforms
-
-### Other features
-
-- Hyperparameters of a compression task are managed via configuration files
-- Multiple compression strategies can be combined
-- Distillation and pruning support checkpoints
-
-## Experimental results
-
-This section lists selected results for the PaddleSlim toolkit. For more data and pretrained model downloads, see: [detailed results and ModelZoo](docs/model_zoo.md)
-
-### Quantization-aware training
-
-Evaluated on the ImageNet-1000 dataset. Top-5/top-1 accuracy before and after quantization-aware training:
-
-| Model | FP32| int8(A:abs_max, W:abs_max) | int8, (A:moving_average_abs_max, W:abs_max) |int8, (A:abs_max, W:channel_wise_abs_max) |
-|:---|:---:|:---:|:---:|:---:|
-|MobileNetV1|89.54%/70.91%|89.64%/71.01%|89.58%/70.86%|89.75%/71.13%|
-|ResNet50|92.80%/76.35%|93.12%/76.77%|93.07%/76.65%|93.15%/76.80%|
-
-Model size before and after quantization-aware training:
-
-| Model | FP32 | int8(A:abs_max, W:abs_max) | int8, (A:moving_average_abs_max, W:abs_max) | int8, (A:abs_max, W:channel_wise_abs_max) |
-| :--- | :---: | :---: | :---: | :---: |
-| MobileNetV1 | 17M | 4.8M(-71.76%) | 4.9M(-71.18%) | 4.9M(-71.18%) |
-| ResNet50 | 99M | 26M(-73.74%) | 27M(-72.73%) | 27M(-72.73%) |
-
-Note: abs_max is dynamic quantization, moving_average_abs_max is static quantization, and channel_wise_abs_max quantizes convolution weights per channel (A denotes activations, W denotes weights).
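-
-To make the note above concrete, here is a minimal NumPy sketch of fake quantization with an abs_max scale (an illustration of the idea only, not PaddleSlim's implementation):
-
-```python
-import numpy as np
-
-def fake_quant_abs_max(x, num_bits=8):
-    # abs_max: the scale is the maximum absolute value of the tensor
-    qmax = 2 ** (num_bits - 1) - 1             # 127 for int8
-    scale = max(float(np.abs(x).max()), 1e-8)  # avoid division by zero
-    q = np.clip(np.round(x / scale * qmax), -qmax, qmax)
-    return q.astype(np.int8), scale
-
-x = np.random.randn(4, 4).astype(np.float32)
-q, scale = fake_quant_abs_max(x)
-x_hat = q.astype(np.float32) * scale / 127     # dequantized approximation
-```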
-
-### Channel pruning
-
-Dataset: ImageNet-1000
-Model: MobileNetV1
-Original model size: 17M
-Original accuracy (top5/top1): 89.54% / 70.91%
-
-#### Uniform channel pruning
-
-| FLOPS | model size | accuracy loss (top5/top1) | accuracy (top5/top1) |
-|---|---|---|---|
-| -50%|-47.0%(9.0M)|-0.41% / -1.08%|88.92% / 69.66%|
-| -60%|-55.9%(7.5M)|-1.34% / -2.67%|88.22% / 68.24%|
-| -70%|-65.3%(5.9M)|-2.55% / -4.34%|86.99% / 66.57%|
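-
-Uniform pruning removes the same fraction of channels from every prunable layer; which channels are removed is decided by a ranking criterion. Below is a minimal sketch of that selection step, assuming L1-norm ranking (a common criterion, used here purely for illustration):
-
-```python
-import numpy as np
-
-def channels_to_prune(conv_weight, ratio):
-    # conv_weight has shape (out_channels, in_channels, kh, kw);
-    # rank output channels by the L1 norm of their filters
-    l1 = np.abs(conv_weight).sum(axis=(1, 2, 3))
-    n_prune = int(conv_weight.shape[0] * ratio)
-    # the n_prune channels with the smallest L1 norm are pruned
-    return np.argsort(l1)[:n_prune]
-
-w = np.random.randn(64, 32, 3, 3)
-idx = channels_to_prune(w, ratio=0.5)  # indices of the 32 channels to remove
-```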
-
-#### Sensitivity-based iterative pruning
-
-| FLOPS | accuracy (top5/top1) |
-|---|---|
-| -0% |89.54% / 70.91% |
-| -20% |90.08% / 71.48% |
-| -36% |89.62% / 70.83%|
-| -50% |88.77% / 69.31%|
-
-### Distillation
-
-Dataset: ImageNet-1000
-Model: MobileNetV1
-
-| - | accuracy (top5/top1) | gain (top5/top1) |
-|---|---|---|
-| trained alone | 89.54% / 70.91%| - |
-| distilled from ResNet50 | 90.92% / 71.97%| +1.28% / +1.06%|
-
-### Combined strategies
-
-Dataset: ImageNet-1000
-Model: MobileNetV1
-
-| compression strategy | accuracy (top5/top1) | model size |
-|---|---|---|
-| Baseline|89.54% / 70.91%|17.0M|
-| ResNet50 distillation|90.92% / 71.97%|17.0M|
-| ResNet50 distillation + quantization|90.94% / 72.01%|4.8M|
-| pruning at -50% FLOPS|89.13% / 69.83%|9.0M|
-| pruning at -50% FLOPS + quantization|89.11% / 69.20%|2.3M|
-
-### Architecture search experiments
-
-Dataset: ImageNet-1000
-
-| - | FLOPS | Top1/Top5 accuracy | GPU cost |
-|------------------|-------|--------------------|----------------------|
-| MobileNetV2 | 0% | 71.90% / 90.55% | - |
-| Light-NAS-model0 | -3% | 72.45% / 90.70% | 1.2K GPU hours(V100) |
-| Light-NAS-model1 | -17% | 71.84% / 90.45% | 1.2K GPU hours(V100) |
-
-Architecture search constrained by hardware latency:
-
-| - | Latency | Top1/Top5 accuracy | GPU cost |
-|---------------|---------|--------------------|---------------------|
-| MobileNetV2 | 0% | 71.90% / 90.55% | - |
-| RK3288 dev board | -23% | 71.97% / 90.35% | 1.2K GPU hours(V100) |
-| Android phone | -20% | 72.06% / 90.36% | 1.2K GPU hours(V100) |
-| iPhone | -17% | 72.22% / 90.47% | 1.2K GPU hours(V100) |
-
-
-## Model export formats
-
-The compression framework can export models in the following formats:
-
-- **Paddle Fluid format:** can be loaded and used with the [Paddle](https://github.com/PaddlePaddle/Paddle) and [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) frameworks.
-- **Paddle Mobile format:** used only with quantization-aware training; compatible with the model format of [Paddle Mobile](https://github.com/PaddlePaddle/paddle-mobile) (Paddle Mobile has since been upgraded to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite)).
diff --git a/PaddleSlim/__init__.py b/PaddleSlim/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/PaddleSlim/_ce.py b/PaddleSlim/_ce.py
deleted file mode 100644
index 3525b9ff231f2e93499616137cb986b6886ce1b6..0000000000000000000000000000000000000000
--- a/PaddleSlim/_ce.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
-
-import os
-import sys
-import re
-sys.path.append(os.environ['ceroot'])
-from kpi import AccKpi
-
-test_acc_top1_kpi = AccKpi(
- 'test_acc_top1', 0.02, 0, actived=True, desc='TOP1 ACC')
-test_acc_top5_kpi = AccKpi(
- 'test_acc_top5', 0.02, 0, actived=True, desc='TOP5 ACC')
-tracking_kpis = [test_acc_top1_kpi, test_acc_top5_kpi]
-
-
-def parse_log(log):
-    '''
-    Parse the KPI values from the training log.
-    '''
-    pattern = r"^.*Final eval result: \['acc_top1', 'acc_top5'\]=\[(?P<test_acc_top1>0\.\d+)\s+(?P<test_acc_top5>0\.\d+)\s*\]"
- prog = re.compile(pattern)
- for line in log.split('\n'):
- result = prog.match(line)
- if not result:
- continue
-        for kpi_name, kpi_value in result.groupdict().items():
- yield kpi_name, float(kpi_value)
-
-
-def log_to_ce(log):
- """
- log to ce
- """
- kpi_tracker = {}
- for kpi in tracking_kpis:
- kpi_tracker[kpi.name] = kpi
-
- for(kpi_name, kpi_value) in parse_log(log):
- kpi_tracker[kpi_name].add_record(kpi_value)
- kpi_tracker[kpi_name].persist()
-
-
-if __name__ == '__main__':
- log = sys.stdin.read()
- log_to_ce(log)
-
-
diff --git a/PaddleSlim/classification/__init__.py b/PaddleSlim/classification/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/PaddleSlim/classification/distillation/README.md b/PaddleSlim/classification/distillation/README.md
deleted file mode 100755
index 10af9b9cacc64a77ead34a92f68ff9bdfd7ab436..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/distillation/README.md
+++ /dev/null
@@ -1,183 +0,0 @@
->Before running this example, install Paddle 1.6 or later.
-
-# Knowledge distillation example for classification models
-
-## Overview
-
-This example uses the [distillation strategy](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/tutorial.md#3-%E8%92%B8%E9%A6%8F) provided by PaddleSlim to distill a classification model.
->By default this example uses the ILSVRC2012 dataset, stored under `models/PaddleSlim/data/`. Follow the [data preparation guide](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#数据准备) to set up your dataset before running the training script run.sh.
-
-Before reading this example, it helps to be familiar with the following:
-
-- [the standard way of training classification models](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification)
-- [the PaddleSlim usage documentation](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md)
-
-
-## Configuration file
-
-For how to write configuration files, see:
-
-- [how to write PaddleSlim configuration files](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#122-%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E7%9A%84%E4%BD%BF%E7%94%A8)
-- [how to configure the distillation strategy](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#23-%E8%92%B8%E9%A6%8F)
-
-This example uses MobileNetV2, whose main building block is the inverted residual, as shown in Figure 1:
-
-
-
-
-Figure 1
-
-
-
-First, to get an overall picture of the `student model` and the `teacher model` and pin down what to distill, we list the name and shape of each network's variables (Variable) with the following commands:
-
-```python
-# inspect the student model's Variables
-for v in fluid.default_main_program().list_vars():
-    print(v.name, v.shape)
-```
-```python
-# inspect the teacher model's Variables
-for v in teacher_program.list_vars():
-    print(v.name, v.shape)
-```
-
-Comparing the two lists shows that the prediction outputs of the `student model` and the `teacher model` are, respectively:
-
-```bash
-# student model
-fc_0.tmp_0 (-1, 1000)
-# teacher model
-res50_fc_0.tmp_0 (-1, 1000)
-```
-
-So we distill these two feature maps with the `l2_distiller`, configured in the configuration file as follows:
-
-```yaml
-distillers:
- l2_distiller:
- class: 'L2Distiller'
- teacher_feature_map: 'res50_fc_0.tmp_1'
- student_feature_map: 'fc_0.tmp_1'
- distillation_loss_weight: 1
-strategies:
- distillation_strategy:
- class: 'DistillationStrategy'
- distillers: ['l2_distiller']
- start_epoch: 0
- end_epoch: 130
-```
-
-Following the same steps, you can pick another loss for the distillation strategy; PaddleSlim supports `FSP_loss`, `L2_loss`, and `softmax_with_cross_entropy_loss`.
-
-
-## Training
-
-The compression script compress.py is written after [PaddleCV/image_classification/train.py](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/train.py).
-It defines a Compressor object that executes the compression task.
-
-Run `python compress.py` to start the task with default parameters, and `python compress.py --help` to list the configurable ones, summarized below:
-
-- use_gpu: whether to use the GPU. If so, make sure the current environment and Paddle build support GPU. Defaults to True.
-- batch_size: batch size used for distillation training.
-- total_images: total number of training images in the dataset.
-- class_dim: number of classes in the dataset.
-- image_shape: image size used by the dataset.
-- model: the target model to compress; this example supports 'MobileNetV1', 'MobileNetV2', and 'ResNet34'.
-- pretrained_model: path to the student's pretrained model, downloadable from [here](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#%E5%B7%B2%E5%8F%91%E5%B8%83%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD).
-- teacher_model: the teacher model; this example supports 'ResNet50'.
-- teacher_pretrained_model: path to the teacher's pretrained model, downloadable from [here](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#%E5%B7%B2%E5%8F%91%E5%B8%83%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD).
-- config_file: configuration file for the compression strategy.
-
-You can run this example through the script `run.sh`; make sure the [pretrained models](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#%E5%B7%B2%E5%8F%91%E5%B8%83%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD) have been downloaded correctly.
-
-
-### Saving checkpoints
-
-If `checkpoint_path` is set in the configuration file, checkpoints are saved automatically during the compression task. When the task is interrupted, restarting it
-will load the most recent checkpoint from `checkpoint_path` (files are ordered numerically). If you do not want a restarted task to resume from a checkpoint,
-change `checkpoint_path` in the configuration file, or empty the directory it points to.
-
->Note: the configuration is not stored in the checkpoint, so changes made to the configuration file before a restart will take effect.
-
-
-## Evaluation
-
-If `checkpoint_path` is set in the configuration file, a compressed model for evaluation is saved after every epoch.
-It is stored under `${checkpoint_path}/${epoch_id}/eval_model/` and consists of two files, `__model__` and `__params__`:
-`__model__` holds the model structure and `__params__` holds the parameters.
-
-If you do not need to save the evaluation model, set the `save_eval_model` option to False (default True) when constructing the Compressor object.
-
-The script PaddleSlim/classification/eval.py shows how to evaluate this model on the evaluation dataset.
-
-## Prediction
-
-If `checkpoint_path` is set in the configuration file and the `prune_infer_model` option is passed when constructing the Compressor object, an `inference model`
-is saved after every epoch. It is derived from eval_program by deleting the operators that are unnecessary for inference.
-
-The model is stored under `${checkpoint_path}/${epoch_id}/eval_model/` and consists of two files, `__model__.infer` and `__params__`:
-`__model__.infer` holds the model structure and `__params__` holds the parameters.
-
-For more about the `prune_infer_model` option, see: [Compressor introduction](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#121-%E5%A6%82%E4%BD%95%E6%94%B9%E5%86%99%E6%99%AE%E9%80%9A%E8%AE%AD%E7%BB%83%E8%84%9A%E6%9C%AC)
-
-### Prediction in Python
-
-The script PaddleSlim/classification/infer.py shows how to load the inference model with the fluid Python API and run prediction; a minimal sketch follows.
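-
-The sketch below loads the pruned inference model and runs one forward pass (the checkpoint path, epoch id, and input shape are illustrative placeholders; the actual infer.py may differ):
-
-```python
-import numpy as np
-import paddle.fluid as fluid
-
-place = fluid.CPUPlace()
-exe = fluid.Executor(place)
-
-# load the inference model written by the Compressor for some epoch
-program, feed_names, fetch_targets = fluid.io.load_inference_model(
-    './checkpoints/0/eval_model/', exe,
-    model_filename='__model__.infer', params_filename='__params__')
-
-image = np.random.rand(1, 3, 224, 224).astype('float32')  # placeholder input
-pred = exe.run(program, feed={feed_names[0]: image}, fetch_list=fetch_targets)
-print(pred[0].shape)  # (1, class_dim) classification probabilities
-```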
-
-### PaddleLite
-
-The inference model produced by this example can be loaded and used directly by PaddleLite.
-For how to use PaddleLite, see: [PaddleLite usage documentation](https://github.com/PaddlePaddle/Paddle-Lite/wiki#%E4%BD%BF%E7%94%A8)
-
-## Results
-
-### MobileNetV1
-
-| - | top1_acc/top5_acc |
-| -------- | ----------------- |
-| baseline | 70.99%/89.68% |
-| distilled | 72.30%/90.98% |
-
-#### Training hyperparameters
-
-- batch size: 256
-- lr_strategy: piecewise_decay
-- step_epochs: 30, 60, 90
-- num_epochs: 120
-- l2_decay: 4e-5
-- init lr: 0.1
-
-### MobileNetV2
-
-| - | top1_acc/top5_acc |
-| -------- | ----------------- |
-| baseline | 72.15%/90.65% |
-| distilled | 70.95%/90.40% |
-
-#### Training hyperparameters
-
-- batch size: 256
-- lr_strategy: piecewise_decay
-- step_epochs: 30, 60, 90
-- num_epochs: 120
-- l2_decay: 4e-5
-- init lr: 0.1
-
-### ResNet34
-
-| - | top1_acc/top5_acc |
-| -------- | ----------------- |
-| baseline | 74.57%/92.14% |
-| distilled | 74.48%/91.95% |
-
-#### Training hyperparameters
-
-- batch size: 256
-- lr_strategy: piecewise_decay
-- step_epochs: 30, 60, 90
-- num_epochs: 120
-- l2_decay: 4e-5
-- init lr: 0.1
-
-## FAQ
diff --git a/PaddleSlim/classification/distillation/__init__.py b/PaddleSlim/classification/distillation/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/PaddleSlim/classification/distillation/compress.py b/PaddleSlim/classification/distillation/compress.py
deleted file mode 100644
index bf182fa22efad4b95861bed80484ca446050ebb5..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/distillation/compress.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import os
-import time
-import sys
-import logging
-import paddle
-import argparse
-import functools
-import paddle.fluid as fluid
-sys.path.append("..")
-import imagenet_reader as reader
-import models
-sys.path.append("../../")
-from utility import add_arguments, print_arguments
-
-from paddle.fluid.contrib.slim import Compressor
-
-logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
-_logger = logging.getLogger(__name__)
-_logger.setLevel(logging.INFO)
-
-parser = argparse.ArgumentParser(description=__doc__)
-add_arg = functools.partial(add_arguments, argparser=parser)
-# yapf: disable
-add_arg('batch_size', int, 64*4, "Minibatch size.")
-add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
-add_arg('total_images', int, 1281167, "Training image number.")
-add_arg('class_dim', int, 1000, "Class number.")
-add_arg('image_shape', str, "3,224,224", "Input image size")
-add_arg('model', str, "MobileNet", "Set the network to use.")
-add_arg('pretrained_model', str, None, "The path of the student's pretrained model (None to train from scratch).")
-add_arg('teacher_model', str, None, "Set the teacher network to use.")
-add_arg('teacher_pretrained_model', str, None, "The path of the teacher's pretrained model.")
-add_arg('compress_config', str, None, "The config file for compression with yaml format.")
-add_arg('enable_ce', bool, False, "If set, run the task with continuous evaluation logs.")
-
-# yapf: enable
-
-model_list = [m for m in dir(models) if "__" not in m]
-
-
-def compress(args):
- # add ce
- if args.enable_ce:
- SEED = 1
- fluid.default_main_program().random_seed = SEED
- fluid.default_startup_program().random_seed = SEED
-
- image_shape = [int(m) for m in args.image_shape.split(",")]
-
- assert args.model in model_list, "{} is not in lists: {}".format(args.model,
- model_list)
- image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
- # model definition
- model = models.__dict__[args.model]()
-
- if args.model == 'ResNet34':
- model.prefix_name = 'res34'
- out = model.net(input=image, class_dim=args.class_dim, fc_name='fc_0')
- else:
- out = model.net(input=image, class_dim=args.class_dim)
- cost = fluid.layers.cross_entropy(input=out, label=label)
- avg_cost = fluid.layers.mean(x=cost)
- acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
- #print("="*50+"student_model_params"+"="*50)
- #for v in fluid.default_main_program().list_vars():
- # print(v.name, v.shape)
-
- val_program = fluid.default_main_program().clone()
- boundaries = [
- args.total_images / args.batch_size * 30, args.total_images /
- args.batch_size * 60, args.total_images / args.batch_size * 90
- ]
- values = [0.1, 0.01, 0.001, 0.0001]
- opt = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(
- boundaries=boundaries, values=values),
- regularization=fluid.regularizer.L2Decay(4e-5))
-
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(fluid.default_startup_program())
-
- if args.pretrained_model:
-
- def if_exist(var):
- return os.path.exists(os.path.join(args.pretrained_model, var.name))
-
- fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
-
- val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
- val_feed_list = [('image', image.name), ('label', label.name)]
- val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
-
- train_reader = paddle.batch(
- reader.train(), batch_size=args.batch_size, drop_last=True)
- train_feed_list = [('image', image.name), ('label', label.name)]
- train_fetch_list = [('loss', avg_cost.name)]
-
- teacher_programs = []
- distiller_optimizer = None
-
- teacher_model = models.__dict__[args.teacher_model](prefix_name='res50')
- # define teacher program
- teacher_program = fluid.Program()
- startup_program = fluid.Program()
- with fluid.program_guard(teacher_program, startup_program):
- img = teacher_program.global_block()._clone_variable(
- image, force_persistable=False)
- predict = teacher_model.net(img,
- class_dim=args.class_dim,
- fc_name='fc_0')
- #print("="*50+"teacher_model_params"+"="*50)
- #for v in teacher_program.list_vars():
- # print(v.name, v.shape)
- #return
-
- exe.run(startup_program)
- assert args.teacher_pretrained_model and os.path.exists(
- args.teacher_pretrained_model
- ), "teacher_pretrained_model should be set when teacher_model is not None."
-
- def if_exist(var):
- return os.path.exists(
- os.path.join(args.teacher_pretrained_model, var.name))
-
- fluid.io.load_vars(
- exe,
- args.teacher_pretrained_model,
- main_program=teacher_program,
- predicate=if_exist)
-
- distiller_optimizer = opt
- teacher_programs.append(teacher_program.clone(for_test=True))
-
- com_pass = Compressor(
- place,
- fluid.global_scope(),
- fluid.default_main_program(),
- train_reader=train_reader,
- train_feed_list=train_feed_list,
- train_fetch_list=train_fetch_list,
- eval_program=val_program,
- eval_reader=val_reader,
- eval_feed_list=val_feed_list,
- eval_fetch_list=val_fetch_list,
- teacher_programs=teacher_programs,
- save_eval_model=True,
- prune_infer_model=[[image.name], [out.name]],
- train_optimizer=opt,
- distiller_optimizer=distiller_optimizer)
- com_pass.config(args.compress_config)
- com_pass.run()
-
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- compress(args)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/classification/distillation/configs/mobilenetv1_resnet50_distillation.yaml b/PaddleSlim/classification/distillation/configs/mobilenetv1_resnet50_distillation.yaml
deleted file mode 100644
index 750cbae23928352fe0903f84327be0efab9b55ef..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/distillation/configs/mobilenetv1_resnet50_distillation.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: 1.0
-distillers:
- l2_distiller:
- class: 'L2Distiller'
- teacher_feature_map: 'res50_fc_0.tmp_0'
- student_feature_map: 'fc_0.tmp_0'
- distillation_loss_weight: 1
-strategies:
- distillation_strategy:
- class: 'DistillationStrategy'
- distillers: ['l2_distiller']
- start_epoch: 0
- end_epoch: 130
-compressor:
- epoch: 130
- checkpoint_path: './checkpoints/'
- strategies:
- - distillation_strategy
diff --git a/PaddleSlim/classification/distillation/configs/mobilenetv2_resnet50_distillation.yaml b/PaddleSlim/classification/distillation/configs/mobilenetv2_resnet50_distillation.yaml
deleted file mode 100644
index 079d9231b427a8a270cfc719db68e4f17d712507..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/distillation/configs/mobilenetv2_resnet50_distillation.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: 1.0
-distillers:
- l2_distiller:
- class: 'L2Distiller'
- teacher_feature_map: 'res50_fc_0.tmp_1'
- student_feature_map: 'fc_0.tmp_1'
- distillation_loss_weight: 1
-strategies:
- distillation_strategy:
- class: 'DistillationStrategy'
- distillers: ['l2_distiller']
- start_epoch: 0
- end_epoch: 130
-compressor:
- epoch: 130
- checkpoint_path: './checkpoints/'
- strategies:
- - distillation_strategy
diff --git a/PaddleSlim/classification/distillation/configs/resnet34_resnet50_distillation.yaml b/PaddleSlim/classification/distillation/configs/resnet34_resnet50_distillation.yaml
deleted file mode 100644
index e19dc0e9faaaa3b5c9277ae58cb0aa25bdb05ab3..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/distillation/configs/resnet34_resnet50_distillation.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: 1.0
-distillers:
- l2_distiller:
- class: 'L2Distiller'
- teacher_feature_map: 'res50_fc_0.tmp_0'
- student_feature_map: 'res34_fc_0.tmp_0'
- distillation_loss_weight: 1
-strategies:
- distillation_strategy:
- class: 'DistillationStrategy'
- distillers: ['l2_distiller']
- start_epoch: 0
- end_epoch: 130
-compressor:
- epoch: 130
- checkpoint_path: './checkpoints/'
- strategies:
- - distillation_strategy
diff --git a/PaddleSlim/classification/distillation/images/mobilenetv2.jpg b/PaddleSlim/classification/distillation/images/mobilenetv2.jpg
deleted file mode 100644
index c0dd3824dc77f8d3c87d719c13ceca16a3472e3c..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/classification/distillation/images/mobilenetv2.jpg and /dev/null differ
diff --git a/PaddleSlim/classification/distillation/run.sh b/PaddleSlim/classification/distillation/run.sh
deleted file mode 100644
index c18e6eeed028aa4f95735decf9901afde2e8af1a..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/distillation/run.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env bash
-
-# download pretrain model
-root_url="http://paddle-imagenet-models-name.bj.bcebos.com"
-ResNet50="ResNet50_pretrained.tar"
-pretrain_dir='../pretrain'
-
-if [ ! -d ${pretrain_dir} ]; then
- mkdir ${pretrain_dir}
-fi
-
-cd ${pretrain_dir}
-
-if [ ! -f ${ResNet50} ]; then
- wget ${root_url}/${ResNet50}
- tar xf ${ResNet50}
-fi
-
-cd -
-
-# enable GC strategy
-export FLAGS_fast_eager_deletion_mode=1
-export FLAGS_eager_delete_tensor_gb=0.0
-
-# for distillation
-#-----------------
-export CUDA_VISIBLE_DEVICES=0,1,2,3
-
-# for mobilenet_v1 distillation
-cd ${pretrain_dir}/ResNet50_pretrained
-for files in $(ls res50_*)
- do mv $files ${files#*_}
-done
-for files in $(ls *)
- do mv $files "res50_"$files
-done
-cd -
-
-python -u compress.py \
---model "MobileNet" \
---teacher_model "ResNet50" \
---teacher_pretrained_model ../pretrain/ResNet50_pretrained \
---compress_config ./configs/mobilenetv1_resnet50_distillation.yaml \
-> mobilenet_v1.log 2>&1 &
-tail -f mobilenet_v1.log
-
-## for mobilenet_v2 distillation
-#cd ${pretrain_dir}/ResNet50_pretrained
-#for files in $(ls res50_*)
-# do mv $files ${files#*_}
-#done
-#for files in $(ls *)
-# do mv $files "res50_"$files
-#done
-#cd -
-#
-#python -u compress.py \
-#--model "MobileNetV2" \
-#--teacher_model "ResNet50" \
-#--teacher_pretrained_model ../pretrain/ResNet50_pretrained \
-#--compress_config ./configs/mobilenetv2_resnet50_distillation.yaml\
-#> mobilenet_v2.log 2>&1 &
-#tailf mobilenet_v2.log
-
-## for resnet34 distillation
-#cd ${pretrain_dir}/ResNet50_pretrained
-#for files in $(ls res50_*)
-# do mv $files ${files#*_}
-#done
-#for files in $(ls *)
-# do mv $files "res50_"$files
-#done
-#cd -
-#
-#python -u compress.py \
-#--model "ResNet34" \
-#--teacher_model "ResNet50" \
-#--teacher_pretrained_model ../pretrain/ResNet50_pretrained \
-#--compress_config ./configs/resnet34_resnet50_distillation.yaml \
-#> resnet34.log 2>&1 &
-#tailf resnet34.log
diff --git a/PaddleSlim/classification/eval.py b/PaddleSlim/classification/eval.py
deleted file mode 100644
index 6cea460fb99156da42cf9f4718af2228b3e39a4e..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/eval.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
-#
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
-
-import os
-import sys
-import numpy as np
-import argparse
-import functools
-
-import paddle
-import paddle.fluid as fluid
-import imagenet_reader as reader
-sys.path.append("../")
-from utility import add_arguments, print_arguments
-
-parser = argparse.ArgumentParser(description=__doc__)
-# yapf: disable
-add_arg = functools.partial(add_arguments, argparser=parser)
-add_arg('use_gpu', bool, False, "Whether to use GPU or not.")
-add_arg('model_path', str, "./pruning/checkpoints/resnet50/2/eval_model/", "The path of the model to evaluate.")
-add_arg('model_name', str, "__model__", "model filename for inference model")
-add_arg('params_name', str, "__params__", "params filename for inference model")
-# yapf: enable
-
-
-def eval(args):
- # parameters from arguments
-
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
-
- val_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
- args.model_path,
- exe,
- model_filename=args.model_name,
- params_filename=args.params_name)
- val_reader = paddle.batch(reader.val(), batch_size=128)
- feeder = fluid.DataFeeder(
- place=place, feed_list=feed_target_names, program=val_program)
-
- results = []
- for batch_id, data in enumerate(val_reader()):
-
- # top1_acc, top5_acc
- if len(feed_target_names) == 1:
- # eval "infer model", which input is image, output is classification probability
- image = [[d[0]] for d in data]
- label = [[d[1]] for d in data]
- feed_data = feeder.feed(image)
- pred = exe.run(val_program,
- feed=feed_data,
- fetch_list=fetch_targets)
- pred = np.array(pred[0])
- label = np.array(label)
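- # np.argsort sorts ascending, so the last k columns hold the k
- # highest-probability classes; reversing them puts the best class first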
- sort_array = pred.argsort(axis=1)
- top_1_pred = sort_array[:, -1:][:, ::-1]
- top_1 = np.mean(label == top_1_pred)
- top_5_pred = sort_array[:, -5:][:, ::-1]
- acc_num = 0
- for i in range(len(label)):
- if label[i][0] in top_5_pred[i]:
- acc_num += 1
- top_5 = float(acc_num) / len(label)
- results.append([top_1, top_5])
- else:
- # eval "eval model", which inputs are image and label, output is top1 and top5 accuracy
- result = exe.run(val_program,
- feed=feeder.feed(data),
- fetch_list=fetch_targets)
- result = [np.mean(r) for r in result]
- results.append(result)
- result = np.mean(np.array(results), axis=0)
- print("top1_acc/top5_acc= {}".format(result))
- sys.stdout.flush()
-
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- eval(args)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/classification/imagenet_reader.py b/PaddleSlim/classification/imagenet_reader.py
deleted file mode 100644
index f1f9909646f2e5c21203fe3c070156eb901ff0ca..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/imagenet_reader.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import os
-import math
-import random
-import functools
-import numpy as np
-import paddle
-from PIL import Image, ImageEnhance
-
-random.seed(0)
-np.random.seed(0)
-
-DATA_DIM = 224
-
-THREAD = 16
-BUF_SIZE = 10240
-
-DATA_DIR = '../data/ILSVRC2012'
-DATA_DIR = os.path.join(os.path.split(os.path.realpath(__file__))[0], DATA_DIR)
-
-
-img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
-img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
-
-
-def resize_short(img, target_size):
- percent = float(target_size) / min(img.size[0], img.size[1])
- resized_width = int(round(img.size[0] * percent))
- resized_height = int(round(img.size[1] * percent))
- img = img.resize((resized_width, resized_height), Image.LANCZOS)
- return img
-
-
-def crop_image(img, target_size, center):
- width, height = img.size
- size = target_size
- if center:
- w_start = (width - size) // 2
- h_start = (height - size) // 2
- else:
- w_start = np.random.randint(0, width - size + 1)
- h_start = np.random.randint(0, height - size + 1)
- w_end = w_start + size
- h_end = h_start + size
- img = img.crop((w_start, h_start, w_end, h_end))
- return img
-
-
-def random_crop(img, size, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
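- # Inception-style random crop: sample an aspect ratio and a target area
- # (as a fraction of the source image), crop accordingly, then resize the
- # crop to `size` x `size`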
- aspect_ratio = math.sqrt(np.random.uniform(*ratio))
- w = 1. * aspect_ratio
- h = 1. / aspect_ratio
-
- bound = min((float(img.size[0]) / img.size[1]) / (w**2),
- (float(img.size[1]) / img.size[0]) / (h**2))
- scale_max = min(scale[1], bound)
- scale_min = min(scale[0], bound)
-
- target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,
- scale_max)
- target_size = math.sqrt(target_area)
- w = int(target_size * w)
- h = int(target_size * h)
-
- i = np.random.randint(0, img.size[0] - w + 1)
- j = np.random.randint(0, img.size[1] - h + 1)
-
- img = img.crop((i, j, i + w, j + h))
- img = img.resize((size, size), Image.LANCZOS)
- return img
-
-
-def rotate_image(img):
- angle = np.random.randint(-10, 11)
- img = img.rotate(angle)
- return img
-
-
-def distort_color(img):
- def random_brightness(img, lower=0.5, upper=1.5):
- e = np.random.uniform(lower, upper)
- return ImageEnhance.Brightness(img).enhance(e)
-
- def random_contrast(img, lower=0.5, upper=1.5):
- e = np.random.uniform(lower, upper)
- return ImageEnhance.Contrast(img).enhance(e)
-
- def random_color(img, lower=0.5, upper=1.5):
- e = np.random.uniform(lower, upper)
- return ImageEnhance.Color(img).enhance(e)
-
- ops = [random_brightness, random_contrast, random_color]
- np.random.shuffle(ops)
-
- img = ops[0](img)
- img = ops[1](img)
- img = ops[2](img)
-
- return img
-
-
-def process_image(sample, mode, color_jitter, rotate):
- img_path = sample[0]
-
- img = Image.open(img_path)
- if mode == 'train':
- if rotate: img = rotate_image(img)
- img = random_crop(img, DATA_DIM)
- else:
- img = resize_short(img, target_size=256)
- img = crop_image(img, target_size=DATA_DIM, center=True)
- if mode == 'train':
- if color_jitter:
- img = distort_color(img)
- if np.random.randint(0, 2) == 1:
- img = img.transpose(Image.FLIP_LEFT_RIGHT)
-
- if img.mode != 'RGB':
- img = img.convert('RGB')
-
- img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
- img -= img_mean
- img /= img_std
-
- if mode == 'train' or mode == 'val':
- return img, sample[1]
- elif mode == 'test':
- return [img]
-
-
-def _reader_creator(file_list,
- mode,
- shuffle=False,
- color_jitter=False,
- rotate=False,
- data_dir=DATA_DIR,
- batch_size=1):
- def reader():
- try:
- with open(file_list) as flist:
- full_lines = [line.strip() for line in flist]
- if shuffle:
- np.random.shuffle(full_lines)
- if mode == 'train' and os.getenv('PADDLE_TRAINING_ROLE'):
- # distributed mode if the env var `PADDLE_TRAINING_ROLE` exists
- trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
- trainer_count = int(os.getenv("PADDLE_TRAINERS", "1"))
- per_node_lines = len(full_lines) // trainer_count
- lines = full_lines[trainer_id * per_node_lines:(
- trainer_id + 1) * per_node_lines]
- print(
- "read images from %d, length: %d, lines length: %d, total: %d"
- % (trainer_id * per_node_lines, per_node_lines,
- len(lines), len(full_lines)))
- else:
- lines = full_lines
-
- for line in lines:
- if mode == 'train' or mode == 'val':
- img_path, label = line.split()
- img_path = os.path.join(data_dir, img_path)
- yield img_path, int(label)
- elif mode == 'test':
- img_path = os.path.join(data_dir, line)
- yield [img_path]
- except Exception as e:
- print("Reader failed!\n{}".format(str(e)))
- os._exit(1)
-
- mapper = functools.partial(
- process_image, mode=mode, color_jitter=color_jitter, rotate=rotate)
-
- return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE)
-
-
-def train(data_dir=DATA_DIR):
- file_list = os.path.join(data_dir, 'train_list.txt')
- return _reader_creator(
- file_list,
- 'train',
- shuffle=True,
- color_jitter=False,
- rotate=False,
- data_dir=data_dir)
-
-
-def val(data_dir=DATA_DIR):
- file_list = os.path.join(data_dir, 'val_list.txt')
- return _reader_creator(file_list, 'val', shuffle=False, data_dir=data_dir)
-
-
-def test(data_dir=DATA_DIR):
- file_list = os.path.join(data_dir, 'test_list.txt')
- return _reader_creator(file_list, 'test', shuffle=False, data_dir=data_dir)
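-
-
-# A minimal smoke test (an editor's sketch, which assumes the dataset and
-# `val_list.txt` are already in place under DATA_DIR): pull one preprocessed
-# sample through the pipeline and check its shape.
-if __name__ == '__main__':
-    img, label = next(val()())
-    print(img.shape, label)  # expect (3, 224, 224) and an integer label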
diff --git a/PaddleSlim/classification/infer.py b/PaddleSlim/classification/infer.py
deleted file mode 100644
index 9f87230153d70b0bf4bfda382f641f15f7915617..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/infer.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
-#
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
-
-import os
-import sys
-import numpy as np
-import argparse
-import functools
-
-import paddle
-import paddle.fluid as fluid
-import imagenet_reader as reader
-sys.path.append("..")
-from utility import add_arguments, print_arguments
-
-parser = argparse.ArgumentParser(description=__doc__)
-# yapf: disable
-add_arg = functools.partial(add_arguments, argparser=parser)
-add_arg('use_gpu', bool, False, "Whether to use GPU or not.")
-add_arg('model_path', str, "./pruning/checkpoints/resnet50/2/eval_model/", "The path of the inference model to load.")
-add_arg('model_name', str, "__model__.infer", "inference model filename")
-add_arg('params_name', str, "__params__", "inference model params filename")
-# yapf: enable
-
-def infer(args):
- # parameters from arguments
-
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
-
- test_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
- exe,
- model_filename=args.model_name,
- params_filename=args.params_name)
- test_reader = paddle.batch(reader.test(), batch_size=1)
- feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=test_program)
-
- for batch_id, data in enumerate(test_reader()):
-
- # top-1 predicted class for each sample in the batch
- result = exe.run(test_program,
- feed=feeder.feed(data),
- fetch_list=fetch_targets)
- result = np.array(result[0])
- print(result.argsort(axis=1)[:, -1])
- sys.stdout.flush()
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- infer(args)
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/classification/models/__init__.py b/PaddleSlim/classification/models/__init__.py
deleted file mode 100644
index e843697407850c049a5427d2b6533c417e59c228..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/models/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .mobilenet import MobileNet
-from .resnet import ResNet34, ResNet50
-from .mobilenet_v2 import MobileNetV2
-
-__all__ = ['MobileNet', 'ResNet34', 'ResNet50', 'MobileNetV2']
diff --git a/PaddleSlim/classification/models/mobilenet.py b/PaddleSlim/classification/models/mobilenet.py
deleted file mode 100644
index 921d6226ca2a65d5c9b57e27bf6607c7376c51f6..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/models/mobilenet.py
+++ /dev/null
@@ -1,197 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import paddle.fluid as fluid
-from paddle.fluid.initializer import MSRA
-from paddle.fluid.param_attr import ParamAttr
-
-__all__ = ['MobileNet']
-
-train_parameters = {
- "input_size": [3, 224, 224],
- "input_mean": [0.485, 0.456, 0.406],
- "input_std": [0.229, 0.224, 0.225],
- "learning_strategy": {
- "name": "piecewise_decay",
- "batch_size": 256,
- "epochs": [10, 16, 30],
- "steps": [0.1, 0.01, 0.001, 0.0001]
- }
-}
-
-
-class MobileNet():
- def __init__(self):
- self.params = train_parameters
-
- def net(self, input, class_dim=1000, scale=1.0):
- # conv1: 112x112
- input = self.conv_bn_layer(
- input,
- filter_size=3,
- channels=3,
- num_filters=int(32 * scale),
- stride=2,
- padding=1,
- name="conv1")
-
- # 56x56
- input = self.depthwise_separable(
- input,
- num_filters1=32,
- num_filters2=64,
- num_groups=32,
- stride=1,
- scale=scale,
- name="conv2_1")
-
- input = self.depthwise_separable(
- input,
- num_filters1=64,
- num_filters2=128,
- num_groups=64,
- stride=2,
- scale=scale,
- name="conv2_2")
-
- # 28x28
- input = self.depthwise_separable(
- input,
- num_filters1=128,
- num_filters2=128,
- num_groups=128,
- stride=1,
- scale=scale,
- name="conv3_1")
-
- input = self.depthwise_separable(
- input,
- num_filters1=128,
- num_filters2=256,
- num_groups=128,
- stride=2,
- scale=scale,
- name="conv3_2")
-
- # 14x14
- input = self.depthwise_separable(
- input,
- num_filters1=256,
- num_filters2=256,
- num_groups=256,
- stride=1,
- scale=scale,
- name="conv4_1")
-
- input = self.depthwise_separable(
- input,
- num_filters1=256,
- num_filters2=512,
- num_groups=256,
- stride=2,
- scale=scale,
- name="conv4_2")
-
- # 14x14
- for i in range(5):
- input = self.depthwise_separable(
- input,
- num_filters1=512,
- num_filters2=512,
- num_groups=512,
- stride=1,
- scale=scale,
- name="conv5" + "_" + str(i + 1))
- # 7x7
- input = self.depthwise_separable(
- input,
- num_filters1=512,
- num_filters2=1024,
- num_groups=512,
- stride=2,
- scale=scale,
- name="conv5_6")
-
- input = self.depthwise_separable(
- input,
- num_filters1=1024,
- num_filters2=1024,
- num_groups=1024,
- stride=1,
- scale=scale,
- name="conv6")
-
- input = fluid.layers.pool2d(
- input=input,
- pool_size=0,
- pool_stride=1,
- pool_type='avg',
- global_pooling=True)
-
- output = fluid.layers.fc(input=input,
- size=class_dim,
- act='softmax',
- param_attr=ParamAttr(
- initializer=MSRA(), name="fc7_weights"),
- bias_attr=ParamAttr(name="fc7_offset"))
-
- return output
-
- def conv_bn_layer(self,
- input,
- filter_size,
- num_filters,
- stride,
- padding,
- channels=None,
- num_groups=1,
- act='relu',
- use_cudnn=True,
- name=None):
- conv = fluid.layers.conv2d(
- input=input,
- num_filters=num_filters,
- filter_size=filter_size,
- stride=stride,
- padding=padding,
- groups=num_groups,
- act=None,
- use_cudnn=use_cudnn,
- param_attr=ParamAttr(
- initializer=MSRA(), name=name + "_weights"),
- bias_attr=False)
- bn_name = name + "_bn"
- return fluid.layers.batch_norm(
- input=conv,
- act=act,
- param_attr=ParamAttr(name=bn_name + "_scale"),
- bias_attr=ParamAttr(name=bn_name + "_offset"),
- moving_mean_name=bn_name + '_mean',
- moving_variance_name=bn_name + '_variance')
-
- def depthwise_separable(self,
- input,
- num_filters1,
- num_filters2,
- num_groups,
- stride,
- scale,
- name=None):
- depthwise_conv = self.conv_bn_layer(
- input=input,
- filter_size=3,
- num_filters=int(num_filters1 * scale),
- stride=stride,
- padding=1,
- num_groups=int(num_groups * scale),
- use_cudnn=False,
- name=name + "_dw")
-
- pointwise_conv = self.conv_bn_layer(
- input=depthwise_conv,
- filter_size=1,
- num_filters=int(num_filters2 * scale),
- stride=1,
- padding=0,
- name=name + "_sep")
- return pointwise_conv
diff --git a/PaddleSlim/classification/models/mobilenet_v2.py b/PaddleSlim/classification/models/mobilenet_v2.py
deleted file mode 100644
index 1855996ad20eb44ba656046db8a965b6da11784d..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/models/mobilenet_v2.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
-#
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import paddle.fluid as fluid
-from paddle.fluid.initializer import MSRA
-from paddle.fluid.param_attr import ParamAttr
-
-__all__ = ['MobileNetV2', 'MobileNetV2_x0_25', 'MobileNetV2_x0_5', 'MobileNetV2_x1_0', 'MobileNetV2_x1_5', 'MobileNetV2_x2_0',
- 'MobileNetV2_scale']
-
-train_parameters = {
- "input_size": [3, 224, 224],
- "input_mean": [0.485, 0.456, 0.406],
- "input_std": [0.229, 0.224, 0.225],
- "learning_strategy": {
- "name": "piecewise_decay",
- "batch_size": 256,
- "epochs": [30, 60, 90],
- "steps": [0.1, 0.01, 0.001, 0.0001]
- }
-}
-
-
-class MobileNetV2():
- def __init__(self, scale=1.0, change_depth=False):
- self.params = train_parameters
- self.scale = scale
- self.change_depth = change_depth
-
- def net(self, input, class_dim=1000):
- scale = self.scale
- change_depth = self.change_depth
- # if change_depth is True, the network is roughly 1.4x deeper than the standard MobileNetV2
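- # each tuple is (t, c, n, s): expansion factor, output channels, number of
- # repeats, and stride of the first block in the group (matching the
- # notation of the MobileNetV2 paper)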
- bottleneck_params_list = [
- (1, 16, 1, 1),
- (6, 24, 2, 2),
- (6, 32, 3, 2),
- (6, 64, 4, 2),
- (6, 96, 3, 1),
- (6, 160, 3, 2),
- (6, 320, 1, 1),
- ] if not change_depth else [
- (1, 16, 1, 1),
- (6, 24, 2, 2),
- (6, 32, 5, 2),
- (6, 64, 7, 2),
- (6, 96, 5, 1),
- (6, 160, 3, 2),
- (6, 320, 1, 1),
- ]
-
- #conv1
- input = self.conv_bn_layer(
- input,
- num_filters=int(32 * scale),
- filter_size=3,
- stride=2,
- padding=1,
- if_act=True,
- name='conv1_1')
-
- # bottleneck sequences
- i = 1
- in_c = int(32 * scale)
- for layer_setting in bottleneck_params_list:
- t, c, n, s = layer_setting
- i += 1
- input = self.invresi_blocks(
- input=input,
- in_c=in_c,
- t=t,
- c=int(c * scale),
- n=n,
- s=s,
- name='conv' + str(i))
- in_c = int(c * scale)
- #last_conv
- input = self.conv_bn_layer(
- input=input,
- num_filters=int(1280 * scale) if scale > 1.0 else 1280,
- filter_size=1,
- stride=1,
- padding=0,
- if_act=True,
- name='conv9')
-
- input = fluid.layers.pool2d(
- input=input,
- pool_size=7,
- pool_stride=1,
- pool_type='avg',
- global_pooling=True)
-
- output = fluid.layers.fc(input=input,
- size=class_dim,
- act='softmax',
- param_attr=ParamAttr(name='fc10_weights'),
- bias_attr=ParamAttr(name='fc10_offset'))
- return output
-
- def conv_bn_layer(self,
- input,
- filter_size,
- num_filters,
- stride,
- padding,
- channels=None,
- num_groups=1,
- if_act=True,
- name=None,
- use_cudnn=True):
- conv = fluid.layers.conv2d(
- input=input,
- num_filters=num_filters,
- filter_size=filter_size,
- stride=stride,
- padding=padding,
- groups=num_groups,
- act=None,
- use_cudnn=use_cudnn,
- param_attr=ParamAttr(name=name + '_weights'),
- bias_attr=False)
- bn_name = name + '_bn'
- bn = fluid.layers.batch_norm(
- input=conv,
- param_attr=ParamAttr(name=bn_name + "_scale"),
- bias_attr=ParamAttr(name=bn_name + "_offset"),
- moving_mean_name=bn_name + '_mean',
- moving_variance_name=bn_name + '_variance')
- if if_act:
- return fluid.layers.relu6(bn)
- else:
- return bn
-
- def shortcut(self, input, data_residual):
- return fluid.layers.elementwise_add(input, data_residual)
-
- def inverted_residual_unit(self,
- input,
- num_in_filter,
- num_filters,
- ifshortcut,
- stride,
- filter_size,
- padding,
- expansion_factor,
- name=None):
- num_expfilter = int(round(num_in_filter * expansion_factor))
-
- channel_expand = self.conv_bn_layer(
- input=input,
- num_filters=num_expfilter,
- filter_size=1,
- stride=1,
- padding=0,
- num_groups=1,
- if_act=True,
- name=name + '_expand')
-
- bottleneck_conv = self.conv_bn_layer(
- input=channel_expand,
- num_filters=num_expfilter,
- filter_size=filter_size,
- stride=stride,
- padding=padding,
- num_groups=num_expfilter,
- if_act=True,
- name=name + '_dwise',
- use_cudnn=False)
-
- linear_out = self.conv_bn_layer(
- input=bottleneck_conv,
- num_filters=num_filters,
- filter_size=1,
- stride=1,
- padding=0,
- num_groups=1,
- if_act=False,
- name=name + '_linear')
- if ifshortcut:
- out = self.shortcut(input=input, data_residual=linear_out)
- return out
- else:
- return linear_out
-
- def invresi_blocks(self, input, in_c, t, c, n, s, name=None):
- first_block = self.inverted_residual_unit(
- input=input,
- num_in_filter=in_c,
- num_filters=c,
- ifshortcut=False,
- stride=s,
- filter_size=3,
- padding=1,
- expansion_factor=t,
- name=name + '_1')
-
- last_residual_block = first_block
- last_c = c
-
- for i in range(1, n):
- last_residual_block = self.inverted_residual_unit(
- input=last_residual_block,
- num_in_filter=last_c,
- num_filters=c,
- ifshortcut=True,
- stride=1,
- filter_size=3,
- padding=1,
- expansion_factor=t,
- name=name + '_' + str(i + 1))
- return last_residual_block
-
-
-
-def MobileNetV2_x0_25():
- model = MobileNetV2(scale=0.25)
- return model
-
-def MobileNetV2_x0_5():
- model = MobileNetV2(scale=0.5)
- return model
-
-def MobileNetV2_x1_0():
- model = MobileNetV2(scale=1.0)
- return model
-
-def MobileNetV2_x1_5():
- model = MobileNetV2(scale=1.5)
- return model
-
-def MobileNetV2_x2_0():
- model = MobileNetV2(scale=2.0)
- return model
-
-def MobileNetV2_scale():
- model = MobileNetV2(scale=1.2, change_depth=True)
- return model
diff --git a/PaddleSlim/classification/models/resnet.py b/PaddleSlim/classification/models/resnet.py
deleted file mode 100644
index df13bf259853f763d0acc357243c8e76145d5758..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/models/resnet.py
+++ /dev/null
@@ -1,225 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import paddle
-import paddle.fluid as fluid
-import math
-from paddle.fluid.param_attr import ParamAttr
-
-__all__ = ["ResNet", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
-
-train_parameters = {
- "input_size": [3, 224, 224],
- "input_mean": [0.485, 0.456, 0.406],
- "input_std": [0.229, 0.224, 0.225],
- "learning_strategy": {
- "name": "piecewise_decay",
- "batch_size": 256,
- "epochs": [10, 16, 30],
- "steps": [0.1, 0.01, 0.001, 0.0001]
- }
-}
-
-
-class ResNet():
- def __init__(self, layers=50, prefix_name=''):
- self.params = train_parameters
- self.layers = layers
- self.prefix_name = prefix_name
-
- def net(self, input, class_dim=1000, conv1_name='conv1', fc_name=None):
- layers = self.layers
- prefix_name = self.prefix_name if self.prefix_name == '' else self.prefix_name + '_'
- supported_layers = [34, 50, 101, 152]
- assert layers in supported_layers, \
- "supported layers are {} but input layer is {}".format(supported_layers, layers)
-
- if layers == 34 or layers == 50:
- depth = [3, 4, 6, 3]
- elif layers == 101:
- depth = [3, 4, 23, 3]
- elif layers == 152:
- depth = [3, 8, 36, 3]
- num_filters = [64, 128, 256, 512]
-
- # TODO(wanghaoshuang@baidu.com):
- # fix name("conv1") conflict between student and teacher in distillation.
- conv = self.conv_bn_layer(
- input=input,
- num_filters=64,
- filter_size=7,
- stride=2,
- act='relu',
- name=prefix_name + conv1_name)
- conv = fluid.layers.pool2d(
- input=conv,
- pool_size=3,
- pool_stride=2,
- pool_padding=1,
- pool_type='max')
-
- if layers >= 50:
- for block in range(len(depth)):
- for i in range(depth[block]):
- if layers in [101, 152] and block == 2:
- if i == 0:
- conv_name = "res" + str(block + 2) + "a"
- else:
- conv_name = "res" + str(block + 2) + "b" + str(i)
- else:
- conv_name = "res" + str(block + 2) + chr(97 + i)
- conv_name = prefix_name + conv_name
- conv = self.bottleneck_block(
- input=conv,
- num_filters=num_filters[block],
- stride=2 if i == 0 and block != 0 else 1,
- name=conv_name)
-
- pool = fluid.layers.pool2d(
- input=conv, pool_size=7, pool_type='avg', global_pooling=True)
- stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
- fc_name = fc_name if fc_name is None else prefix_name + fc_name
- out = fluid.layers.fc(input=pool,
- size=class_dim,
- act='softmax',
- name=fc_name,
- param_attr=fluid.param_attr.ParamAttr(
- initializer=fluid.initializer.Uniform(-stdv,
- stdv)))
- else:
- for block in range(len(depth)):
- for i in range(depth[block]):
- conv_name = "res" + str(block + 2) + chr(97 + i)
- conv_name = prefix_name + conv_name
- conv = self.basic_block(
- input=conv,
- num_filters=num_filters[block],
- stride=2 if i == 0 and block != 0 else 1,
- is_first=block == i == 0,
- name=conv_name)
-
- pool = fluid.layers.pool2d(
- input=conv, pool_type='avg', global_pooling=True)
- stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
- fc_name = fc_name if fc_name is None else prefix_name + fc_name
- out = fluid.layers.fc(
- input=pool,
- size=class_dim,
- act='softmax',
- name=fc_name,
- param_attr=fluid.param_attr.ParamAttr(
- initializer=fluid.initializer.Uniform(-stdv, stdv)))
-
- return out
-
- def conv_bn_layer(self,
- input,
- num_filters,
- filter_size,
- stride=1,
- groups=1,
- act=None,
- name=None):
- conv = fluid.layers.conv2d(
- input=input,
- num_filters=num_filters,
- filter_size=filter_size,
- stride=stride,
- padding=(filter_size - 1) // 2,
- groups=groups,
- act=None,
- param_attr=ParamAttr(name=name + "_weights"),
- bias_attr=False,
- name=name + '.conv2d.output.1')
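- # derive the batch-norm parameter names from the conv name so that
- # pretrained checkpoints load correctly: e.g. "conv1" -> "bn_conv1" and
- # "res2a_branch2a" -> "bn2a_branch2a"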
- if self.prefix_name == '':
- if name == "conv1":
- bn_name = "bn_" + name
- else:
- bn_name = "bn" + name[3:]
- else:
- if name.split("_")[1] == "conv1":
- bn_name = name.split("_", 1)[0] + "_bn_" + name.split("_", 1)[1]
- else:
- bn_name = name.split("_", 1)[0] + "_bn" + name.split("_",
- 1)[1][3:]
- return fluid.layers.batch_norm(
- input=conv,
- act=act,
- name=bn_name + '.output.1',
- param_attr=ParamAttr(name=bn_name + '_scale'),
- bias_attr=ParamAttr(bn_name + '_offset'),
- moving_mean_name=bn_name + '_mean',
- moving_variance_name=bn_name + '_variance', )
-
- def shortcut(self, input, ch_out, stride, is_first, name):
- ch_in = input.shape[1]
- if ch_in != ch_out or stride != 1 or is_first:
- return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
- else:
- return input
-
- def bottleneck_block(self, input, num_filters, stride, name):
- conv0 = self.conv_bn_layer(
- input=input,
- num_filters=num_filters,
- filter_size=1,
- act='relu',
- name=name + "_branch2a")
- conv1 = self.conv_bn_layer(
- input=conv0,
- num_filters=num_filters,
- filter_size=3,
- stride=stride,
- act='relu',
- name=name + "_branch2b")
- conv2 = self.conv_bn_layer(
- input=conv1,
- num_filters=num_filters * 4,
- filter_size=1,
- act=None,
- name=name + "_branch2c")
-
- short = self.shortcut(
- input, num_filters * 4, stride, is_first=False, name=name + "_branch1")
-
- return fluid.layers.elementwise_add(
- x=short, y=conv2, act='relu', name=name + ".add.output.5")
-
- def basic_block(self, input, num_filters, stride, is_first, name):
- conv0 = self.conv_bn_layer(
- input=input,
- num_filters=num_filters,
- filter_size=3,
- act='relu',
- stride=stride,
- name=name + "_branch2a")
- conv1 = self.conv_bn_layer(
- input=conv0,
- num_filters=num_filters,
- filter_size=3,
- act=None,
- name=name + "_branch2b")
- short = self.shortcut(
- input, num_filters, stride, is_first, name=name + "_branch1")
- return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
-
-
-
-def ResNet34(prefix_name=''):
- model = ResNet(layers=34, prefix_name=prefix_name)
- return model
-
-
-def ResNet50(prefix_name=''):
- model = ResNet(layers=50, prefix_name=prefix_name)
- return model
-
-
-def ResNet101():
- model = ResNet(layers=101)
- return model
-
-
-def ResNet152():
- model = ResNet(layers=152)
- return model
diff --git a/PaddleSlim/classification/pruning/README.md b/PaddleSlim/classification/pruning/README.md
deleted file mode 100644
index a3141ba525e35ec6ba327785b6ecf61a50b3a5d7..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/pruning/README.md
+++ /dev/null
@@ -1,184 +0,0 @@
->Please install Paddle 1.6 or a later version before running this example.
-
-# Convolution Channel Pruning Example for Classification Models
-
-## Overview
-
-This example compresses classification models with the [convolution channel pruning strategy](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/tutorial.md#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86) provided by PaddleSlim.
->This document uses the ILSVRC2012 dataset by default, stored under `models/PaddleSlim/data/`. Please follow [data preparation](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#数据准备) to set up your dataset before running the training script run.sh.
-
-Before reading this example, we recommend that you first get familiar with the following:
-
-- [Standard training of classification models](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification)
-- [PaddleSlim usage documentation](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md)
-
-
-## Configuration Files
-
-For how to write a configuration file, see:
-
-- [How to write PaddleSlim configuration files](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#122-%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E7%9A%84%E4%BD%BF%E7%94%A8)
-- [How to write pruning strategy configurations](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#22-%E6%A8%A1%E5%9E%8B%E9%80%9A%E9%81%93%E5%89%AA%E8%A3%81)
-
-The `pruned_params` entry of the configuration file specifies which parameters to prune, and must be set according to the network structure of the model at hand.
-
-Take MobileNetV2 as an example. Its main building block is the inverted residual, shown in Figure 1:
-
-[Figure 1: the MobileNetV2 inverted residual block]
-
-For now PaddleSlim cannot prune a `depthwise convolution` directly, because changing the channel count of a `depthwise convolution` affects both the preceding and the following convolution layers.
-Since `depthwise convolution` parameters also account for only a small fraction of the whole model, we instead prune the ordinary convolution layers immediately before and after each depthwise convolution.
-The parameter names of the target convolution layers can be listed with the following snippet:
-
-```
-for param in fluid.default_main_program().global_block().all_parameters():
-    if 'weights' in param.name:
-        print(param.name, param.shape)
-```
-
-The output looks like this:
-
-```
-conv1_1_weights (32, 3, 3, 3)
-conv2_1_expand_weights (32, 32, 1, 1)
-conv2_1_dwise_weights (32, 1, 3, 3)
-conv2_1_linear_weights (16, 32, 1, 1)
-conv3_1_expand_weights (96, 16, 1, 1)
-conv3_1_dwise_weights (96, 1, 3, 3)
-conv3_1_linear_weights (24, 96, 1, 1)
-...
-conv8_1_expand_weights (960, 160, 1, 1)
-conv8_1_dwise_weights (960, 1, 3, 3)
-conv8_1_linear_weights (320, 960, 1, 1)
-conv9_weights (1280, 320, 1, 1)
-fc10_weights (1280, 1000)
-```
-
-As the output shows, the ordinary convolutions are named `.*expand_weights` or `.*linear_weights`, both of which are matched by the regular expression `.*[r|d]_weights`.
-
-Accordingly, we set `pruned_params` in the MobileNetV2 configuration file to `.*[r|d]_weights`.
-
-The same inspection can be applied to MobileNetV1 and ResNet50 to find their parameter naming patterns and choose suitable regular expressions for the parameters to prune; see the sanity check below.
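-
-As a quick sanity check, the following standalone snippet (an editor's sketch using only the parameter names listed above) confirms that the expression matches the expand/linear weights but not the depthwise or fully-connected ones:
-
-```
-import re
-
-pattern = re.compile(r'.*[r|d]_weights')
-names = ['conv1_1_weights', 'conv2_1_expand_weights',
-         'conv2_1_dwise_weights', 'conv2_1_linear_weights', 'fc10_weights']
-print([n for n in names if pattern.match(n)])
-# prints ['conv2_1_expand_weights', 'conv2_1_linear_weights']
-```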
-
-
-## Training
-
-The compression script compress.py is adapted from [PaddleCV/image_classification/train.py](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/train.py).
-It defines a Compressor object that carries out the compression task.
-
-You can run the compression task with default arguments via `python compress.py`, and list the configurable arguments via `python compress.py --help`. In brief:
-
-- use_gpu: whether to use the GPU. If enabled, make sure the current environment and Paddle build support it. Defaults to True.
-- batch_size: the batch size used to fine-tune the model after pruning.
-- model: the target model; this example supports 'MobileNetV1', 'MobileNetV2' and 'ResNet50'.
-- pretrained_model: the path of the pretrained model, which can be downloaded from [here](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#%E5%B7%B2%E5%8F%91%E5%B8%83%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD).
-- config_file: the configuration file of the compression strategy.
-
-You can run this example via the script `run.sh`; make sure the [pretrained model](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#%E5%B7%B2%E5%8F%91%E5%B8%83%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD) has been downloaded correctly.
-
-
-### Saving checkpoints
-
-If `checkpoint_path` is set in the configuration file, checkpoints are saved automatically while the compression task runs. When a task is interrupted, restarting it automatically loads the latest checkpoint (by numeric order) from `checkpoint_path`. If you do not want a restarted task to resume from a checkpoint, change `checkpoint_path` in the configuration file or empty the directory it points to.
-
->Note: the configuration itself is not stored in the checkpoint, so changes made to the configuration file before a restart will take effect.
-
-
-## Evaluation
-
-If `checkpoint_path` is set in the configuration file, a compressed model for evaluation is saved every epoch
-under `${checkpoint_path}/${epoch_id}/eval_model/`, consisting of the two files `__model__` and `__params__`.
-`__model__` stores the model structure and `__params__` stores the parameters.
-
-If you do not need these evaluation models, set the `save_eval_model` option to False (default: True) when constructing the Compressor object.
-
-The script PaddleSlim/classification/eval.py shows how to evaluate such a model on the evaluation dataset.
-
-## Inference
-
-If `checkpoint_path` is set in the configuration file and the `prune_infer_model` option is passed when constructing the Compressor object, an `inference model` is saved
-every epoch. It is obtained by removing the redundant operators from eval_program.
-
-The model is saved under `${checkpoint_path}/${epoch_id}/eval_model/` and consists of the two files `__model__.infer` and `__params__`.
-`__model__.infer` stores the model structure and `__params__` stores the parameters.
-
-For more about the `prune_infer_model` option, see: [Introduction to Compressor](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#121-%E5%A6%82%E4%BD%95%E6%94%B9%E5%86%99%E6%99%AE%E9%80%9A%E8%AE%AD%E7%BB%83%E8%84%9A%E6%9C%AC)
-
-### Python inference
-
-The script PaddleSlim/classification/infer.py shows how to load the inference model and run prediction with the fluid Python API.
-
-### PaddleLite
-
-The inference model produced by this example can be loaded and used directly by PaddleLite.
-For how to use PaddleLite, see: [PaddleLite usage documentation](https://github.com/PaddlePaddle/Paddle-Lite/wiki#%E4%BD%BF%E7%94%A8)
-
-## Example Results
-
-Note: `model_size` in the tables below is the size of the `__params__` file described in the Inference section.
-
-### MobileNetV1
-
-| FLOPS |top1_acc/top5_acc| model_size |Paddle Fluid inference time(ms)| Paddle Lite inference time(ms)|
-|---|---|---|---|---|
-|baseline|70.99%/89.68% |- |- |-|
-|-10%|- |- |- |-|
-|-30%|- |- |- |-|
-|-50%|- |- |- |-|
-
-#### Training hyperparameters
-
-- batch size: 256
-- lr_strategy: piecewise_decay
-- step_epochs: 30, 60, 90
-- num_epochs: 120
-- l2_decay: 3e-5
-- lr: 0.1
-
-### MobileNetV2
-
-| FLOPS |top1_acc/top5_acc| model_size |Paddle Fluid inference time(ms)| Paddle Lite inference time(ms)|
-|---|---|---|---|---|
-|baseline|72.15%/90.65% |- |- |-|
-|-10%|- |- |- |-|
-|-30%|- |- |- |-|
-|-50%|- |- |- |-|
-
-#### Training hyperparameters
-
-- batch size: 500
-- lr_strategy: cosine_decay
-- num_epochs: 240
-- l2_decay: 4e-5
-- lr: 0.1
-
-
-### ResNet50
-
-| FLOPS |top1_acc/top5_acc| model_size |Paddle Fluid inference time(ms)| Paddle Lite inference time(ms)|
-|---|---|---|---|---|
-|baseline|76.50%/93.00% |- |- |-|
-|-10%|- |- |- |-|
-|-30%|- |- |- |-|
-|-50%|- |- |- |-|
-
-#### Training hyperparameters
-
-- batch size: 256
-- lr_strategy: cosine_decay
-- num_epochs: 120
-- l2_decay: 1e-4
-- lr: 0.1
-
-## FAQ
-
-### 1. How do I compress other models from the Paddle classification library or a custom classification model?
-
-We suggest adding new classification models following the model definition files under `models/PaddleSlim/classification/models`. You can copy a model definition file from the [Paddle image classification library](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) or write your own. For more details, see [Standard training of classification models](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) and the [PaddleSlim usage documentation](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md).
-
diff --git a/PaddleSlim/classification/pruning/__init__.py b/PaddleSlim/classification/pruning/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/PaddleSlim/classification/pruning/compress.py b/PaddleSlim/classification/pruning/compress.py
deleted file mode 100644
index 0247a78f794a09a18ca13adcf60801782ce71660..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/pruning/compress.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import os
-import sys
-import logging
-import paddle
-import argparse
-import functools
-import math
-import paddle.fluid as fluid
-sys.path.append("..")
-import imagenet_reader as reader
-import models
-sys.path.append("../../")
-from utility import add_arguments, print_arguments
-
-from paddle.fluid.contrib.slim import Compressor
-
-logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
-_logger = logging.getLogger(__name__)
-_logger.setLevel(logging.INFO)
-
-parser = argparse.ArgumentParser(description=__doc__)
-add_arg = functools.partial(add_arguments, argparser=parser)
-# yapf: disable
-add_arg('batch_size', int, 64*4, "Minibatch size.")
-add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
-add_arg('model', str, None, "The target model.")
-add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
-add_arg('lr', float, 0.1, "The learning rate used to fine-tune pruned model.")
-add_arg('lr_strategy', str, "piecewise_decay", "The learning rate decay strategy.")
-add_arg('l2_decay', float, 3e-5, "The l2_decay parameter.")
-add_arg('momentum_rate', float, 0.9, "The value of momentum_rate.")
-add_arg('num_epochs', int, 120, "The number of total epochs.")
-add_arg('total_images', int, 1281167, "The number of total training images.")
-parser.add_argument('--step_epochs', nargs='+', type=int, default=[30, 60, 90], help="piecewise decay step")
-add_arg('config_file', str, None, "The config file for compression with yaml format.")
-add_arg('enable_ce', bool, False, "If set, run the task with continuous evaluation logs.")
-# yapf: enable
-
-
-model_list = [m for m in dir(models) if "__" not in m]
-
-def piecewise_decay(args):
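- # boundaries are expressed in iterations, not epochs: each entry in
- # `step_epochs` is multiplied by the number of iterations per epoch,
- # and the learning rate is divided by 10 at every boundary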
- step = int(math.ceil(float(args.total_images) / args.batch_size))
- bd = [step * e for e in args.step_epochs]
- lr = [args.lr * (0.1**i) for i in range(len(bd) + 1)]
- learning_rate = fluid.layers.piecewise_decay(boundaries=bd, values=lr)
- optimizer = fluid.optimizer.Momentum(
- learning_rate=learning_rate,
- momentum=args.momentum_rate,
- regularization=fluid.regularizer.L2Decay(args.l2_decay))
- return optimizer
-
-def cosine_decay(args):
- step = int(math.ceil(float(args.total_images) / args.batch_size))
- learning_rate = fluid.layers.cosine_decay(
- learning_rate=args.lr,
- step_each_epoch=step,
- epochs=args.num_epochs)
- optimizer = fluid.optimizer.Momentum(
- learning_rate=learning_rate,
- momentum=args.momentum_rate,
- regularization=fluid.regularizer.L2Decay(args.l2_decay))
- return optimizer
-
-def create_optimizer(args):
- if args.lr_strategy == "piecewise_decay":
- return piecewise_decay(args)
- elif args.lr_strategy == "cosine_decay":
- return cosine_decay(args)
-
-def compress(args):
- # add ce
- if args.enable_ce:
- SEED = 1
- fluid.default_main_program().random_seed = SEED
- fluid.default_startup_program().random_seed = SEED
-
- class_dim = 1000
- image_shape = "3,224,224"
- image_shape = [int(m) for m in image_shape.split(",")]
- assert args.model in model_list, "{} is not in list: {}".format(args.model, model_list)
- image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
- # model definition
- model = models.__dict__[args.model]()
- out = model.net(input=image, class_dim=class_dim)
- cost = fluid.layers.cross_entropy(input=out, label=label)
- avg_cost = fluid.layers.mean(x=cost)
- acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
- val_program = fluid.default_main_program().clone()
- opt = create_optimizer(args)
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(fluid.default_startup_program())
-
- if args.pretrained_model:
- def if_exist(var):
- return os.path.exists(os.path.join(args.pretrained_model, var.name))
- fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
-
- val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
- val_feed_list = [('image', image.name), ('label', label.name)]
- val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
-
- train_reader = paddle.batch(
- reader.train(), batch_size=args.batch_size, drop_last=True)
- train_feed_list = [('image', image.name), ('label', label.name)]
- train_fetch_list = [('loss', avg_cost.name)]
-
- com_pass = Compressor(
- place,
- fluid.global_scope(),
- fluid.default_main_program(),
- train_reader=train_reader,
- train_feed_list=train_feed_list,
- train_fetch_list=train_fetch_list,
- eval_program=val_program,
- eval_reader=val_reader,
- eval_feed_list=val_feed_list,
- eval_fetch_list=val_fetch_list,
- save_eval_model=True,
- prune_infer_model=[[image.name], [out.name]],
- train_optimizer=opt)
- com_pass.config(args.config_file)
- com_pass.run()
-
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- compress(args)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/classification/pruning/configs/mobilenet_v1.yaml b/PaddleSlim/classification/pruning/configs/mobilenet_v1.yaml
deleted file mode 100644
index 2aa857c7123b239e2896b2873fbd6adb21d355ac..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/pruning/configs/mobilenet_v1.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-strategies:
- uniform_pruning_strategy:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.5
- pruned_params: '.*_sep_weights'
-compressor:
- epoch: 121
- checkpoint_path: './checkpoints/mobilenet_v1/'
- strategies:
- - uniform_pruning_strategy
diff --git a/PaddleSlim/classification/pruning/configs/mobilenet_v2.yaml b/PaddleSlim/classification/pruning/configs/mobilenet_v2.yaml
deleted file mode 100644
index 59f7c7e5214fdcc6056561a7f582ceae4fecbd2f..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/pruning/configs/mobilenet_v2.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-strategies:
- uniform_pruning_strategy:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.5
- pruned_params: '.*[r|d]_weights'
-# pruned_params: '.*linear_weights'
-# pruned_params: '.*expand_weights'
-compressor:
- epoch: 241
- checkpoint_path: './checkpoints/mobilenet_v2'
- strategies:
- - uniform_pruning_strategy
diff --git a/PaddleSlim/classification/pruning/configs/resnet34.yaml b/PaddleSlim/classification/pruning/configs/resnet34.yaml
deleted file mode 100644
index ba7d1a4f9d9df80cd47c96eca90b05ac1cd2754e..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/pruning/configs/resnet34.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-strategies:
- uniform_pruning_strategy:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.5
- pruned_params: '.*branch.*_weights'
-compressor:
- epoch: 121
- checkpoint_path: './checkpoints/resnet34/'
- strategies:
- - uniform_pruning_strategy
diff --git a/PaddleSlim/classification/pruning/images/mobilenetv2.jpg b/PaddleSlim/classification/pruning/images/mobilenetv2.jpg
deleted file mode 100644
index c0dd3824dc77f8d3c87d719c13ceca16a3472e3c..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/classification/pruning/images/mobilenetv2.jpg and /dev/null differ
diff --git a/PaddleSlim/classification/pruning/run.sh b/PaddleSlim/classification/pruning/run.sh
deleted file mode 100644
index 4843dc8c4cf5c604c707ee774f2442b1f6c8f355..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/pruning/run.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env bash
-
-export CUDA_VISIBLE_DEVICES=0
-
-# download pretrain model
-root_url="http://paddle-imagenet-models-name.bj.bcebos.com"
-MobileNetV1="MobileNetV1_pretrained.tar"
-MobileNetV2="MobileNetV2_pretrained.tar"
-ResNet34="ResNet34_pretrained.tar"
-pretrain_dir='../pretrain'
-
-if [ ! -d ${pretrain_dir} ]; then
- mkdir ${pretrain_dir}
-fi
-
-cd ${pretrain_dir}
-
-if [ ! -f ${MobileNetV1} ]; then
- wget ${root_url}/${MobileNetV1}
- tar xf ${MobileNetV1}
-fi
-
-if [ ! -f ${MobileNetV2} ]; then
- wget ${root_url}/${MobileNetV2}
- tar xf ${MobileNetV2}
-fi
-
-if [ ! -f ${ResNet34} ]; then
- wget ${root_url}/${ResNet34}
- tar xf ${ResNet34}
-fi
-
-cd -
-
-nohup python -u compress.py \
---model "MobileNet" \
---use_gpu 1 \
---batch_size 256 \
---total_images 1281167 \
---lr_strategy "piecewise_decay" \
---lr 0.1 \
---l2_decay 3e-5 \
---pretrained_model ../pretrain/MobileNetV1_pretrained \
---config_file "./configs/mobilenet_v1.yaml" \
-> mobilenet_v1.log 2>&1 &
-tailf mobilenet_v1.log
-
-# for compression of mobilenet_v2
-#nohup python -u compress.py \
-#--model "MobileNetV2" \
-#--use_gpu 1 \
-#--batch_size 256 \
-#--total_images 1281167 \
-#--lr_strategy "cosine_decay" \
-#--lr 0.1 \
-#--l2_decay 4e-5 \
-#--pretrained_model ../pretrain/MobileNetV2_pretrained \
-#--config_file "./configs/mobilenet_v2.yaml" \
-#> mobilenet_v2.log 2>&1 &
-#tailf mobilenet_v2.log
-
-
-## for compression of resnet34
-#python -u compress.py \
-#--model "ResNet34" \
-#--use_gpu 1 \
-#--batch_size 256 \
-#--total_images 1281167 \
-#--lr_strategy "cosine_decay" \
-#--lr 0.1 \
-#--l2_decay 1e-4 \
-#--pretrained_model ../pretrain/ResNet34_pretrained \
-#--config_file "./configs/resnet34.yaml" \
-#> resnet34.log 2>&1 &
-#tailf resnet34.log
diff --git a/PaddleSlim/classification/quantization/README.md b/PaddleSlim/classification/quantization/README.md
deleted file mode 100644
index 1d7f00cec38e2c7e94b466cead1852e49b4b5a32..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/quantization/README.md
+++ /dev/null
@@ -1,265 +0,0 @@
->Please install Paddle 1.6 or a later version before running this example. The run.sh script in this example only works on Linux; on Windows, please write a script suitable for the Windows environment based on the contents of run.sh.
-
-# Quantization Compression Example for Classification Models
-
-## Overview
-
-This example compresses classification models with the [quantization strategy](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/tutorial.md#1-quantization-aware-training%E9%87%8F%E5%8C%96%E4%BB%8B%E7%BB%8D) provided by PaddleSlim.
->This document uses the ILSVRC2012 dataset by default, stored under `models/PaddleSlim/data/`. Please follow [data preparation](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#数据准备) to set up your dataset before running the training script run.sh.
-
-Before reading this example, we recommend that you first get familiar with the following:
-
-- [Standard training of classification models](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification)
-- [PaddleSlim usage documentation](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md)
-
-
-## Configuration Files
-
-For how to write a configuration file, see:
-
-- [How to write PaddleSlim configuration files](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#122-%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E7%9A%84%E4%BD%BF%E7%94%A8)
-- [How to write quantization strategy configurations](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#21-%E9%87%8F%E5%8C%96%E8%AE%AD%E7%BB%83)
-
-The `save_out_nodes` entry must be given the name of the Variable that holds the classification probabilities. Taking MobileNet V1 as an example, this name can be determined by printing the Variable right after the network has been built in compress.py.
-Code example:
-```
-#model definition, args.model=MobileNet
-model = models.__dict__[args.model]()
-out = model.net(input=image, class_dim=1000)
-print(out)
-cost = fluid.layers.cross_entropy(input=out, label=label)
-```
-The printed result shows that the Variable is named `fc_0.tmp_2`.
-
-## Training
-
-The compression script compress.py is adapted from [PaddleCV/image_classification/train.py](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/train.py).
-It defines a Compressor object that carries out the compression task.
-
-You can run the compression task with default arguments via `python compress.py`, and list the configurable arguments via `python compress.py --help`. In brief:
-
-- use_gpu: whether to use the GPU. If enabled, make sure the current environment and Paddle build support it. Defaults to True.
-- batch_size: the batch size used to fine-tune the model after quantization.
-- model: the target model; this example supports 'MobileNet', 'MobileNetV2' and 'ResNet34'.
-- pretrained_model: the path of the pretrained model, which can be downloaded from [here](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#%E5%B7%B2%E5%8F%91%E5%B8%83%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD).
-- config_file: the configuration file of the compression strategy.
-
-You can run this example via the script `run.sh`; make sure the [pretrained model](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#%E5%B7%B2%E5%8F%91%E5%B8%83%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD) has been downloaded correctly.
-
-### Model structure during training
-This part is based on the [introduction to the quantization low-level API](https://github.com/PaddlePaddle/models/tree/develop/PaddleSlim/quant_low_level_api#1-%E9%87%8F%E5%8C%96%E8%AE%AD%E7%BB%83low-level-apis%E4%BB%8B%E7%BB%8D).
-
-The quantization-related IrPasses in the PaddlePaddle framework are QuantizationTransformPass, QuantizationFreezePass and ConvertToInt8Pass. During training, QuantizationTransformPass is applied to the network; it inserts consecutive quantize and dequantize ops in front of each input of operators such as conv2d, depthwise_conv2d and mul, and changes certain inputs of the corresponding backward operators, as illustrated below:
-
-[Figure 1: result after applying QuantizationTransformPass]
-
-### Saving checkpoints
-
-If `checkpoint_path` is set in the configuration file, checkpoints are saved automatically while the compression task runs. When a task is interrupted, restarting it automatically loads the latest checkpoint (by numeric order) from `checkpoint_path`. If you do not want a restarted task to resume from a checkpoint, change `checkpoint_path` in the configuration file or empty the directory it points to.
-
->Note: the configuration itself is not stored in the checkpoint, so changes made to the configuration file before a restart will take effect.
-
-### Saving models for evaluation and inference
-If `float_model_save_path` and `int8_model_save_path` are set in the quantization strategy of the configuration file, the quantized models for evaluation and inference are saved when training ends. The difference between these two kinds of models is described next.
-
-#### FP32 model
-The section on the model structure during training introduced the quantization-related IrPasses in PaddlePaddle: QuantizationTransformPass, QuantizationFreezePass and ConvertToInt8Pass. The FP32 inference model is the model saved after applying QuantizationFreezePass and removing the redundant operators from eval_program.
-
-QuantizationFreezePass mainly reorders the quantize and dequantize ops in the IrGraph, i.e. it changes the op layout of Figure 1 into that of Figure 2. Beyond that, QuantizationFreezePass quantizes the weights of operators such as `conv2d`, `depthwise_conv2d` and `mul` offline to values within the int8_t range (while keeping the data type float32), which reduces the weight quantization work done at inference time, as shown in Figure 2:
-
-[Figure 2: result after applying QuantizationFreezePass]
-
-#### 8-bit model
-After QuantizationFreezePass has been applied to the training network, ConvertToInt8Pass can be executed;
-its main purpose is to change the weight type of the QuantizationFreezePass output from `FP32` to `INT8`. In other words, the user can choose to save the quantized weights either as float32 (by not running ConvertToInt8Pass) or as int8_t (by running ConvertToInt8Pass), as shown in Figure 3:
-
-[Figure 3: result after applying ConvertToInt8Pass]
-
-> In summary, the following model structures appear during quantization:
-
-1. The original model.
-2. The quantized training structure produced by QuantizationTransformPass. The `eval_model` saved under ${checkpoint_path} has this structure, and the same network is used for evaluation at the end of every epoch. Although it is not the final model structure, the per-epoch evaluation results can be used to pick the best model.
-3. The FP32 model structure produced by QuantizationFreezePass, described above. The dataset evaluation results listed in this document were obtained on this FP32 structure. It is saved only once during training, at the `end_epoch` set in the quantization configuration file. To convert the training result of some other epoch into an FP32 model, use the script PaddleSlim/classification/quantization/freeze.py; its usage is described in [Evaluation](#evaluation).
-4. The 8-bit model structure produced by ConvertToInt8Pass, described above. It is also saved only once during training, at the `end_epoch` set in the quantization configuration file. To convert the training result of some other epoch into an 8-bit model, use the script PaddleSlim/classification/quantization/freeze.py; its usage is described in [Evaluation](#evaluation).
-
-
-## Evaluation
-
-### Evaluation model saved every epoch
-Because the final quantized model is saved only once, at end_epoch, it is not guaranteed to be the best one. Therefore,
-if `checkpoint_path` is set in the configuration file, a quantized model for evaluation is saved every epoch
-under `${checkpoint_path}/${epoch_id}/eval_model/`, consisting of the two files `__model__` and `__params__`.
-`__model__` stores the model structure, which is identical to the training structure, and `__params__` stores the parameters.
-
-If you do not need these evaluation models, set the `save_eval_model` option to False (default: True) when constructing the Compressor object.
-
-The script PaddleSlim/classification/eval.py shows how to evaluate such a model on the evaluation dataset.
-
-Example command:
-```
-python eval.py \
-    --use_gpu 1 \
-    --model_path ${checkpoint_path}/${epoch_id}/eval_model
-```
-
-After evaluation, pick the model of the best epoch and convert it with the script PaddleSlim/classification/quantization/freeze.py into the two kinds of models described above (FP32 model and 8-bit model). The arguments to configure are:
-
-- model_path: the path of the model to load, i.e. `${checkpoint_path}/${epoch_id}/eval_model/`
-- weight_quant_type: the quantization method of the model weights, which must match the type in the configuration file
-- save_path: the save path of the `FP32` and `8-bit` models, which end up under `${save_path}/float/` and `${save_path}/int8/` respectively
-
-Example command:
-```
-python freeze.py \
-    --model_path ${checkpoint_path}/${epoch_id}/eval_model/ \
-    --weight_quant_type ${weight_quant_type} \
-    --save_path ${any path you want}
-```
-
-### Final evaluation model
-The final evaluation model is the FP32 model; use the script PaddleSlim/classification/eval.py to evaluate it on the evaluation dataset.
-Example command:
-```
-python eval.py \
-    --use_gpu 1 \
-    --model_path ${save_path}/float \
-    --model_name model \
-    --params_name weights
-```
-
-## Inference
-
-### Python inference
-
-The FP32 model can be used directly with the native PaddlePaddle Fluid inference APIs.
-
-The script PaddleSlim/classification/infer.py shows how to load the inference model and run prediction with the fluid Python API.
-
-Example command:
-```
-python infer.py \
-    --model_path ${save_path}/float \
-    --use_gpu 1 \
-    --model_name model \
-    --params_name weights
-```
-
-### PaddleLite inference
-The FP32 model can be loaded and run with Paddle-Lite; see the tutorial [how Paddle-Lite loads and runs quantized models](https://github.com/PaddlePaddle/Paddle-Lite/wiki/model_quantization).
-
-## Partial quantization
-
-An op can be excluded from quantization by defining it under the ``name_scope`` ``skip_quant``. For example, in PaddleSlim/classification/models/resnet.py, the definition of a conv can be changed as follows:
-
-Original definition:
-```
-conv = self.conv_bn_layer(
-    input=input,
-    num_filters=64,
-    filter_size=7,
-    stride=2,
-    act='relu',
-    name=prefix_name + conv1_name)
-```
-
-Definition with quantization skipped:
-
-```
-with fluid.name_scope('skip_quant'):
-    conv = self.conv_bn_layer(
-        input=input,
-        num_filters=64,
-        filter_size=7,
-        stride=2,
-        act='relu',
-        name=prefix_name + conv1_name)
-```
-The script PaddleSlim/classification/quantization/compress.py counts the number of ``conv`` ops and the number of quantization ops whose type starts with ``fake_quantize``; after some ``conv`` ops are skipped, the ``fake_quantize`` count drops accordingly.
-
-
-## Example Results
-
->The results in the current release are not the best achievable after hyperparameter tuning and are provided for reference only; we will improve them in future releases.
-
->Note: Paddle Lite timings were measured on an Android phone, model BKL-AL20, 4GB/6GB RAM, octa-core CPU (4x A73 2.36GHz + 4x A53 1.8GHz), EMUI 8.0, Kirin 970 SoC.
-
-### MobileNetV1
-
-| Weight quantization | Activation quantization | top1_acc/top5_acc |Paddle Fluid inference time(ms)| Paddle Lite inference time(ms)| Model download |
-|---|---|---|---|---| ---|
-|baseline|- |70.99%/89.68%|- |-| [download](http://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar)|
-|abs_max|abs_max|70.74%/89.55% |- |-| [download](https://paddle-slim-models.bj.bcebos.com/quantization%2Fmobilenetv1_w_abs_a_abs_7074_8955.tar.gz)|
-|abs_max|moving_average_abs_max|70.89%/89.67% |5.18|37.65| [download](https://paddle-slim-models.bj.bcebos.com/quantization%2Fmobilenetv1_w_abs_a_move_7089_8967.tar.gz)|
-|channel_wise_abs_max|abs_max|70.93%/89.65% |- |-|[download](https://paddle-slim-models.bj.bcebos.com/quantization%2Fmobilenetv1_w_chan_a_abs_7093_8965.tar.gz)|
-
->Training hyperparameters:
-
-Optimizer
-```
-fluid.optimizer.Momentum(momentum=0.9,
-                         learning_rate=fluid.layers.piecewise_decay(
-                             boundaries=[5000 * 12],
-                             values=[0.0001, 0.00001]),
-                         regularization=fluid.regularizer.L2Decay(1e-4))
-```
-8 GPUs, batch size 1024, 30 epochs; the best result was selected.
-
-### MobileNetV2
-
-| Weight quantization | Activation quantization | top1_acc/top5_acc |Paddle Fluid inference time(ms)| Paddle Lite inference time(ms)|
-|---|---|---|---|---|
-|baseline|- |72.15%/90.65%|- |-|
-|abs_max|abs_max|- |- |-|
-|abs_max|moving_average_abs_max|72.19%/90.71%|9.43 |56.09|
-|channel_wise_abs_max|abs_max|- |- |-|
-
->Training hyperparameters:
-
-Optimizer
-```
-fluid.optimizer.Momentum(momentum=0.9,
-                         learning_rate=fluid.layers.piecewise_decay(
-                             boundaries=[5000 * 12],
-                             values=[0.0001, 0.00001]),
-                         regularization=fluid.regularizer.L2Decay(1e-4))
-```
-8 GPUs, batch size 1024, 30 epochs; the best result was selected.
-
-### ResNet34
-
-| Weight quantization | Activation quantization | top1_acc/top5_acc |Paddle Fluid inference time(ms)| Paddle Lite inference time(ms)|
-|---|---|---|---|---|
-|baseline|- |74.57%/92.14%|- |-|
-|abs_max|abs_max|-|- |-|
-|abs_max|moving_average_abs_max|74.63%/92.17%|7.20|392.59|
-|channel_wise_abs_max|abs_max|-|- |-|
-
->Training hyperparameters:
-
-Optimizer
-```
-fluid.optimizer.Momentum(momentum=0.9,
-                         learning_rate=fluid.layers.piecewise_decay(
-                             boundaries=[5000 * 12],
-                             values=[0.0001, 0.00001]),
-                         regularization=fluid.regularizer.L2Decay(1e-4))
-```
-8 GPUs, batch size 1024, 30 epochs; the best result was selected.
-
-
-## FAQ
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/PaddleSlim/classification/quantization/compress.py b/PaddleSlim/classification/quantization/compress.py
deleted file mode 100644
index 88c8d72ca904ce32e98309acfb1dc7d072dd132f..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/quantization/compress.py
+++ /dev/null
@@ -1,122 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import os
-import time
-import sys
-import logging
-import paddle
-import argparse
-import functools
-import paddle.fluid as fluid
-sys.path.append("..")
-import imagenet_reader as reader
-import models
-sys.path.append("../../")
-from utility import add_arguments, print_arguments
-
-from paddle.fluid.contrib.slim import Compressor
-
-logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
-_logger = logging.getLogger(__name__)
-_logger.setLevel(logging.INFO)
-
-parser = argparse.ArgumentParser(description=__doc__)
-add_arg = functools.partial(add_arguments, argparser=parser)
-# yapf: disable
-add_arg('batch_size', int, 64*4, "Minibatch size.")
-add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
-add_arg('model', str, None, "The target model.")
-add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
-add_arg('config_file', str, None, "The config file for compression with yaml format.")
-# yapf: enable
-
-model_list = [m for m in dir(models) if "__" not in m]
-
-
-def compress(args):
- image_shape = "3,224,224"
- image_shape = [int(m) for m in image_shape.split(",")]
-
- image = fluid.data(
- name='image', shape=[None] + image_shape, dtype='float32')
- label = fluid.data(name='label', shape=[None, 1], dtype='int64')
- # model definition
- model = models.__dict__[args.model]()
-
- out = model.net(input=image, class_dim=1000)
- # print(out)
- cost = fluid.layers.cross_entropy(input=out, label=label)
- avg_cost = fluid.layers.mean(x=cost)
- acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
-
- val_program = fluid.default_main_program().clone()
-
- # quantization fine-tuning usually uses a small learning rate
- values = [1e-4, 1e-5]
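- # with boundaries=[5000 * 12], the learning rate drops from 1e-4 to 1e-5
- # after 60,000 iterations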
- opt = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(
- boundaries=[5000 * 12], values=values),
- regularization=fluid.regularizer.L2Decay(1e-4))
-
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(fluid.default_startup_program())
-
- if args.pretrained_model:
- assert os.path.exists(
- args.pretrained_model), "pretrained_model path doesn't exist"
-
- def if_exist(var):
- return os.path.exists(os.path.join(args.pretrained_model, var.name))
-
- fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
-
- val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
- val_feed_list = [('image', image.name), ('label', label.name)]
- val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
-
- train_reader = paddle.batch(
- reader.train(), batch_size=args.batch_size, drop_last=True)
- train_feed_list = [('image', image.name), ('label', label.name)]
- train_fetch_list = [('loss', avg_cost.name)]
-
- com_pass = Compressor(
- place,
- fluid.global_scope(),
- fluid.default_main_program(),
- train_reader=train_reader,
- train_feed_list=train_feed_list,
- train_fetch_list=train_fetch_list,
- eval_program=val_program,
- eval_reader=val_reader,
- eval_feed_list=val_feed_list,
- eval_fetch_list=val_fetch_list,
- teacher_programs=[],
- train_optimizer=opt,
- prune_infer_model=[[image.name], [out.name]],
- distiller_optimizer=None)
- com_pass.config(args.config_file)
- com_pass.run()
-
- conv_op_num = 0
- fake_quant_op_num = 0
- for op in com_pass.context.eval_graph.ops():
- if op._op.type == 'conv2d':
- conv_op_num += 1
- elif op._op.type.startswith('fake_quantize'):
- fake_quant_op_num += 1
- print('conv op num {}'.format(conv_op_num))
- print('fake quant op num {}'.format(fake_quant_op_num))
-
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- compress(args)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/classification/quantization/configs/mobilenet_v1.yaml b/PaddleSlim/classification/quantization/configs/mobilenet_v1.yaml
deleted file mode 100644
index bf06f5aa41c3dc87aa6a48460e07799c875a0057..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/quantization/configs/mobilenet_v1.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-version: 1.0
-strategies:
- quantization_strategy:
- class: 'QuantizationStrategy'
- start_epoch: 0
- end_epoch: 29
- float_model_save_path: './output/mobilenet_v1/float'
- int8_model_save_path: './output/mobilenet_v1/int8'
- weight_bits: 8
- activation_bits: 8
- weight_quantize_type: 'abs_max'
- activation_quantize_type: 'moving_average_abs_max'
- save_in_nodes: ['image']
- save_out_nodes: ['fc_0.tmp_2']
-compressor:
- epoch: 30
- checkpoint_path: './checkpoints/mobilenet_v1/'
- strategies:
- - quantization_strategy
diff --git a/PaddleSlim/classification/quantization/configs/mobilenet_v2.yaml b/PaddleSlim/classification/quantization/configs/mobilenet_v2.yaml
deleted file mode 100644
index 2c3cd7f366a69536cf64b3df2edec2596f70a6f7..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/quantization/configs/mobilenet_v2.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-version: 1.0
-strategies:
- quantization_strategy:
- class: 'QuantizationStrategy'
- start_epoch: 0
- end_epoch: 29
- float_model_save_path: './output/mobilenet_v2/float'
- int8_model_save_path: './output/mobilenet_v2/int8'
- weight_bits: 8
- activation_bits: 8
- weight_quantize_type: 'abs_max'
- activation_quantize_type: 'moving_average_abs_max'
- save_in_nodes: ['image']
- save_out_nodes: ['fc_0.tmp_2']
-compressor:
- epoch: 30
- checkpoint_path: './checkpoints/mobilenet_v2/'
- strategies:
- - quantization_strategy
diff --git a/PaddleSlim/classification/quantization/configs/resnet34.yaml b/PaddleSlim/classification/quantization/configs/resnet34.yaml
deleted file mode 100644
index 4b7aa8b4130f47dbabfcfd1f4a31411273b30b1b..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/quantization/configs/resnet34.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-version: 1.0
-strategies:
- quantization_strategy:
- class: 'QuantizationStrategy'
- start_epoch: 0
- end_epoch: 0
- float_model_save_path: './output/resnet34/float'
- int8_model_save_path: './output/resnet34/int8'
- weight_bits: 8
- activation_bits: 8
- weight_quantize_type: 'abs_max'
- activation_quantize_type: 'moving_average_abs_max'
- save_in_nodes: ['image']
- save_out_nodes: ['fc_0.tmp_2']
-compressor:
- epoch: 1
- checkpoint_path: './checkpoints/resnet34/'
- strategies:
- - quantization_strategy
diff --git a/PaddleSlim/classification/quantization/freeze.py b/PaddleSlim/classification/quantization/freeze.py
deleted file mode 100644
index a568e5a3154cebabfad1f70a613f20807de83065..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/quantization/freeze.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
-
-import os
-import sys
-import numpy as np
-import argparse
-import functools
-import logging
-
-import paddle
-import paddle.fluid as fluid
-from paddle.fluid.framework import IrGraph
-from paddle.fluid import core
-from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
-from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
-from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
-from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
-sys.path.append("..")
-import imagenet_reader as reader
-sys.path.append("../../")
-from utility import add_arguments, print_arguments
-
-logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
-_logger = logging.getLogger(__name__)
-_logger.setLevel(logging.INFO)
-
-parser = argparse.ArgumentParser(description=__doc__)
-# yapf: disable
-add_arg = functools.partial(add_arguments, argparser=parser)
-add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
-add_arg('model_path', str, "./pruning/checkpoints/resnet50/2/eval_model/", "The path of the model to be frozen.")
-add_arg('save_path', str, './output', 'Path to save inference model')
-add_arg('weight_quant_type', str, 'abs_max', 'quantization type for weight')
-# yapf: enable
-
-
-def eval(args):
- # parameters from arguments
-
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
-
- val_program, feed_names, fetch_targets = fluid.io.load_inference_model(
- args.model_path,
- exe,
- model_filename="__model__.infer",
- params_filename="__params__")
- val_reader = paddle.batch(reader.val(), batch_size=128)
- feeder = fluid.DataFeeder(
- place=place, feed_list=feed_names, program=val_program)
-
- results = []
- for batch_id, data in enumerate(val_reader()):
- image = [[d[0]] for d in data]
- label = [[d[1]] for d in data]
- feed_data = feeder.feed(image)
- pred = exe.run(val_program, feed=feed_data, fetch_list=fetch_targets)
- pred = np.array(pred[0])
- label = np.array(label)
- sort_array = pred.argsort(axis=1)
- top_1_pred = sort_array[:, -1:][:, ::-1]
- top_1 = np.mean(label == top_1_pred)
- top_5_pred = sort_array[:, -5:][:, ::-1]
- acc_num = 0
- for i in range(len(label)):
- if label[i][0] in top_5_pred[i]:
- acc_num += 1
- top_5 = acc_num / float(len(label))
- results.append([top_1, top_5])
-
- result = np.mean(np.array(results), axis=0)
- print("top1_acc/top5_acc= {}".format(result))
- sys.stdout.flush()
-
- _logger.info("freeze the graph for inference")
- test_graph = IrGraph(core.Graph(val_program.desc), for_test=True)
-
- freeze_pass = QuantizationFreezePass(
- scope=fluid.global_scope(),
- place=place,
- weight_quantize_type=args.weight_quant_type)
- freeze_pass.apply(test_graph)
- server_program = test_graph.to_program()
- fluid.io.save_inference_model(
- dirname=os.path.join(args.save_path, 'float'),
- feeded_var_names=feed_names,
- target_vars=fetch_targets,
- executor=exe,
- main_program=server_program,
- model_filename='model',
- params_filename='weights')
-
- _logger.info("convert the weights into int8 type")
- convert_int8_pass = ConvertToInt8Pass(
- scope=fluid.global_scope(), place=place)
- convert_int8_pass.apply(test_graph)
- server_int8_program = test_graph.to_program()
- fluid.io.save_inference_model(
- dirname=os.path.join(args.save_path, 'int8'),
- feeded_var_names=feed_names,
- target_vars=fetch_targets,
- executor=exe,
- main_program=server_int8_program,
- model_filename='model',
- params_filename='weights')
-
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- eval(args)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/classification/quantization/run.sh b/PaddleSlim/classification/quantization/run.sh
deleted file mode 100644
index 6c62fc5bf013e97fc7c5caed4996c543fb2011ab..0000000000000000000000000000000000000000
--- a/PaddleSlim/classification/quantization/run.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env bash
-
-# download pretrain model
-root_url="http://paddle-imagenet-models-name.bj.bcebos.com"
-MobileNetV1="MobileNetV1_pretrained.tar"
-MobileNetV2="MobileNetV2_pretrained.tar"
-ResNet34="ResNet34_pretrained.tar"
-pretrain_dir='../pretrain'
-
-if [ ! -d ${pretrain_dir} ]; then
- mkdir ${pretrain_dir}
-fi
-
-cd ${pretrain_dir}
-
-if [ ! -f ${MobileNetV1} ]; then
- wget ${root_url}/${MobileNetV1}
- tar xf ${MobileNetV1}
-fi
-
-if [ ! -f ${MobileNetV2} ]; then
- wget ${root_url}/${MobileNetV2}
- tar xf ${MobileNetV2}
-fi
-
-if [ ! -f ${ResNet34} ]; then
- wget ${root_url}/${ResNet34}
- tar xf ${ResNet34}
-fi
-
-cd -
-
-# enable GC strategy
-export FLAGS_fast_eager_deletion_mode=1
-export FLAGS_eager_delete_tensor_gb=0.0
-
-export CUDA_VISIBLE_DEVICES=0
-
-## for quantization of mobilenet_v1
-#python -u compress.py \
-# --model "MobileNet" \
-# --use_gpu 1 \
-# --batch_size 256 \
-# --pretrained_model ../pretrain/MobileNetV1_pretrained \
-# --config_file "./configs/mobilenet_v1.yaml" \
-#> mobilenet_v1.log 2>&1 &
-#tail -f mobilenet_v1.log
-
-## for quantization of mobilenet_v2
-#python -u compress.py \
-# --model "MobileNetV2" \
-# --use_gpu 1 \
-# --batch_size 32 \
-# --pretrained_model ../pretrain/MobileNetV2_pretrained \
-# --config_file "./configs/mobilenet_v2.yaml" \
-# > mobilenet_v2.log 2>&1 &
-#tail -f mobilenet_v2.log
-
-# for quantization of resnet34
-python -u compress.py \
- --model "ResNet34" \
- --use_gpu 1 \
- --batch_size 32 \
- --pretrained_model ../pretrain/ResNet34_pretrained \
- --config_file "./configs/resnet34.yaml" \
- > resnet34.log 2>&1 &
-tail -f resnet34.log
diff --git a/PaddleSlim/compress.py b/PaddleSlim/compress.py
deleted file mode 100644
index 7481160a878e0f9d84f4ae2f6588a84bf6bf3083..0000000000000000000000000000000000000000
--- a/PaddleSlim/compress.py
+++ /dev/null
@@ -1,169 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import os
-import time
-import sys
-import logging
-import paddle
-import models
-import argparse
-import functools
-import paddle.fluid as fluid
-import reader
-from utility import add_arguments, print_arguments
-
-from paddle.fluid.contrib.slim import Compressor
-
-logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
-_logger = logging.getLogger(__name__)
-_logger.setLevel(logging.INFO)
-
-parser = argparse.ArgumentParser(description=__doc__)
-add_arg = functools.partial(add_arguments, argparser=parser)
-# yapf: disable
-add_arg('batch_size', int, 64*4, "Minibatch size.")
-add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
-add_arg('total_images', int, 1281167, "Training image number.")
-add_arg('class_dim', int, 1000, "Class number.")
-add_arg('image_shape', str, "3,224,224", "Input image size")
-add_arg('model', str, "MobileNet", "Set the network to use.")
-add_arg('pretrained_model', str, None, "The path of the pretrained model.")
-add_arg('teacher_model', str, None, "Set the teacher network to use.")
-add_arg('teacher_pretrained_model', str, None, "The path of the teacher's pretrained model.")
-add_arg('compress_config', str, None, "The config file for compression with yaml format.")
-add_arg('quant_only', bool, False, "Only do quantization-aware training.")
-add_arg('enable_ce', bool, False, "If set, run the task with continuous evaluation logs.")
-# yapf: enable
-
-model_list = [m for m in dir(models) if "__" not in m]
-
-
-def compress(args):
- # add ce
- if args.enable_ce:
- SEED = 1
- fluid.default_main_program().random_seed = SEED
- fluid.default_startup_program().random_seed = SEED
-
- image_shape = [int(m) for m in args.image_shape.split(",")]
-
- assert args.model in model_list, "{} is not in lists: {}".format(args.model,
- model_list)
- image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
- # model definition
- model = models.__dict__[args.model]()
-
- if args.model == "GoogleNet":
- out0, out1, out2 = model.net(input=image, class_dim=args.class_dim)
- cost0 = fluid.layers.cross_entropy(input=out0, label=label)
- cost1 = fluid.layers.cross_entropy(input=out1, label=label)
- cost2 = fluid.layers.cross_entropy(input=out2, label=label)
- avg_cost0 = fluid.layers.mean(x=cost0)
- avg_cost1 = fluid.layers.mean(x=cost1)
- avg_cost2 = fluid.layers.mean(x=cost2)
- avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
- acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=out0, label=label, k=5)
- else:
- out = model.net(input=image, class_dim=args.class_dim)
- cost = fluid.layers.cross_entropy(input=out, label=label)
- avg_cost = fluid.layers.mean(x=cost)
- acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
- val_program = fluid.default_main_program().clone()
- if args.quant_only:
- boundaries = [int(args.total_images / args.batch_size * 10),
- int(args.total_images / args.batch_size * 16)]
- values = [1e-4, 1e-5, 1e-6]
- else:
- boundaries = [int(args.total_images / args.batch_size * 30),
- int(args.total_images / args.batch_size * 60),
- int(args.total_images / args.batch_size * 90)]
- values = [0.1, 0.01, 0.001, 0.0001]
- opt = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(
- boundaries=boundaries,
- values=values),
- regularization=fluid.regularizer.L2Decay(4e-5))
-
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(fluid.default_startup_program())
-
- if args.pretrained_model:
-
- def if_exist(var):
- return os.path.exists(os.path.join(args.pretrained_model, var.name))
-
- fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
-
- val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
- val_feed_list = [('image', image.name), ('label', label.name)]
- val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
-
- train_reader = paddle.batch(
- reader.train(), batch_size=args.batch_size, drop_last=True)
- train_feed_list = [('image', image.name), ('label', label.name)]
- train_fetch_list = [('loss', avg_cost.name)]
-
- teacher_programs = []
- distiller_optimizer = None
- if args.teacher_model:
- teacher_model = models.__dict__[args.teacher_model]()
- # define teacher program
- teacher_program = fluid.Program()
- startup_program = fluid.Program()
- with fluid.program_guard(teacher_program, startup_program):
- img = teacher_program.global_block()._clone_variable(
- image, force_persistable=False)
- predict = teacher_model.net(img,
- class_dim=args.class_dim,
- conv1_name='res_conv1',
- fc_name='res_fc')
- exe.run(startup_program)
- assert args.teacher_pretrained_model and os.path.exists(
- args.teacher_pretrained_model
- ), "teacher_pretrained_model should be set when teacher_model is not None."
-
- def if_exist(var):
- return os.path.exists(
- os.path.join(args.teacher_pretrained_model, var.name))
-
- fluid.io.load_vars(
- exe,
- args.teacher_pretrained_model,
- main_program=teacher_program,
- predicate=if_exist)
-
- distiller_optimizer = opt
- teacher_programs.append(teacher_program.clone(for_test=True))
-
- com_pass = Compressor(
- place,
- fluid.global_scope(),
- fluid.default_main_program(),
- train_reader=train_reader,
- train_feed_list=train_feed_list,
- train_fetch_list=train_fetch_list,
- eval_program=val_program,
- eval_reader=val_reader,
- eval_feed_list=val_feed_list,
- eval_fetch_list=val_fetch_list,
- teacher_programs=teacher_programs,
- train_optimizer=opt,
- distiller_optimizer=distiller_optimizer)
- com_pass.config(args.compress_config)
- com_pass.run()
-
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- compress(args)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/configs/auto_prune.yaml b/PaddleSlim/configs/auto_prune.yaml
deleted file mode 100644
index 51a8e2f887b1b96114ea810795ef9288d8b15b01..0000000000000000000000000000000000000000
--- a/PaddleSlim/configs/auto_prune.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-controllers:
- sa_controller:
- class: 'SAController'
- reduce_rate: 0.9
- init_temperature: 100
- max_iter_number: 300
-strategies:
- auto_pruning_strategy:
- class: 'AutoPruneStrategy'
- pruner: 'pruner_1'
- controller: 'sa_controller'
- start_epoch: 0
- end_epoch: 500
- retrain_epoch: 0
- max_ratio: 0.50
- min_ratio: 0.48
- uniform_range: 0.4
- pruned_params: '.*_sep_weights'
- metric_name: 'acc_top1'
-compressor:
- epoch: 500
- checkpoint_path: './checkpoints_auto_pruning/'
- strategies:
- - auto_pruning_strategy
diff --git a/PaddleSlim/configs/filter_pruning_sen.yaml b/PaddleSlim/configs/filter_pruning_sen.yaml
deleted file mode 100644
index f83079f5ff704f0e8605bb41e010e935a4180a44..0000000000000000000000000000000000000000
--- a/PaddleSlim/configs/filter_pruning_sen.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-strategies:
- sensitive_pruning_strategy:
- class: 'SensitivePruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- delta_rate: 0.1
- target_ratio: 0.5
- num_steps: 1
-# eval_rate: 0.2
- pruned_params: '.*_sep_weights'
- sensitivities_file: 'mobilenet_acc_top1_sensitive.data'
- metric_name: 'acc_top1'
-compressor:
- epoch: 200
- #init_model: './checkpoints/0' # Enable this option to load a checkpoint.
- checkpoint_path: './checkpoints/'
- strategies:
- - sensitive_pruning_strategy
diff --git a/PaddleSlim/configs/filter_pruning_uniform.yaml b/PaddleSlim/configs/filter_pruning_uniform.yaml
deleted file mode 100644
index 798e7f1bea875d626e84fd802df885ed95194de6..0000000000000000000000000000000000000000
--- a/PaddleSlim/configs/filter_pruning_uniform.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-strategies:
- uniform_pruning_strategy:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.5
- pruned_params: '.*_sep_weights'
- metric_name: 'acc_top1'
-compressor:
- epoch: 200
- #init_model: './checkpoints/0' # Enable this option to load a checkpoint.
- checkpoint_path: './checkpoints/'
- strategies:
- - uniform_pruning_strategy
diff --git a/PaddleSlim/configs/mobilenetv1_resnet50_distillation.yaml b/PaddleSlim/configs/mobilenetv1_resnet50_distillation.yaml
deleted file mode 100644
index 3a2b00dc0d3bb9d22a9bdcbd58e620f70906e62f..0000000000000000000000000000000000000000
--- a/PaddleSlim/configs/mobilenetv1_resnet50_distillation.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: 1.0
-distillers:
- l2_distiller:
- class: 'L2Distiller'
- teacher_feature_map: 'res_fc.tmp_0'
- student_feature_map: 'fc_0.tmp_0'
- distillation_loss_weight: 1
-strategies:
- distillation_strategy:
- class: 'DistillationStrategy'
- distillers: ['l2_distiller']
- start_epoch: 0
- end_epoch: 130
-compressor:
- epoch: 130
- checkpoint_path: './checkpoints/'
- strategies:
- - distillation_strategy
diff --git a/PaddleSlim/configs/quantization.yaml b/PaddleSlim/configs/quantization.yaml
deleted file mode 100644
index d7f2a939315f9f268de0eb4860361a0cef2058f6..0000000000000000000000000000000000000000
--- a/PaddleSlim/configs/quantization.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-version: 1.0
-strategies:
- quantization_strategy:
- class: 'QuantizationStrategy'
- start_epoch: 0
- end_epoch: 19
- float_model_save_path: './output/float'
- mobile_model_save_path: './output/mobile'
- int8_model_save_path: './output/int8'
- weight_bits: 8
- activation_bits: 8
- weight_quantize_type: 'abs_max'
- activation_quantize_type: 'moving_average_abs_max'
- save_in_nodes: ['image']
- save_out_nodes: ['fc_0.tmp_2']
-compressor:
- epoch: 20
- checkpoint_path: './checkpoints_quan/'
- strategies:
- - quantization_strategy
diff --git a/PaddleSlim/configs/quantization_dist.yaml b/PaddleSlim/configs/quantization_dist.yaml
deleted file mode 100644
index 6f7e6afd512a5eb50b0a32d0998112c6301492aa..0000000000000000000000000000000000000000
--- a/PaddleSlim/configs/quantization_dist.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# Step1: distillation training from epoch-0 to epoch-120
-# Step2: quantization training from epoch-121 to epoch-141
-version: 1.0
-distillers:
- fsp_distiller:
- class: 'FSPDistiller'
- teacher_pairs: [['res2a_branch2a.conv2d.output.1.tmp_0', 'res3a_branch2a.conv2d.output.1.tmp_0']]
- student_pairs: [['depthwise_conv2d_1.tmp_0', 'conv2d_3.tmp_0']]
- distillation_loss_weight: 1
- l2_distiller:
- class: 'L2Distiller'
- teacher_feature_map: 'res_fc.tmp_0'
- student_feature_map: 'fc_0.tmp_0'
- distillation_loss_weight: 1
-strategies:
- distillation_strategy:
- class: 'DistillationStrategy'
- distillers: ['fsp_distiller', 'l2_distiller']
- start_epoch: 0
- end_epoch: 120
- quantization_strategy:
- class: 'QuantizationStrategy'
- start_epoch: 121
- end_epoch: 141
- float_model_save_path: './output/float'
- mobile_model_save_path: './output/mobile'
- int8_model_save_path: './output/int8'
- weight_bits: 8
- activation_bits: 8
- weight_quantize_type: 'abs_max'
- activation_quantize_type: 'abs_max'
-
-compressor:
- epoch: 142
- checkpoint_path: './checkpoints/'
- strategies:
- - distillation_strategy
- - quantization_strategy
diff --git a/PaddleSlim/configs/quantization_pruning.yaml b/PaddleSlim/configs/quantization_pruning.yaml
deleted file mode 100644
index 0499051037215c8341ef18fdffb8605b912ee8c4..0000000000000000000000000000000000000000
--- a/PaddleSlim/configs/quantization_pruning.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# step1: Pruning at epoch-0
-# step2: Fine-tune from epoch-0 to epoch-120
-# step3: Quantization training from epoch-121 to epoch-141
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-strategies:
- uniform_pruning_strategy:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.5
- pruned_params: '.*_sep_weights'
- metric_name: 'acc_top1'
- quantization_strategy:
- class: 'QuantizationStrategy'
- start_epoch: 121
- end_epoch: 141
- float_model_save_path: './output/float'
- mobile_model_save_path: './output/mobile'
- int8_model_save_path: './output/int8'
- weight_bits: 8
- activation_bits: 8
- weight_quantize_type: 'abs_max'
- activation_quantize_type: 'abs_max'
-compressor:
- epoch: 142
- #init_model: './checkpoints/0' # Enable this option to load a checkpoint.
- checkpoint_path: './checkpoints/'
- strategies:
- - uniform_pruning_strategy
- - quantization_strategy
diff --git a/PaddleSlim/docs/demo.md b/PaddleSlim/docs/demo.md
deleted file mode 100644
index c3c9001ed039c7f20d0a307ba3e948ff2c5f0e99..0000000000000000000000000000000000000000
--- a/PaddleSlim/docs/demo.md
+++ /dev/null
@@ -1,393 +0,0 @@
-
-
----
-# Paddle Model Compression Toolkit: Usage Examples
-
-## Contents
-
-- [Overview](#0-overview)
-- [Data preparation](#1-data-preparation)
-- [Compression script](#2-the-compression-script)
-- [Distillation example](#31-distillation)
-- [Pruning example](#32-uniform-pruning)
-- [Quantization example](#35-int8-quantization-aware-training)
-- [Quantization after distillation](#36-int8-quantization-after-distillation)
-- [Quantization after pruning](#37-int8-quantization-after-pruning)
-- [Light model structure search example](#38-light-model-structure-search-example)
-
-## 0. Overview
-These examples are based on the code under [PaddlePaddle/models/fluid/PaddleCV/image_classification](https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/image_classification) and implement the following strategies:
-
-1. Distillation: distill MobileNetV1 with ResNet50 on the ImageNet-1000 dataset, [code](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/run.sh#L42).
-2. Pruning: prune a pretrained MobileNetV1, [code](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/run.sh#L65).
-3. Quantization: int8 quantization-aware training of a pretrained MobileNetV1, [code](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/run.sh#L81)
-4. Distillation followed by quantization: first distill MobileNetV1 with ResNet50, then run int8 quantization-aware training on the distilled model, [code](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/run.sh#L99).
-5. Pruning followed by quantization: first prune MobileNetV1 with the uniform pruning strategy, then run int8 quantization-aware training on the pruned model, [code](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/run.sh#L114).
-6. Light model structure search: first search for a set of tokens with simulated annealing, then build a network from those tokens and train it, [code](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/light_nas/run.sh).
-
-Full code of these examples: https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/run.sh
-
-Usage:
-Clone [PaddlePaddle/models](https://github.com/PaddlePaddle/models) locally and enter the models/fluid/PaddleSlim directory.
-
-**File structure**
-
-```
-/.
- |-configs # Config files for the compression tasks: distillation, int8 quantization, filter pruning, and combined strategies.
- |-data # Training data and pretrained models
- |-models # Definitions of the MobileNetV1 and ResNet50 networks
- |-quant_low_level_api # Low-level quantization-aware-training APIs for special cases; most users can ignore this directory
- |-compress.py # Main script of the compression tasks, shared by all the strategies in these examples. It defines the model-side information a compression task needs.
- |-reader.py # Data preprocessing logic
- |-run.sh # Launcher of the compression tasks
- |-utility.py # Common utilities
-```
-
-The five compression strategies in these examples share the same training data and the same Python script `compress.py`; each strategy has its own config file.
-
-Section 1 covers data preparation, Section 2 walks through the key steps of `compress.py`, and Section 3 shows how to run each compression strategy.
-
-
-
-
-## 1. Data preparation
-
-### 1.1 Training data
-Prepare the training data following the data-preparation tutorial under [models/fluid/PaddleCV/image_classification](https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/image_classification#data-preparation) and place it under the PaddleSlim/data directory.
-
-### 1.2 Pretrained models
-
-The run.sh script automatically downloads the pretrained ResNet50 and MobileNetV1 models from [models/fluid/PaddleCV/image_classification](https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/image_classification#supported-models-and-performances) and places them under the PaddleSlim/pretrain directory.
-
-
-## 2. The compression script
-All the model-side information a compression task needs is defined in `compress.py`. Its key steps are briefly introduced here:
-
-### 2.1 Defining the target network
-
-The following snippet of compress.py defines the train program, which contains only forward operators.
-```
-out = model.net(input=image, class_dim=args.class_dim)
-cost = fluid.layers.cross_entropy(input=out, label=label)
-avg_cost = fluid.layers.mean(x=cost)
-acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
-acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
-```
-
-The eval_program, used to evaluate model accuracy during compression, is then obtained via clone:
-
-```
-val_program = fluid.default_main_program().clone()
-```
-
-After the target network is defined, it must be initialized, and a pretrained model loaded as needed.
-
-### 2.2 Defining feed_list and fetch_list
-For the train program, train_feed_list specifies which variables the data taken from the train data reader is fed to, and train_fetch_list specifies which results are shown in the training log. To print accuracy information in the log during training, just add ('acc_top1', acc_top1.name) to train_fetch_list.
-```
-train_feed_list = [('image', image.name), ('label', label.name)]
-train_fetch_list = [('loss', avg_cost.name)]
-```
-
->Note: train_fetch_list must contain a 'loss' entry.
-
-For the eval program, define eval_feed_list and eval_fetch_list in the same way:
-
-```
-val_feed_list = [('image', image.name), ('label', label.name)]
-val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
-```
-
-### 2.3 Defining the teacher network
-
-The following snippet defines the teacher network and initializes it.
-```
-teacher_program = fluid.Program()
-startup_program = fluid.Program()
-with fluid.program_guard(teacher_program, startup_program):
- img = teacher_program.global_block()._clone_variable(image, force_persistable=False)
- predict = teacher_model.net(img, class_dim=args.class_dim)
-exe.run(startup_program)
-```
-Note that:
-
-- The teacher network has only one input; simply clone the image variable defined in the train program (fluid.default_main_program).
-- The teacher network's outputs only need to go up to predict; there is no need to add loss, accuracy, or similar operators.
-- The teacher network must be initialized and its pretrained model loaded.
-
->Note: the fc-layer weight parameters of both ResNet50 and MobileNetV1 are named 'fc_1.weight', so modify the name of the ResNet fc layer in PaddleSlim/models/resnet.py, and also rename the corresponding weight file in the ResNet50 pretrained model so that it matches the name in resnet.py.
-
-
-## 3. Running the compression strategy examples
-The commands for all the examples live in `run.sh`; edit run.sh to run a different compression strategy.
-
-### 3.1 Distillation
-
-In this example, a pretrained ResNet50 model supervises the training of a MobileNetV1 model.
-Edit run.sh and run the following command for the distillation example:
-```
-# for distillation
-#--------------------
-export CUDA_VISIBLE_DEVICES=0
-python compress.py \
---model "MobileNet" \
---teacher_model "ResNet50" \
---teacher_pretrained_model ./data/pretrain/ResNet50_pretrained \
---compress_config ./configs/mobilenetv1_resnet50_distillation.yaml
-```
-The accuracy on the evaluation dataset:
-
-|- |Accuracy (top5/top1) |
-|---|---|
-| ResNet50 distillation | 90.92% / 71.97%|
-
-
-
-Figure 1
-
-
-
-### 3.2 Uniform pruning
-
-In this example, 50% of MobileNetV1's FLOPS are pruned away.
-Edit run.sh and run the following command for the uniform filter-pruning example:
-
-```
-# for uniform filter pruning
-#---------------------------
-export CUDA_VISIBLE_DEVICES=0
-python compress.py \
---model "MobileNet" \
---pretrained_model ./data/pretrain/MobileNetV1_pretrained \
---compress_config ./configs/filter_pruning_uniform.yaml
-```
-The accuracy on the evaluation dataset:
-
-| FLOPS |Model size|Accuracy (top5/top1) |
-|---|---|---|
-| -50%|-47.0%(9.0M) |89.13% / 69.83%|
-
-
-
-Figure 2
-
-
-
-### 3.3 Sensitivity-based pruning
-
-In this example, 50% of MobileNetV1's FLOPS are pruned away.
-Edit run.sh and run the following command for the sensitivity-based filter-pruning example:
-
-```
-# for sensitivity filter pruning
-#---------------------------
-export CUDA_VISIBLE_DEVICES=0
-python compress.py \
---model "MobileNet" \
---pretrained_model ./data/pretrain/MobileNetV1_pretrained \
---compress_config ./configs/filter_pruning_sen.yaml
-```
-The accuracy on the evaluation dataset:
-
-| FLOPS |Model size| Accuracy (top5/top1) |
-|---|---|---|
-| -50%|-61.2%(6.6M) |88.47% / 68.68%|
-
-
-
-Figure 3
-
-
-### 3.4 Searching for pruning-ratio hyperparameters
-
-In this example, simulated annealing searches for an optimal set of pruning ratios that removes 50% of MobileNetV1's FLOPS.
-Edit run.sh and run the following command to search for a set of pruning ratios:
-
-```
-
-# for auto filter pruning
-#---------------------------
-export CUDA_VISIBLE_DEVICES=0
-python compress.py \
---model "MobileNet" \
---pretrained_model ./pretrain/MobileNetV1_pretrained \
---compress_config ./configs/auto_prune.yaml
-
-```
-
-Through the steps above you obtain an optimal set of tokens; set them in the `auto_prune.yaml` file as follows:
-
-```
-strategies:
- auto_pruning_strategy:
- class: 'AutoPruneStrategy'
- pruner: 'pruner_1'
- controller: 'sa_controller'
- start_epoch: 0
- end_epoch: 200
- retrain_epoch: 200
- max_ratio: 0.50
- min_ratio: 0.48
- uniform_range: 0.4
- init_tokens: [39, 38, 38, 24, 21, 34, 24, 29, 19, 11, 33, 36, 39]
- pruned_params: '.*_sep_weights'
- metric_name: 'acc_top1'
-compressor:
- epoch: 200
- checkpoint_path: './checkpoints_auto_pruning/'
- strategies:
- - auto_pruning_strategy
-```
-
-The options that need changing are:
-
-- end_epoch: set to 200; the training task runs for 200 epochs in total.
-- retrain_epoch: set to 200; all 200 epochs of this task are plain training, with no search.
-- init_tokens: add init_tokens under auto_pruning_strategy, set to the optimal tokens found in the previous step.
-- compressor::epoch: set to 200; the whole compression task exits after 200 epochs.
-
-
-The accuracy on the evaluation dataset:
-
-| FLOPS |Model size| Accuracy (top5/top1) |Pruned ratios|
-|---|---|---|---|
-| -50%|- |88.86% / 69.64%|[0.39, 0.38, 0.38, 0.24, 0.21, 0.34, 0.24, 0.29, 0.19, 0.11, 0.33, 0.36, 0.39]|
-
->The search strategy is stochastic; with the search parameters above you will not necessarily find exactly the same result.
-
-### 3.5 int8 quantization-aware training
-
-Edit run.sh and run the following command for the int8 quantization-aware-training example:
-
-```
-# for quantization
-#---------------------------
-export CUDA_VISIBLE_DEVICES=0
-python compress.py \
---batch_size 64 \
---model "MobileNet" \
---pretrained_model ./pretrain/MobileNetV1_pretrained \
---compress_config ./configs/quantization.yaml
-```
-
-The results:
-
-| Model (dynamic int8 quantization) | Model size | Accuracy (top5/top1)|
-|---|---|---|
-|MobileNetV1|-71.76%(4.8M)|89.64% / 71.01%|
-
-
-### 3.6 int8 quantization after distillation
-
-This example first distills MobileNetV1 with ResNet50 for 120 epochs, then runs dynamic int8 quantization-aware training on the MobileNetV1 model.
-Edit run.sh and run the following command for the combined distillation and int8-quantization example:
-
-```
-# for distillation with quantization
-#-----------------------------------
-export CUDA_VISIBLE_DEVICES=0
-python compress.py \
---model "MobileNet" \
---teacher_model "ResNet50" \
---teacher_pretrained_model ./data/pretrain/ResNet50_pretrained \
---compress_config ./configs/quantization_dist.yaml
-```
-
-The results:
-
-| Model (ResNet50 distillation + int8 quantization) | Model size | Accuracy (top1) |
-| --- | --- | --- |
-| MobileNet v1 | -71.76%(4.8M)| 72.01% |
-
-### 3.7 int8 quantization after pruning
-
-This example first prunes 50% of the FLOPS from a pretrained MobileNetV1, then runs dynamic int8 quantization-aware training on it.
-Edit run.sh and run the following command for the combined pruning and int8-quantization example:
-
-```
-# for uniform filter pruning with quantization
-#---------------------------------------------
-export CUDA_VISIBLE_DEVICES=0
-python compress.py \
---model "MobileNet" \
---pretrained_model ./data/pretrain/MobileNetV1_pretrained \
---compress_config ./configs/quantization_pruning.yaml
-```
-
-The results:
-
-| Model (FLOPS pruning + dynamic int8 quantization) | Model size | Accuracy (top1) |
-| --- | --- | --- |
-| MobileNet v1 (FLOPS -50%) | -86.47%(2.3M) | 69.20% |
-
-### 3.8 Light model structure search example
-
-This example first searches for a set of tokens with simulated annealing, then builds a model initialized from the searched tokens and trains it.
-
-> tokens: light_nas maps each CNN model in the search space to a list of tokens, which uniquely identifies that CNN model. The search keeps optimizing the tokens so that the model built from them performs better.
->
-> In light_nas, a token list has length `30`, grouped into `5` groups of `6` numbers each.
->
-> The `6` numbers in each group mean: `0: channel expansion factor, 1: number of filters, 2: number of layers, 3: kernel size, 4: whether to use a shortcut, 5: whether to use SE (squeeze-and-excitation)` (a decoding sketch follows)
-
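-As an illustration of this grouping, a token list can be decoded like this (a minimal sketch; the field names simply follow the description above and are not the toolkit's own):
-
-```
-def decode_tokens(tokens):
-    # split a 30-element light_nas token list into 5 groups of 6 fields
-    fields = ['expansion', 'num_filters', 'num_layers',
-              'kernel_size', 'shortcut', 'se']
-    assert len(tokens) == 30
-    return [dict(zip(fields, tokens[i:i + 6])) for i in range(0, 30, 6)]
-
-# token0 from the table at the end of this document
-groups = decode_tokens([3, 1, 1, 0, 1, 0, 3, 2, 1, 0, 1, 0, 3, 1, 1, 0, 1, 0,
-                        2, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0])
-```
-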
-step1: Enter the directory `PaddlePaddle/models/PaddleSlim/light_nas/`.
-
-step2: (optional) Generate the latency lookup table `latency_lookup_table.txt` as described in the [user guide](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/usage.md#245-延时评估器生成方式) and place it in the current directory.
-
-step3: (optional) Change LATENCY_LOOKUP_TABLE_PATH in `light_nas_space.py` to LATENCY_LOOKUP_TABLE_PATH='latency_lookup_table.txt'.
-
-step4: In the current directory, create a symlink to the data directory one level up: `ln -s ../data data`.
-
-step5: In `compress.yaml`, set the `server_ip` parameter to the IP of the current machine.
-
-step6: (optional) In `compress.yaml`, set the `target_latency` parameter to your target latency.
-
-step7: Run `sh run.sh`; adjust `CUDA_VISIBLE_DEVICES` in `run.sh` as needed.
-
-step8: Change `LightNASSpace::init_tokens` in `light_nas_space.py` to return the optimal tokens found in step7.
-
-step9: In `compress.yaml`, remove the `strategies` under `compressor`.
-
-step10: Run `sh run.sh` to train.
-
-Two sets of results under FLOPS constraints:
-
-| - | FLOPS | Top1/Top5 accuracy | GPU cost | token |
-|------------------|-------|--------------------|----------------------|--------|
-| MobileNetV2 | 0% | 71.90% / 90.55% | - | - |
-| Light-NAS-model0 | -3% | 72.45% / 90.70% | 1.2K GPU hours(V100) | token0 |
-| Light-NAS-model1 | -17% | 71.84% / 90.45% | 1.2K GPU hours(V100) | token1 |
-
-
-Model structure search experiments under hardware-latency constraints:
-
-| - | Latency | Top1/Top5 accuracy | GPU cost | token |
-|---------------|---------|--------------------|---------------------|--------|
-| MobileNetV2 | 0% | 71.90% / 90.55% | - | - |
-| RK3288 dev board | -22% | 71.97% / 90.35% | 1.2K GPU hours(V100) | token2 |
-| Android phone | -20% | 72.06% / 90.36% | 1.2K GPU hours(V100) | token3 |
-| iPhone | -16% | 72.22% / 90.47% | 1.2K GPU hours(V100) | token4 |
-
-
-| token name | tokens |
-|------------|--------|
-| token0 | [3, 1, 1, 0, 1, 0, 3, 2, 1, 0, 1, 0, 3, 1, 1, 0, 1, 0, 2, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0] |
-| token1 | [3, 1, 1, 0, 1, 0, 3, 2, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 2, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1] |
-| token2 | [0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 2, 2, 1, 0, 1, 1, 2, 1, 0, 0, 0, 0, 3, 2, 1, 0, 1, 0] |
-| token3 | [3, 0, 0, 0, 1, 0, 1, 2, 0, 0, 1, 0, 0, 2, 0, 1, 1, 0, 3, 1, 0, 1, 1, 0, 0, 2, 1, 1, 1, 0] |
-| token4 | [3, 1, 0, 0, 1, 0, 3, 1, 1, 0, 1, 0, 3, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 3, 1, 1, 0, 1, 0] |
diff --git a/PaddleSlim/docs/images/demo/demo.zip b/PaddleSlim/docs/images/demo/demo.zip
deleted file mode 100644
index 428bdc4a6b59caf845ac00bbc1ecd71003683575..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/demo/demo.zip and /dev/null differ
diff --git a/PaddleSlim/docs/images/demo/distillation_result.png b/PaddleSlim/docs/images/demo/distillation_result.png
deleted file mode 100644
index 49e954feb44e162853fe33ba346a27b5f4221858..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/demo/distillation_result.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/demo/pruning_sen_result.png b/PaddleSlim/docs/images/demo/pruning_sen_result.png
deleted file mode 100644
index e725682fa307f5666d542a258f278d71f5f6db5a..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/demo/pruning_sen_result.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/demo/pruning_uni_result.png b/PaddleSlim/docs/images/demo/pruning_uni_result.png
deleted file mode 100644
index a3acdd9fd824da76d105cb8e20457eeb1984659e..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/demo/pruning_uni_result.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/framework_0.png b/PaddleSlim/docs/images/framework_0.png
deleted file mode 100644
index 223f384f23775403129a69967bbe9e891dfa77ff..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/framework_0.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/framework_1.png b/PaddleSlim/docs/images/framework_1.png
deleted file mode 100644
index 642bc13f8e12eace8fe8e70d8f3ec04a39e4275a..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/framework_1.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/distillation_0.png b/PaddleSlim/docs/images/tutorial/distillation_0.png
deleted file mode 100644
index 0946b90914339d2b66af7b9c86b5f77b4ec57ec5..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/distillation_0.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/light-nas-block.png b/PaddleSlim/docs/images/tutorial/light-nas-block.png
deleted file mode 100644
index c469c1b634cef2dcf4c6e4f6c7907e7d7e9aaf09..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/light-nas-block.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/pruning_0.png b/PaddleSlim/docs/images/tutorial/pruning_0.png
deleted file mode 100644
index 55cab1d5658d040c256ffe5ddd6a440b82a96b35..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/pruning_0.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/pruning_1.png b/PaddleSlim/docs/images/tutorial/pruning_1.png
deleted file mode 100644
index 99dc55e83e6c745fc55e6e0cc67b55ef38754080..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/pruning_1.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/pruning_2.png b/PaddleSlim/docs/images/tutorial/pruning_2.png
deleted file mode 100644
index 8c413672ccc827537e10a54b749dd4b4bf7b0122..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/pruning_2.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/pruning_3.png b/PaddleSlim/docs/images/tutorial/pruning_3.png
deleted file mode 100644
index 764b9c8f2eaabbc14dd593cd138c04fe9d3ef202..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/pruning_3.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/pruning_4.png b/PaddleSlim/docs/images/tutorial/pruning_4.png
deleted file mode 100644
index c99e89261bccea4cc82de20288a59addfdb153ab..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/pruning_4.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/quan_bwd.png b/PaddleSlim/docs/images/tutorial/quan_bwd.png
deleted file mode 100644
index 9fe571f30bbc133f0a8c5da1639876a3ca39001b..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/quan_bwd.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/quan_forward.png b/PaddleSlim/docs/images/tutorial/quan_forward.png
deleted file mode 100644
index 56c52bb140cb3e0590f3cafa9043205779116533..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/quan_forward.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/quan_fwd_1.png b/PaddleSlim/docs/images/tutorial/quan_fwd_1.png
deleted file mode 100644
index 8224a560d525b47e5d8395064bae018d4d9e67c4..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/quan_fwd_1.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/quan_table_0.png b/PaddleSlim/docs/images/tutorial/quan_table_0.png
deleted file mode 100644
index ea6571509e4b8f1fa00ee8f9ffb0a6870b740d0f..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/quan_table_0.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/tutorial/quan_table_1.png b/PaddleSlim/docs/images/tutorial/quan_table_1.png
deleted file mode 100644
index 53bc672246c341dd46dbb7a269ff2b3d1c35a05d..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/tutorial/quan_table_1.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/usage/ConvertToInt8Pass.png b/PaddleSlim/docs/images/usage/ConvertToInt8Pass.png
deleted file mode 100644
index 8b5849819c0bc8e592dc8f864d8945330df85ab1..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/usage/ConvertToInt8Pass.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/usage/FreezePass.png b/PaddleSlim/docs/images/usage/FreezePass.png
deleted file mode 100644
index acd2b0a890a8af85bec6eecdb22e47ad386a178c..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/usage/FreezePass.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/usage/TransformForMobilePass.png b/PaddleSlim/docs/images/usage/TransformForMobilePass.png
deleted file mode 100644
index 4104cacc67af0be1c7bc152696e2ae544127aace..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/usage/TransformForMobilePass.png and /dev/null differ
diff --git a/PaddleSlim/docs/images/usage/TransformPass.png b/PaddleSlim/docs/images/usage/TransformPass.png
deleted file mode 100644
index f29ab62753e0e6ddf28d0c1dda7139705fc24b18..0000000000000000000000000000000000000000
Binary files a/PaddleSlim/docs/images/usage/TransformPass.png and /dev/null differ
diff --git a/PaddleSlim/docs/model_zoo.md b/PaddleSlim/docs/model_zoo.md
deleted file mode 100644
index a9094797ed39b7da66b905eafc7a7377c2ff2eb3..0000000000000000000000000000000000000000
--- a/PaddleSlim/docs/model_zoo.md
+++ /dev/null
@@ -1,218 +0,0 @@
-
-
-
----
-# Paddle Model Compression Toolkit: Experiments and Model Zoo
-
-## Contents
-
-- [Quantization experiments](#1-int8-quantization-aware-training)
-- [Pruning experiments](#2-pruning-experiments)
-- [Distillation experiments](#3-distillation)
-- [Combined experiments](#4-combined-experiments)
-
-
-## 1. int8 quantization-aware training
-
-The experiments are evaluated on the ImageNet-1000 dataset. Top-5/top-1 accuracy before and after quantization-aware training:
-
-| Model | FP32| int8(A:abs_max, W:abs_max) | int8, (A:moving_average_abs_max, W:abs_max) |int8, (A:abs_max, W:channel_wise_abs_max) |
-|:---|:---:|:---:|:---:|:---:|
-|MobileNetV1|[89.68% / 70.99%](http://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar)|[89.55% / 70.74%](https://paddle-slim-models.bj.bcebos.com/quantization%2Fmobilenetv1_w_abs_a_abs_7074_8955.tar.gz)|[89.67% / 70.89%](https://paddle-slim-models.bj.bcebos.com/quantization%2Fmobilenetv1_w_abs_a_move_7089_8967.tar.gz)|[89.65% / 70.93%](https://paddle-slim-models.bj.bcebos.com/quantization%2Fmobilenetv1_w_chan_a_abs_7093_8965.tar.gz)|
-|ResNet50|[93.00% / 76.50%](http://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar)|[93.10% / 76.71%](https://paddle-slim-models.bj.bcebos.com/quantization%2Fresnet50_w_abs_a_abs_7670_9310.tar.gz)|[93.12% / 76.65%](https://paddle-slim-models.bj.bcebos.com/quantization%2Fresnet50_w_abs_a_move_7665_9312.tar.gz)|[93.05% / 76.56%](https://paddle-slim-models.bj.bcebos.com/quantization%2Fresnet50_w_chan_a_abs_7656_9304.tar.gz)|
-
-Click the links in the table to download the pretrained models.
-
-
-Model size before and after quantization-aware training:
-
-| Model | FP32 | int8(A:abs_max, W:abs_max) | int8, (A:moving_average_abs_max, W:abs_max) | int8, (A:abs_max, W:channel_wise_abs_max) |
-| :--- | :---: | :---: | :---: | :---: |
-| MobileNetV1 | 17M | 4.8M(-71.76%) | 4.9M(-71.18%) | 4.9M(-71.18%) |
-| ResNet50 | 99M | 26M(-73.74%) | 27M(-72.73%) | 27M(-72.73%) |
-
-Note: abs_max is dynamic quantization, moving_average_abs_max is static quantization, and channel_wise_abs_max quantizes convolution weights per channel.
-> 'A' stands for activation, 'W' stands for weight.
-
-## 2. Pruning experiments
-
-Dataset: ImageNet-1000
-Model: MobileNetV1
-Original model size: 17M
-Original accuracy (top5/top1): 89.54% / 70.91%
-
-### 2.1 Iterative sensitivity-based pruning
-
-#### Setup
-
-Prune step by step, removing 7% of the model's FLOPS at each step.
-The optimizer is configured as follows:
-
-```
-epoch_size=5000
-boundaries = [30, 60, 90, 120] * epoch_size # for -50% FLOPS
-#boundaries = [35, 65, 95, 125] * epoch_size # for -60% FLOPS
-#boundaries = [50, 80, 110, 140] * epoch_size # for -70% FLOPS
-values = [0.01, 0.1, 0.01, 0.001, 0.0001]
-optimizer = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values),
- regularization=fluid.regularizer.L2Decay(1e-4))
-```
-
-#### Results
-
-
-| FLOPS |Model size| Accuracy (top5/top1) |Download|
-|---|---|---|---|
-| -50%|-59.4%(6.9M) |88.22% / 68.41% |[download](https://paddle-slim-models.bj.bcebos.com/sensitive_filter_pruning_0.5_model.tar.gz)|
-| -60%|-70.6%(5.0M)|87.01% / 66.31% |[download](https://paddle-slim-models.bj.bcebos.com/sensitive_filter_pruning_0.6_model.tar.gz)|
-| -70%|-78.8%(3.6M)|85.30% / 63.41% |[download](https://paddle-slim-models.bj.bcebos.com/sensitive_filter_pruning_0.7_model.tar.gz)|
-
-### 2.2 One-shot sensitivity-based pruning
-
-#### Setup
-
-Prune 50% of the FLOPS in one step, then fine-tune for 120 epochs.
-
-The optimizer is configured as follows:
-
-```
-epoch_size=5000
-boundaries = [30, 60, 90] * epoch_size
-values = [0.1, 0.01, 0.001, 0.0001]
-optimizer = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values),
- regularization=fluid.regularizer.L2Decay(1e-4))
-```
-
-#### Results
-
-| FLOPS |Model size|Accuracy (top5/top1) |Download|
-|---|---|---|---|
-| -50%|-61.2%(6.6M)| 88.47% / 68.68% |[download](https://paddle-slim-models.bj.bcebos.com/sensitive_filter_pruning_0.5-1step.tar.gz)|
-
-### 2.3 Multi-step sensitivity-based pruning
-
-#### Setup
-
-1. Prune 20% of the FLOPS in one step, fine-tune for 120 epochs
-2. On top of the previous step, prune another 20% of the FLOPS, fine-tune for 120 epochs
-3. On top of the previous step, prune another 20% of the FLOPS, fine-tune for 120 epochs
-
-The optimizer is configured as follows:
-
-```
-epoch_size=5000
-boundaries = [30, 60, 90] * epoch_size
-values = [0.1, 0.01, 0.001, 0.0001]
-optimizer = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values),
- regularization=fluid.regularizer.L2Decay(1e-4))
-```
-
-#### Results
-
-| FLOPS |Accuracy (top5/top1)|Download |
-|---|---|---|
-| -20%|90.08% / 71.48% |[download](https://paddle-slim-models.bj.bcebos.com/sensitive_filter_pruning_3step_0.2_model.tar.gz)|
-| -36%|89.62% / 70.83%|[download](https://paddle-slim-models.bj.bcebos.com/sensitive_filter_pruning_3step_0.36_model.tar.gz)|
-| -50%|88.77% / 69.31%|[download](https://paddle-slim-models.bj.bcebos.com/sensitive_filter_pruning_3step_0.5_model.tar.gz)|
-
-
-### 2.4 Uniform pruning
-
-#### Setup
-
-Prune the specified ratio of FLOPS in one step, then fine-tune for 120 epochs.
-
-The optimizer is configured as follows:
-
-```
-epoch_size=5000
-boundaries = [30, 60, 90] * epoch_size
-values = [0.1, 0.01, 0.001, 0.0001]
-optimizer = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values),
- regularization=fluid.regularizer.L2Decay(1e-4))
-```
-
-#### Results
-
-| FLOPS |Model size|Accuracy (top5/top1) |Download |
-|---|---|---|---|
-| -50%|-47.0%(9.0M) | 88.92% / 69.66%|[download](https://paddle-slim-models.bj.bcebos.com/uniform_filter_pruning_0.5_model.tar.gz)|
-| -60%|-55.9%(7.5M)|88.22% / 68.24%| [download](https://paddle-slim-models.bj.bcebos.com/uniform_filter_pruning_0.6_model.tar.gz)|
-| -70%|-65.3%(5.9M)|86.99% / 66.57%| [download](https://paddle-slim-models.bj.bcebos.com/uniform_filter_pruning_0.7_model.tar.gz)|
-
-
-## 3. Distillation
-
-Dataset: ImageNet-1000
-Model: MobileNetV1
-Original model size: 17M
-Original accuracy (top5/top1): 89.54% / 70.91%
-
-#### Setup
-
-Distill MobileNetV1 with a trained ResNet50 for 120 epochs, adding an FSP loss on the first block and an L2 loss on the input of the softmax layer.
-
-The optimizer is configured as follows:
-
-```
-epoch_size=5000
-boundaries = [30, 60, 90] * epoch_size
-values = [0.1, 0.01, 0.001, 0.0001]
-optimizer = fluid.optimizer.Momentum(
- momentum=0.9,
- learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values),
- regularization=fluid.regularizer.L2Decay(1e-4))
-```
-
-#### Results
-
-|- |Accuracy (top5/top1) |Gain (top5/top1)|Download |
-|---|---|---|---|
-| ResNet50 distillation | 90.92% / 71.97%| +1.28% / +1.06%| [download](https://paddle-slim-models.bj.bcebos.com/mobilenetv1_resnet50_distillation_model.tar.gz)|
-
-
-## 4. Combined experiments
-
-### 4.1 Quantization after distillation
-
-#### Setup
-
-#### Results
-
-|- |Accuracy (top1) |Download |
-|---|---|---|
-| ResNet50 distillation + quantization| 72.01%| [download]()|
-
-
-### 4.2 Quantization after pruning
-
-
-#### Setup
-
-#### Results
-
-| Pruned FLOPS |Pruning + quantization (dynamic)|Download |
-|---|---|---|
-| -50%| 69.20%| [download]()|
diff --git a/PaddleSlim/docs/tutorial.md b/PaddleSlim/docs/tutorial.md
deleted file mode 100644
index 6c3a6719b0b627081cb318c9f4a72f8bdd983a4a..0000000000000000000000000000000000000000
--- a/PaddleSlim/docs/tutorial.md
+++ /dev/null
@@ -1,312 +0,0 @@
->This page contains many inline formulas, and converting them to images causes all sorts of layout problems, so we recommend rendering them with the browser plugin [MathJax Plugin for Github](https://chrome.google.com/webstore/detail/mathjax-plugin-for-github/ioemnmodlmafdkllaclgeombjnmnbima). We will later migrate this document to the [PaddlePaddle website](https://www.paddlepaddle.org).
-
-
-
----
-# Paddle Model Compression Toolkit: Algorithm Notes
-
-## Contents
-
-- [Quantization](#1-quantization-aware-training)
-- [Pruning](#2-filter-pruning)
-- [Distillation](#3-distillation)
-- [Light model structure search](#4-light-model-structure-search)
-
-## 1. Quantization-aware training
-
-### 1.1 Background
-
-In recent years, fixed-point quantization, which represents the weights and activations of a neural network with fewer bits (e.g. 8-bit, 3-bit, 2-bit), has proven effective. Its advantages include lower memory bandwidth, lower power consumption, lower compute cost, and smaller model storage.
-
-
-
-Table 1: cost comparison of different operation types
-
-
-As Table 1 shows, low-precision fixed-point operations need several orders of magnitude less silicon area and energy than high-precision floating-point ones. Fixed-point quantization brings 4x model compression and a 4x memory-bandwidth gain, plus more efficient cache use (on many devices, memory access dominates energy consumption), and computation is typically 2x-3x faster. As Table 2 shows, in many scenarios quantization causes no accuracy loss. It is also essential for inference of neural networks on embedded devices.
-
-
-
-Table 2: accuracy before and after model quantization
-
-
-Academia currently divides quantization into two major families: `Post Training Quantization` determines the quantization parameters with methods such as KL divergence or moving averages and requires no retraining, while `Quantization Aware Training` models quantization during training to determine the parameters and usually delivers higher prediction accuracy than `Post Training Quantization`.
-
-### 1.2 How quantization works
-
-#### 1.2.1 Quantization schemes
-Many schemes exist for quantizing floating-point numbers to fixed-point. For example:
-$$ r = min(max(x, a), b)$$ $$ s = \frac{b - a}{n - 1} $$ $$ q = \left \lfloor \frac{r - a}{s} \right \rceil $$
-where $x$ is the float to quantize, $[a, b]$ is the quantization range, $a$ is the minimum of the floats to quantize and $b$ the maximum, and $\left \lfloor \right \rceil$ denotes rounding to the nearest integer. With $k$ quantization levels, $n$ is $2^k$; e.g. for $k = 8$, $n = 256$. $q$ is the resulting integer.
-The quantization scheme chosen by the PaddleSlim framework is max-abs quantization (`max-abs`):
-$$ M = max(abs(x)) $$ $$ q = \left \lfloor \frac{x}{M} * (n - 1) \right \rceil $$
-where $x$ is the float to quantize and $M$ is the maximum absolute value among the floats to quantize. $\left \lfloor \right \rceil$ denotes rounding to the nearest integer. For 8-bit quantization PaddleSlim uses `int8_t`, i.e. $n = 2^7 = 128$. $q$ is the resulting integer.
-Both `min-max` quantization and `max-abs` quantization can be written in the form
-$q = scale * r + b$
-where the `min-max` and `max-abs` values are called quantization parameters, quantization scales, or quantization ranges.
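-
-As a concrete illustration, here is a minimal NumPy sketch of the max-abs scheme above (an illustration only, not PaddleSlim's implementation):
-
-```
-import numpy as np
-
-def max_abs_quantize(x, k=8):
-    """Max-abs quantization: q = round(x / M * (n - 1)) with n = 2^(k-1)."""
-    n = 2 ** (k - 1)                # for int8, n = 2^7 = 128
-    m = np.max(np.abs(x))           # quantization scale M
-    return np.round(x / m * (n - 1)).astype(np.int8), m
-
-def max_abs_dequantize(q, m, k=8):
-    """Recover approximate floats: r = q / (n - 1) * M."""
-    n = 2 ** (k - 1)
-    return q.astype(np.float32) / (n - 1) * m
-
-x = np.array([0.5, -1.2, 0.03], dtype=np.float32)
-q, m = max_abs_quantize(x)
-print(q, max_abs_dequantize(q, m))   # approximately recovers x
-```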
-
-#### 1.2.2 Quantization-aware training
-##### 1.2.2.1 Forward pass
-The forward pass uses simulated quantization:
-
-
-
-Figure 1: forward pass of simulated-quantization training
-
-
-As Figure 1 shows, the forward pass of simulated-quantization training consists of four parts:
-1) The inputs and weights are quantized to 8-bit integers.
-2) The matrix multiplication or convolution runs on 8-bit integers.
-3) The output of the matrix multiplication or convolution is dequantized to 32-bit floats.
-4) The bias addition runs on 32-bit floats; the bias is not quantized.
-For a general matrix multiplication (`GEMM`), the input $X$ and the weight $W$ are quantized as:
-$$ X_q = \left \lfloor \frac{X}{X_m} * (n - 1) \right \rceil $$ $$ W_q = \left \lfloor \frac{W}{W_m} * (n - 1) \right \rceil $$
-Run the general matrix multiplication:
-$$ Y_q = X_q * W_q $$
-Dequantize the quantized product $Y_q$:
-$$
-\begin{align}
-Y_{dq} &= \frac{Y_q}{(n - 1) * (n - 1)} * X_m * W_m \\
-&= \frac{X_q * W_q}{(n - 1) * (n - 1)} * X_m * W_m \\
-&= (\frac{X_q}{n - 1} * X_m) * (\frac{W_q}{n - 1} * W_m)
-\end{align}
-$$
-This shows that the dequantization can be moved in front of the `GEMM`: first dequantize $X_q$ and $W_q$, then run the `GEMM`. The forward workflow can therefore also be expressed as:
-
-
-
-Figure 2: equivalent workflow of the simulated-quantization forward pass
-
-
-During training, PaddleSlim uses the equivalent workflow of Figure 2: the quantization pass inserts quantization and dequantization ops into the IrGraph. Because the data after back-to-back quantize and dequantize ops is still 32-bit float, the scheme used by PaddleSlim's quantization-aware training is called simulated quantization.
-
-##### 1.2.2.2 Backward pass
-As Figure 3 shows, the gradients needed for the weight update are computed from the quantized weights and the quantized activations; all inputs and outputs of the backward pass are 32-bit floats. Note that the gradient update must be applied to the original weights: the computed gradients are added to the original weights, not to the quantized or dequantized ones.
-
-
-
-Figure 3: backward pass and weight update of simulated-quantization training
-
-
-The quantization pass therefore also rewires some inputs of the corresponding backward operators.
-
-##### 1.2.2.3 Determining the quantization scale
-There are two strategies for computing the quantization scale: dynamic and static. The dynamic strategy recomputes the scale at every iteration, while the static strategy uses the same scale for different inputs.
-For weights, the dynamic strategy is used during training; in other words, the scale is recomputed at every iteration until training ends.
-For activations, either strategy may be chosen. With the static strategy, the scale is estimated during training and then used unchanged at inference time for all inputs. The static scale can be estimated during training in three ways:
-
-1. The mean of the activation's maximum absolute values over a window.
-
-2. The maximum of the activation's maximum absolute values over a window.
-
-3. A moving average of the activation's maximum absolute values over a window, computed as:
-
-$$ V_t = (1 - k) * V + k * V_{t-1} $$
-
-where $V$ is the maximum absolute value of the current batch, $V_t$ is the moving average, and $k$ is a factor, for example 0.9.
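-
-A minimal sketch of this moving-average rule (purely illustrative; the parameter names are my own):
-
-```
-def update_scale(v_prev, batch_abs_max, k=0.9):
-    # V_t = (1 - k) * V + k * V_{t-1}
-    return (1.0 - k) * batch_abs_max + k * v_prev
-```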
-
-#### 1.2.4 Post-training quantization
-
-Post-training quantization computes the quantization scales from sample data, for example with KL divergence. Unlike quantization-aware training, it needs no retraining, so a quantized model can be obtained quickly.
-
-The goal of post-training quantization is to find the quantization scales, mainly by one of two methods: no-saturation and saturation. The no-saturation method computes the maximum absolute value `abs_max` of the FP32 tensor and maps it to 127, so the scale is `abs_max/127`. The saturation method uses KL divergence to compute a suitable threshold `T` (with `0 < T < abs_max`), maps it to 127, and takes `T/127` as the scale.
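-
-In code, the no-saturation scale is simply (a minimal sketch; the KL-based threshold search of the saturation method is omitted):
-
-```
-import numpy as np
-
-def no_saturation_scale(tensor):
-    # map abs_max to 127: scale = abs_max / 127
-    return np.max(np.abs(tensor)) / 127.0
-```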
-
-## 2. Filter pruning
-
-
-
-Figure 4
-
-
-
-**Pruning note 2**
-
-As shown in **Figure 5**, after pruning $X_i$, note 1 tells us to delete one row (the blue row in the figure) from the filters of $X_{i+1}$. When computing the l1_norm of $X_{i+1}$'s filters (the green column in the figure), there are two choices:
-include the deleted row: independent pruning
-exclude the deleted row: greedy pruning
-
-
-
-Figure 5
-
-
-**Pruning note 3**
-When pruning complex networks such as ResNet, the effect that modifying the current convolution layer has on neighboring convolution layers must also be considered.
-As shown in **Figure 6**, when pruning a residual block, how the $X_{i+1}$ layer is pruned depends on how the project shortcut is pruned, because the output of the project shortcut and the output of $X_{i+1}$ must be combined (concat) correctly.
-
-
-
-
-Figure 6
-
-
-### 2.2 Uniform pruning
-
-Prune the same ratio of filters in every layer.
-Before pruning, sort a layer's filters by l1_norm from high to low; the later a filter ranks, the less important it is, and the trailing filters are pruned first.
-
-
-### 2.3 Sensitivity-based pruning
-
-Prune a different ratio of filters from each convolution layer according to its sensitivity.
-
-#### Two assumptions
-
-- Within one conv layer's parameter, sort the filters by l1_norm from high to low; the later a filter ranks, the less important it is.
-- When two layers are pruned by the same ratio of filters, the layer whose pruning hurts model accuracy more is said to have relatively higher sensitivity.
-
-#### Guiding principles for pruning filters
-
-- A layer's pruning ratio is inversely proportional to its sensitivity
-- Within a layer, prune the filters with relatively low l1_norm first
-
-#### Understanding sensitivity
-
-
-
-Figure 7
-
-
-As shown in **Figure 7**, the x-axis is the ratio of filters pruned away, the y-axis is the accuracy loss, and each colored dashed line corresponds to one convolution layer of the network.
-Each convolution layer is pruned **individually** at different ratios, its accuracy loss on the validation set is observed, and the dashed lines of **Figure 7** are drawn. Layers whose dashed lines rise slowly are relatively insensitive, and we prefer to prune the filters of insensitive layers first.
-
-#### Choosing the best combination of pruning ratios
-
-We fit the polylines of **Figure 7** into the curves of **Figure 8**. For every accuracy-loss value picked on the y-axis, there is a corresponding set of pruning ratios on the x-axis, shown as the black solid line in **Figure 8**.
-Given a user-specified overall pruning ratio for the model, we move the black solid line in **Figure 8** to find a legal set of ratios that satisfies it.
-
-
-
-Figure 8
-
-
-#### Iterative pruning
-Because the convolution layers are correlated and modifying one layer may change the sensitivity of the others, we prune in multiple rounds (see the sketch after these steps):
-
-- step1: collect the sensitivity statistics of every convolution layer
-- step2: based on the current sensitivity statistics, prune a small number of filters from every layer and measure the FLOPS; if the FLOPS requirement is met, go to step4, otherwise go to step3.
-- step3: briefly fine-tune the network, then go back to step1
-- step4: fine-tune until convergence
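-
-Schematically, the loop looks like this (a sketch with hypothetical helpers — compute_sensitivities, prune_by_sensitivity, finetune, flops — standing in for the real implementation):
-
-```
-def iterative_prune(model, target_flops, step_ratio=0.07):
-    # iterative sensitivity pruning: prune a little, fine-tune, re-measure
-    while flops(model) > target_flops:
-        sens = compute_sensitivities(model)            # step1
-        prune_by_sensitivity(model, sens, step_ratio)  # step2
-        if flops(model) <= target_flops:
-            break
-        finetune(model, epochs=1)                      # step3: brief fine-tune
-    finetune(model, epochs=120)                        # step4: train to convergence
-    return model
-```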
-
-## 3. Distillation
-
- Generally, the more parameters a model has and the more complex its structure, the better it performs, but its parameters are also more redundant and its computation and resource consumption higher. Model distillation extracts the useful information from a complex network and transfers it into a smaller network. The toolkit supports two distillation methods.
- The first is classic distillation (see [Distilling the Knowledge in a Neural Network](https://arxiv.org/pdf/1503.02531.pdf)):
- a complex network is used as a teacher model to supervise the training of a student model with fewer parameters and less computation. The teacher can be one or more pretrained high-performance models. The student model is trained with two objectives: the original objective, the cross-entropy between the student's class probabilities and the labels, called the hard target; and the cross-entropy between the student's class probabilities and the teacher's class probabilities, called the soft target. The two losses are weighted and summed into the final training loss, which jointly supervises the student's training.
- The second is FSP-based distillation (see [A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning](http://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf)).
- Instead of directly fitting the teacher's outputs as classic distillation does, this method fits the transformations between the teacher's features at different layers. A transformation is represented by an FSP matrix (an inner product of features); several FSP matrices are computed between layer pairs of the teacher and of the student, and an L2 loss pushes each student FSP matrix toward the teacher's corresponding FSP matrix, as shown in the figure below. Intuitively, if distillation is a teacher (large model) helping a student (small model) solve a problem, classic distillation tells the student the answer directly, while learning FSP matrices teaches the student the intermediate process and method for solving it, so the student learns more information.
-
-
-
-Figure 9
-
-
- Because the student and teacher models are supervised through an L2 loss, the two FSP matrices must have the same dimensions. An FSP matrix is M*N, where M and N are the channel counts of the input and output features, so the teacher's and student's FSP matrices must be paired one to one.
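-
-For reference, a minimal NumPy sketch of the FSP matrix as defined above (an illustration of the definition, not the toolkit's implementation):
-
-```
-import numpy as np
-
-def fsp_matrix(f1, f2):
-    # f1: (M, H, W), f2: (N, H, W) feature maps with the same spatial size
-    # G[m, n] = sum_{h,w} f1[m, h, w] * f2[n, h, w] / (H * W)
-    m, h, w = f1.shape
-    n = f2.shape[0]
-    return f1.reshape(m, h * w) @ f2.reshape(n, h * w).T / (h * w)
-```
-
-The distillation loss is then the L2 distance between corresponding teacher and student FSP matrices.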
-
-## 4. Light model structure search
-
-Deep models work well on many tasks, and the network structure has a strong effect on the final quality. Designing networks by hand requires rich experience and many trials, and the numerous hyperparameters and structural parameters combine explosively, making plain random search practically infeasible, so Neural Architecture Search has become a hot research topic in recent years. Unlike generic NAS, we focus on searching for structures that are both accurate and fast, a capability we call Light-NAS.
-
-### 4.1 Search strategy
-
-The search strategy defines the algorithm for finding the optimal network configuration quickly and accurately. Common approaches include reinforcement learning, Bayesian optimization, evolutionary algorithms, and gradient-based methods. Our current implementation is based on simulated annealing.
-
-#### 4.1.1 Simulated annealing
-
-Simulated annealing comes from the physics of solid annealing: heat a solid sufficiently and then cool it slowly. While heated, its particles become disordered and the internal energy grows; while slowly cooled, the particles grow ordered, reaching equilibrium at every temperature and finally the ground state at room temperature, where the internal energy is minimal.
-
-Given the similarity between the annealing of solids and general combinatorial optimization problems, we apply it to network structure search.
-
-Model search with simulated annealing proceeds as follows:
-
-$$
-T_k = T_0 * \theta^k
-$$
-
-$$
-P(r_k) =
-\begin{cases}
-e^{\frac{r_k - r}{T_k}} & r_k < r \\
-1 & r_k \geq r
-\end{cases}
-$$
-
-At iteration $k$ the searched network is $N_k$; after training $N_k$ for several epochs, its reward on the test set is $r_k$, which is accepted with probability $P(r_k)$, i.e. $r = r_k$ is executed. $r$ is initialized to 0 at the start of the search. $T_0$ is the initial temperature, $\theta$ the temperature decay factor, and $T_k$ the temperature at iteration $k$.
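-
-A minimal sketch of this acceptance rule (illustrative only):
-
-```
-import math
-import random
-
-def accept(r, r_k, t_k):
-    # always accept a better reward; accept a worse one
-    # with probability exp((r_k - r) / T_k)
-    if r_k >= r:
-        return True
-    return random.random() < math.exp((r_k - r) / t_k)
-
-def temperature(t0, theta, k):
-    # temperature schedule: T_k = T_0 * theta^k
-    return t0 * theta ** k
-```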
-
-
-In our NAS task, unlike RL which regenerates a complete network each time, we map the network structure to an encoding that is randomly initialized once; each step then randomly modifies part of the encoding (corresponding to part of the structure) to produce a new encoding, maps it back to a network, and computes a reward that blends the accuracy after training a few epochs on the training set with the network latency, which guides the annealing toward convergence.
-
-
-### 4.2 Search space
-
-The search space defines the variables of the optimization problem, and their scale determines the difficulty and duration of the search; defining a reasonable search space is therefore crucial for speed. In Light-NAS we split a network into multiple blocks, stack them by hand in a chain-like hierarchy, and then use the search algorithm to find the internal structure of each block automatically.
-
-Because we want models that run fast on mobile devices, we follow the Linear Bottlenecks and Inverted Residuals of MobileNetV2 and search the concrete parameters of each inverted residual: kernel size, channel expansion factor, repeat count, and number of channels, as shown in Figure 10:
-
-
-
-Figure 10
-
-
-
-### 4.3 Model latency estimation
-
-The search supports both FLOPS constraints and latency constraints. Repeatedly measuring model latency on Android/iOS phones or development boards during the search is slow and inconvenient, so we built a latency estimator to predict the latency of the searched models; its estimates deviate from real measurements by less than 10%.
-
-The estimator has two stages: configuring the hardware latency tables, which is done once, and estimating model latency, which runs throughout the search on every searched model.
-
-- Configuring the hardware latency tables
-
-  1. Collect all distinct ops and their parameters in the search space
-  2. Measure the latency of every op/parameter combination
-
-- Estimating model latency
-
-  1. Collect all ops and their parameters of the given model
-  2. Estimate the model's latency from those ops and parameters using the latency tables
-
-
-## 5. References
-
-1. [High-Performance Hardware for Machine Learning](https://media.nips.cc/Conferences/2015/tutorialslides/Dally-NIPS-Tutorial-2015.pdf)
-
-2. [Quantizing deep convolutional networks for efficient inference: A whitepaper](https://arxiv.org/pdf/1806.08342.pdf)
-
-3. [Pruning Filters for Efficient ConvNets](https://arxiv.org/pdf/1608.08710.pdf)
-
-4. [Distilling the Knowledge in a Neural Network](https://arxiv.org/pdf/1503.02531.pdf)
-
-5. [A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning](http://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf)
diff --git a/PaddleSlim/docs/usage.md b/PaddleSlim/docs/usage.md
deleted file mode 100644
index e76a01ed8084347ff68df73b96a1008e205aed20..0000000000000000000000000000000000000000
--- a/PaddleSlim/docs/usage.md
+++ /dev/null
@@ -1,723 +0,0 @@
-
-
-
----
-# Paddle Model Compression Toolkit: User Guide
-
-Chapter 1 of this document covers the common functionality of the PaddleSlim module, without the details of specific compression strategies. The four sections of Chapter 2 describe how to use the four strategies: quantization-aware training, pruning, distillation, and light model structure search.
-Before reading about a specific strategy, we recommend skimming the corresponding algorithm notes first.
-
->In this document we do not distinguish operator from layer, nor loss from cost.
-
-## Contents
-
-- [Common functionality](#1-common-functionality-of-paddleslim)
-- [Quantization](#21-quantization-aware-training)
-- [Pruning](#22-model-channel-pruning)
-- [Distillation](#23-distillation)
-- [Light model structure search](#24-hardware-aware-light-model-structure-search)
-
-
-## 1. Common functionality of PaddleSlim
-
-## 1.1 Prerequisites
-
-### 1.1.1 Install Paddle
-
-**Version:** PaddlePaddle >= 1.4
-**Installation guide:** [installation notes](http://paddlepaddle.org/documentation/docs/zh/1.3/beginners_guide/install/index_cn.html)
-
-
-### 1.1.2 搭建好网络结构
-
-用户需要搭建好前向网络,并可以正常执行。
-一个正常可执行的网络一般需要以下内容或操作:
-
-- 网络结构的定义
-- data_reader
-- optimizer
-- 初始化,load pretrain model
-- feed list与fetch list
-
-#### 1.1.2.1 网络结构的定义
-首先参考以下文档,配置网络:
-[《Paddle使用指南:配置简单的网络》](http://paddlepaddle.org/documentation/docs/zh/1.3/user_guides/howto/configure_simple_model/index.html)
-
-这一步的产出应该是两个[Program](http://paddlepaddle.org/documentation/docs/zh/1.3/api_cn/fluid_cn.html#program)实例:
-
-- **train_program:** 用于在压缩过程中迭代训练模型,该program必须包含loss。一般该program不要有backward op和weights update op,否则不能使用蒸馏策略。
-
-- **eval_program:** 用于在压缩过程中评估模型的精度,一般会包含accuracy、IoU等评估指标的计算layer。
-
->在量化训练策略中,会根据eval_program进行网络结构剪枝并保存一个用于inference的量化模型。这时候,就要求inference网络是eval_program的一个子网络。
-
-#### 1.1.2.2. data_reader
-
-Prepare the data by following this document:
-[Paddle user guide: preparing data](http://paddlepaddle.org/documentation/docs/zh/1.3/user_guides/howto/prepare_data/index.html)
-
-This step should produce two DataReaders:
-
-**train_reader:** provides data for executing train_program
-**eval_reader:** provides data for executing eval_program
-
-#### 1.1.2.3. optimizer
-[fluid.optimizer API](http://www.paddlepaddle.org/documentation/docs/zh/1.3/api_cn/optimizer_cn.html)
-
-Depending on the scenario, the user provides zero, one, or two optimizers:
-
-- **0 optimizers:** the train_program built in the network-definition stage already contains backward ops and weight-update ops, so no optimizer is needed.
-- **1 optimizer:** train_program contains only forward ops, so one optimizer is needed to train it.
-- **2 optimizers:** the distillation strategy is used with different optimization settings for the distillation phase and the standalone fine-tuning phase. One optimizer trains the distillation network combined from the teacher net and the student net; the other optimizes the student net alone. More details are given in the distillation section, and a sketch follows this list.
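-
-A minimal sketch of the two-optimizer case (the learning rates are illustrative; both objects are standard fluid optimizers):
-
-```python
-import paddle.fluid as fluid
-
-# optimizes the combined teacher + student network during the distillation phase
-distiller_optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
-# optimizes the student network alone during the fine-tuning phase
-train_optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
-```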
-
-#### 1.1.2.4. load pretrain model
-
-- pruning: a pretrained model must be loaded
-- distillation: loading a pretrained model is optional
-- quantization-aware training: a pretrained model must be loaded
-
-#### 1.1.2.5. feed list and fetch list
-feed list and fetch list are ordered lists of (key, name) pairs, for example:
-```
-feed_list = [('image', image.name), ('label', label.name)]
-fetch_list = [('loss', avg_cost.name)]
-```
-Here each key in feed_list is a user-defined, meaningful string, and each value is the name of a [Variable](http://paddlepaddle.org/documentation/docs/zh/1.3/api_guides/low_level/program.html#variable); the order of feed_list must match the order of the data provided by the DataReader.
-
-Both train_program and eval_program need a corresponding feed_list and fetch_list.
-
->Note: in the fetch_list of train_program, the key of the loss variable (the output of the loss layer) must be 'loss'.
-
-
-## 1.2 Using the compression toolkit
-
-After the preparation in Section 1.1, all the information about the target model needed by the compression toolkit is ready. Configure and launch a compression task with the following steps:
-
-- modify the model training script to add the compression logic
-- write the configuration file
-- run the training script to compress the model
-
-### 1.2.1 Modifying an ordinary training script
-
-Make the following changes to the model script obtained in Section 1.1:
-
-Step 1: construct a `paddle.fluid.contrib.slim.Compressor` object. The constructor parameters of Compressor are described below:
-
-```
-Compressor(place,
- scope,
- train_program,
- train_reader=None,
- train_feed_list=None,
- train_fetch_list=None,
- eval_program=None,
- eval_reader=None,
- eval_feed_list=None,
- eval_fetch_list=None,
- teacher_programs=[],
- checkpoint_path='./checkpoints',
- train_optimizer=None,
- distiller_optimizer=None)
-```
-- **place:** the device used by the compression task. For GPU, use [paddle.fluid.CUDAPlace(0)](http://paddlepaddle.org/documentation/docs/zh/1.3/api_cn/fluid_cn.html#paddle.fluid.CUDAPlace)
-- **scope:** if no scope was constructed in the network-definition stage, the [global scope](http://paddlepaddle.org/documentation/docs/zh/1.3/api_cn/executor_cn.html#paddle.fluid.global_scope) is used and this parameter should be set to `paddle.fluid.global_scope()`. If you constructed your own scope, pass that scope instead.
-- **train_program:** a program whose network contains only forward operators and must include a loss. For the concept of a program, see: [Program API](http://paddlepaddle.org/documentation/docs/zh/1.3/api_cn/fluid_cn.html#program)
-- **train_reader:** [data reader](http://paddlepaddle.org/documentation/docs/zh/1.3/user_guides/howto/prepare_data/reader_cn.html) providing the training data
-- **train_feed_list:** specifies the input nodes of the train program; see Section 1.1.2.5.
-- **train_fetch_list:** specifies the output nodes of the train program; see Section 1.1.2.5.
-- **eval_program:** the program used to evaluate model accuracy
-- **eval_reader:** [data reader](http://paddlepaddle.org/documentation/docs/zh/1.3/user_guides/howto/prepare_data/reader_cn.html) providing the evaluation data
-- **eval_feed_list:** specifies the input nodes of the eval program; see Section 1.1.2.5.
-- **eval_fetch_list:** specifies the output nodes of the eval program; same format as train_fetch_list; see Section 1.1.2.5.
-- **teacher_programs:** programs used for distillation; they must share a scope with the train program.
-- **train_optimizer:** optimizer used to train the train program
-- **distiller_optimizer:** optimizer used for distillation training
-
-
-Step 2: load the configuration file and call the run method, for example:
-```python
-compressor.config('./compress.yaml')
-compressor.run()
-```
-Here compress.yaml is the compression-strategy configuration file; it gathers all tunable strategy parameters in one place, and Section 1.2.2 describes its format and content in detail.
-
-A complete example after finishing the steps of this section: [compress.py]()
-
-### 1.2.2 Using the configuration file
-
-The compression module manages the tunable strategy parameters centrally in a [yaml](https://zh.wikipedia.org/wiki/YAML) file. We use filter pruning as an example to show how to write the configuration file.
-
-Step 1: register pruners. As shown below, specify the pruner class and some attributes; the available classes and the meaning of the attributes are described in detail in Chapter 2.
-```yaml
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-```
-
-Step 2: register the pruning strategies.
-As shown below, we register two uniform pruning strategies that prune away 10% of the model's FLOPS at epoch 0 and at epoch 10, respectively.
-```yaml
-strategies:
- pruning_strategy_0:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.10
- pruned_params: '.*_sep_weights'
- metric_name: 'acc_top1'
- pruning_strategy_1:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 10
- target_ratio: 0.10
- pruned_params: '.*_sep_weights'
- metric_name: 'acc_top1'
-```
-
-Step 3: configure the common parameters.
-
-The parameters of the whole compression task are configured under compress_pass. As shown below, the task runs for 120 epochs, and checkpoints are saved under ./checkpoints. The strategies listed under compress_pass.strategies take effect; if several of them share the same start_epoch, they are invoked in the order in which they are listed.
-
-```yaml
-compress_pass:
- epoch: 120
- checkpoint_path: './checkpoints/'
- strategies:
- - pruning_strategy_0
- - pruning_strategy_1
-```
-
-The parameters configurable under compress_pass are:
-
-- **epoch**: the number of epochs the whole compression task runs.
-- **init_model**: path of the initial model. In the pruning strategy, the current network is pruned according to the `shape` of each `parameter` in `init_model`.
-- **checkpoint_path**: path for saving `checkpoint`s; a checkpoint contains both the model training state and the strategy-execution state. When a task restarts, the latest `checkpoint` under this path is loaded automatically, so decide according to your needs whether to change `checkpoint_path`.
-- **strategies**: the strategies that take effect, in order, in the current compression task.
-
-
-## 2. Using the model compression strategies
-
-This chapter explains, in turn, how to use the quantization-aware training, model channel pruning, and distillation strategies. Before reading on, we recommend the corresponding tutorial sections:
-
-- [Quantization-aware training](tutorial.md#1-quantization-aware-training量化介绍)
-- [Model channel pruning](tutorial.md#2-模型通道剪裁原理)
-- [Distillation](tutorial.md#3-蒸馏)
-
-### 2.1 Quantization-aware training
-
-**Note for users:** quantization-aware training currently targets convolution layers (both two-dimensional and depthwise convolutions) and fully connected layers, which correspond to the `conv2d`, `depthwise_conv2d`, and `mul` operators in the PaddlePaddle framework. Quantization is applied to every `conv2d`, `depthwise_conv2d`, and `mul` op, and their inputs are required to include both activations and parameters.
-
-#### 2.1.1 Quantization-aware training with the high-level API
-
->Note: when combining multiple compression strategies, the quantization strategy must come last.
-
-```
-class Compressor(object):
- def __init__(self,
- place,
- scope,
- train_program,
- train_reader=None,
- train_feed_list=None,
- train_fetch_list=None,
- eval_program=None,
- eval_reader=None,
- eval_feed_list=None,
- eval_fetch_list=None,
- teacher_programs=[],
- checkpoint_path='./checkpoints',
- train_optimizer=None,
- distiller_optimizer=None):
-```
-When defining the Compressor object, note the following:
-
-- If the train program already contains backward operators and optimizer-update operators, train_optimizer must be set to None.
-- Parameter names in eval_program must match the parameter names in train_program exactly.
-- The final int8 quantized model is saved by pruning the eval_program network, so if the saved model is to be usable for inference, the eval program must contain all operators needed for inference.
-- Checkpoints store the model in float format.
-
-In the configuration file, the quantization strategy is configured as follows:
-```
-strategies:
- quantization_strategy:
- class: 'QuantizationStrategy'
- start_epoch: 0
- end_epoch: 10
- float_model_save_path: './output/float'
- mobile_model_save_path: './output/mobile'
- int8_model_save_path: './output/int8'
- weight_bits: 8
- activation_bits: 8
- weight_quantize_type: 'abs_max'
- activation_quantize_type: 'abs_max'
- save_in_nodes: ['image']
- save_out_nodes: ['quan.tmp_2']
- compressor:
- epoch: 20
- checkpoint_path: './checkpoints_quan/'
- strategies:
- - quantization_strategy
-```
-The configurable parameters are:
-
-- **class:** name of the quantization strategy class; currently only `QuantizationStrategy` is supported.
-- **start_epoch:** before start_epoch, the strategy inserts quantization and dequantization operators into train_program and eval_program; quantization-aware training starts at start_epoch.
-- **end_epoch:** after end_epoch, models are saved in the formats specified by the user. Note that quantization training does not stop at end_epoch; it continues until compressor.epoch.
-- **float_model_save_path:** path for saving the model in float format. The weights lie within the representable range of int8 but are stored as floats. If None, no float model is saved. Default: None.
-- **int8_model_save_path:** path for saving the model in int8 format (a sketch of loading the saved model follows this list). If None, no int8 model is saved. Default: None.
-- **mobile_model_save_path:** path for saving a model compatible with the paddle-mobile framework. If None, no mobile model is saved. Default: None.
-- **weight_bits:** number of bits for quantized weights; biases are not quantized.
-- **activation_bits:** number of bits for quantized activations.
-- **weight_quantize_type:** quantization method for weights; currently 'abs_max' and 'channel_wise_abs_max' are supported.
-- **activation_quantize_type:** quantization method for activations; currently `abs_max` or `range_abs_max`. `abs_max` computes the quantization range dynamically at every training step and during inference; `range_abs_max` computes a static range during training and reuses it for inference.
-- **save_in_nodes:** a list of variable names. When saving the quantized model, the eval program network is pruned by a forward traversal starting from save_in_nodes. Defaults to the variable names specified in eval_feed_list.
-- **save_out_nodes:** a list of variable names. When saving the quantized model, the eval program network is pruned by backtracking from save_out_nodes. Defaults to the variable names specified in eval_fetch_list.
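-
-A minimal sketch of loading the saved inference model later (the path matches `int8_model_save_path` in the example above; `load_inference_model` is the standard fluid API, not a PaddleSlim-specific call):
-
-```python
-import paddle.fluid as fluid
-
-place = fluid.CPUPlace()
-exe = fluid.Executor(place)
-# returns the pruned inference program together with its feed names and fetch targets
-program, feed_names, fetch_targets = fluid.io.load_inference_model('./output/int8', exe)
-```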
-
-
-#### 2.1.2 Quantization-aware training with the low-level API
-
-The high-level quantization API is a wrapper around the low-level API, so users only need to write a small amount of code and a configuration file. Wrapping inevitably reduces flexibility; if you need more flexibility when doing quantization-aware training, see the [low-level quantization API example](../quant_low_level_api/README.md).
-
-### 2.2 Model channel pruning
-This strategy reduces model size and computational complexity by reducing the number of kernels in selected convolution layers. Depending on how the pruning ratios are chosen, there are two variants:
-
-- uniform pruning: prune the same proportion of kernels in every layer.
-- sensitive pruning: prune a different proportion in each layer according to the layer's sensitivity.
-
-Both variants require loading a pretrained model.
-Channel pruning is built on structure pruning, so a `StructurePruner` must be registered in the configuration file, as follows:
-
-```
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-```
-
-A configuration file may register multiple pruners, all placed under the `pruners` key. The configurable parameters of a `pruner` are:
-
-- **class:** pruner type; currently only `StructurePruner` is supported.
-- **pruning_axis:** the dimension to prune; `'conv*': 0` means pruning dimension 0 of the filter weights of all convolution layers, i.e. pruning the number of filters.
-- **criterions**: specifies, by wildcard, the ranking criterion used when pruning each parameter. Currently only `l1_norm` is supported (a sketch follows this list).
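-
-A minimal sketch of the `l1_norm` criterion, assuming filters are ranked by the L1 norm of their weights and the smallest ones are pruned first (illustrative, not the PaddleSlim internals):
-
-```python
-import numpy as np
-
-def rank_filters_l1(weights):
-    """weights: conv filter tensor (out_c, in_c, k, k) -> filter indices, least important first."""
-    scores = np.abs(weights).sum(axis=(1, 2, 3))   # L1 norm of each output filter
-    return np.argsort(scores)
-
-w = np.random.randn(64, 32, 3, 3)
-prune_ratio = 0.5
-pruned_idx = rank_filters_l1(w)[:int(64 * prune_ratio)]  # filters to remove
-```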
-
-
-#### 2.2.1 uniform pruning
-
-The uniform pruning strategy registers a `UniformPruneStrategy` instance under the `strategies` key of the configuration file and adds it to the compressor's strategies list,
-as follows:
-```
-strategies:
- uniform_pruning_strategy:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.5
- pruned_params: '.*_sep_weights'
-compressor:
- epoch: 100
- strategies:
- - uniform_pruning_strategy
-```
-The configurable parameters of UniformPruneStrategy are:
-
-- **class:** set to `UniformPruneStrategy` to use uniform pruning.
-- **pruner:** name of a StructurePruner instance registered in the configuration file; the pruner specifies how each individual parameter is pruned.
-- **start_epoch:** the epoch at which the strategy starts. Before start_epoch, the strategy prunes the number of filters in the network; from start_epoch on, the pruned network is fine-tuned until the whole compression task ends.
-- **target_ratio:** the proportion of the target network's FLOPS to prune away.
-- **pruned_params:** names of the parameters to prune; wildcards are supported. For example, '*' prunes all parameters, and 'conv*' prunes all parameters whose names start with 'conv'.
-
-
-
-#### 2.2.2 sensitive pruning
-
-The sensitive pruning strategy registers a `SensitivePruneStrategy` instance under the `strategies` key of the configuration file and adds it to the compressor's strategies list,
-as follows:
-```
-strategies:
- sensitive_pruning_strategy:
- class: 'SensitivePruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- delta_rate: 0.1
- target_ratio: 0.5
- num_steps: 1
- eval_rate: 0.2
- pruned_params: '.*_sep_weights'
- sensitivities_file: 'mobilenet_acc_top1_sensitive.data'
- metric_name: 'acc_top1'
-compressor:
- epoch: 200
- strategies:
- - sensitive_pruning_strategy
-```
-The configurable parameters of SensitivePruneStrategy are:
-
-- **class:** set to `SensitivePruneStrategy` to use sensitivity-based pruning.
-- **pruner:** name of a StructurePruner instance registered in the configuration file; the pruner specifies how each individual parameter is pruned.
-- **start_epoch:** the epoch at which the strategy starts. Before start_epoch, the strategy performs the first pruning of the network's filters.
-- **delta_rate:** when collecting sensitivity statistics, the pruning ratio is swept from 0 to 1 in increments of delta_rate. See the [tutorial]() for details.
-- **target_ratio:** the proportion of the target network's FLOPS to prune away.
-- **num_steps:** the number of steps of the whole pruning process. The ratio pruned per step is $step = 1 - (1-target\_ratio)^{\frac{1}{num\_steps}}$ (see the worked example below).
-- **eval_rate:** the fraction of validation data, sampled at random, used when computing sensitivities. To quickly recompute the sensitivity of every parameter at each step of iterative pruning, we recommend using a random subset of the validation data; when `num_steps` is 1, use the full validation set.
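-
-For example, with target_ratio = 0.5 and num_steps = 2, each step prunes $1 - (1 - 0.5)^{1/2} \approx 0.293$ of the remaining FLOPS, so after two steps $(1 - 0.293)^2 \approx 0.5$ of the original FLOPS remain, as intended.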
-
-#### 2.2.3 auto filter pruning
-
-This strategy searches for a set of pruning ratios with simulated annealing, prunes the network with the best ratios found, and then trains the pruned network.
-
-The automatic channel pruning strategy registers an `AutoPruneStrategy` instance under the `strategies` key of the configuration file and adds it to the compressor's strategies list,
-as follows:
-```
-strategies:
- auto_pruning_strategy:
- class: 'AutoPruneStrategy'
- pruner: 'pruner_1'
- controller: 'sa_controller'
- start_epoch: 0
- end_epoch: 500
- retrain_epoch: 0
- max_ratio: 0.50
- min_ratio: 0.48
- uniform_range: 0.4
- pruned_params: '.*_sep_weights'
- metric_name: 'acc_top1'
-compressor:
- epoch: 500
- checkpoint_path: './checkpoints/'
- strategies:
- - auto_pruning_strategy
-```
-The configurable parameters of AutoPruneStrategy are:
-
-- **class:** set to `AutoPruneStrategy` to use automatic channel pruning.
-- **pruner:** name of a StructurePruner instance registered in the configuration file; the pruner specifies how each individual parameter is pruned.
-- **controller:** the controller used for the search; it must be registered beforehand in the same configuration file, as described below.
-
-- **start_epoch:** the epoch at which the search for pruning-ratio combinations starts.
-- **end_epoch:** the epoch at which the search ends. At end_epoch, the strategy prunes the network with the best combination of pruning ratios found so far.
-
-- **retrain_epoch:** the number of epochs to train a model before evaluating its performance. Default: 0.
-- **max_ratio:** the maximum proportion of FLOPS to prune away.
-- **min_ratio:** the minimum proportion of FLOPS to prune away.
-- **uniform_range:** the maximum proportion by which any single parameter may be pruned.
-- **pruned_params:** names of the parameters to prune; wildcards are supported. For example, '*' prunes all parameters, and 'conv*' prunes all parameters whose names start with 'conv'.
-- **metric_name:** the metric used to evaluate model performance.
-
-The controller is configured as follows:
-
-```
-controllers:
- sa_controller:
- class: 'SAController'
- reduce_rate: 0.85
- init_temperature: 10.24
- max_iter_number: 300
-```
-- **class:** controller class name; currently `SAController` is the only option.
-- **reduce_rate:** float; the temperature decay rate.
-- **init_temperature:** float; the initial temperature.
-- **max_iter_number:** int; the maximum number of attempts to obtain a token sequence that satisfies the FLOPS constraint.
-
-### 2.3 Distillation
-
-PaddleSlim supports `FSP_loss`, `L2_loss`, and `softmax_with_cross_entropy_loss`; in the configuration file, the user can combine any layer of the teacher net with any layer of the student net using these three losses.
-
-Unlike the other strategies, the distillation strategy requires the user to specify a teacher program and a distiller optimizer when constructing the Compressor object in the script.
-The teacher program must satisfy the following requirements:
-
-- the teacher program must load a pretrained model;
-- variables in the teacher program must not have naming conflicts with variables in the student program;
-- the teacher program must contain only forward operators and no backward operators;
-- the user does not need to set the teacher program's stop_gradient attribute manually (to skip gradient computation and weight updates); PaddleSlim sets it to True automatically.
-
-The distiller optimizer adds backward operators and optimization operators to the network combined from the student net and the teacher net; it is used only during the distillation training phase.
-
-In the configuration file, the distillation strategy is configured as follows:
-```
-strategies:
- distillation_strategy:
- class: 'DistillationStrategy'
- distillers: ['fsp_distiller', 'l2_distiller']
- start_epoch: 0
- end_epoch: 130
-```
-Strategy instances are registered under the `strategies` key; the configurable parameters are:
-
-- **class:** name of the strategy class; set to DistillationStrategy for distillation.
-- **distillers:** a list of distillers, each representing one combined loss between the student net and the teacher net. The strategy sums the losses defined in this list and optimizes the total during the distillation phase. Distillers must be registered beforehand in the same configuration file, as described below.
-- **start_epoch:** before start_epoch, the strategy merges the teacher net into the student net according to the user-defined losses and adds backward and update operators based on the merged loss.
-- **end_epoch:** after end_epoch, the strategy removes the teacher net from the student net and restores the student net's own loss; after that, training enters a phase of fine-tuning the student net alone.
-
-Distillers are configured as follows:
-
-**FSPDistiller**
-```
-distillers:
- fsp_distiller:
- class: 'FSPDistiller'
- teacher_pairs: [['res2a_branch2a.conv2d.output.1.tmp_0', 'res3a_branch2a.conv2d.output.1.tmp_0']]
- student_pairs: [['depthwise_conv2d_1.tmp_0', 'conv2d_3.tmp_0']]
- distillation_loss_weight: 1
-```
-- **class:** distiller class name; one of `FSPDistiller`, `L2Distiller`, `SoftLabelDistiller`.
-- **teacher_pairs:** the sections of the teacher network. Each section in the list is given by two variable names, denoting two feature maps in the network. The two feature maps may have different channel counts, but they must have the same height and width.
-- **student_pairs:** the corresponding sections of the student network; student_pairs[i] and teacher_pairs[i] together produce one fsp loss.
-- **distillation_loss_weight:** the weight of this fsp loss. Default: 1.0.
-
-**L2-loss**
-
-```
-distillers:
- l2_distiller:
- class: 'L2Distiller'
- teacher_feature_map: 'fc_1.tmp_0'
- student_feature_map: 'fc_0.tmp_0'
- distillation_loss_weight: 1
-```
-
-- **teacher_feature_map:** the feature map in the teacher network used to compute the l2 loss.
-- **student_feature_map:** the feature map in the student network used to compute the l2 loss; its shape must match `teacher_feature_map` exactly.
-
-**SoftLabelDistiller**
-
-```
-distillers:
- soft_label_distiller:
- class: 'SoftLabelDistiller'
- student_temperature: 1.0
- teacher_temperature: 1.0
- teacher_feature_map: 'teacher.tmp_1'
- student_feature_map: 'student.tmp_1'
- distillation_loss_weight: 0.001
-```
-
-- **teacher_feature_map:** the feature map in the teacher network used to compute softmax_with_cross_entropy.
-- **student_feature_map:** the feature map in the student network used to compute softmax_with_cross_entropy; its shape must match `teacher_feature_map` exactly.
-- **student_temperature:** student_feature_map is divided by this coefficient before softmax_with_cross_entropy is computed.
-- **teacher_temperature:** teacher_feature_map is divided by this coefficient before softmax_with_cross_entropy is computed (see the sketch below).
-- **distillation_loss_weight:** the weight of this loss. Default: 1.0.
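-
-A minimal NumPy sketch of the temperature-scaled soft-label loss described above (illustrative only; PaddleSlim builds this out of fluid operators):
-
-```python
-import numpy as np
-
-def softmax(x):
-    e = np.exp(x - x.max(axis=-1, keepdims=True))
-    return e / e.sum(axis=-1, keepdims=True)
-
-def soft_label_loss(student_logits, teacher_logits, t_student=1.0, t_teacher=1.0):
-    """Cross entropy between temperature-softened teacher and student distributions."""
-    p_teacher = softmax(teacher_logits / t_teacher)   # soft labels from the teacher
-    p_student = softmax(student_logits / t_student)
-    return -(p_teacher * np.log(p_student + 1e-12)).sum(axis=-1).mean()
-
-loss = soft_label_loss(np.random.randn(8, 10), np.random.randn(8, 10))
-```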
-
-
-### 2.4 Hardware-aware lightweight network architecture search
-
-Based on simulated annealing, this feature quickly searches for lightweight model structures tailored to different hardware; we call it LightNAS (Light Network Architecture Search).
-
-Using it involves three tasks:
-
-- define the search space
-- (optional) configure a latency estimator for the target hardware, e.g. Android/iOS phones or Android development boards
-- configure LightNASStrategy and launch the search task
-
-#### 2.4.1 Defining the search space
-
-Architecture search is an automatic approach to network design. Its goal is to automatically find the best-performing model among a large set of candidate structures; that set of candidates is the search space. In our lightweight architecture search, different model structures are obtained by varying parameters such as the input and output channel counts of convolutions, so the search space is defined by these variable structure parameters. To define your own search space, subclass the [SearchSpace class](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/contrib/slim/nas/search_space.py#L19) and override its methods. The methods to override are:
-
-- `init_tokens`: `tokens` represent a network structure as an array; one `tokens` array corresponds to one structure. `init_tokens` is the initial `tokens` of the search. Note: in light_nas_space.py, each element of the token array selects, for an Inverted Residuals block, the channel expansion ratio, the number of filters, the number of layers, the kernel size, whether to use a shortcut, and whether to use an SE block. If your task uses a structure other than Inverted Residuals, decide which parameters of your network are variable and map them to your own tokens.
-
-- `range_table`: an array specifying the value range of each position of the `tokens` array; its length equals that of `tokens`, and `tokens[i]` takes values in `[0, range_table[i])`. The entries correspond to the value ranges of the variable parameters above, such as the channel expansion ratio and the number of filters; you can likewise adapt these ranges to your own task.
-
-- `create_net`: builds the startup `Program`, training `Program`, and test `Program` from the given `tokens`. When building a Light-NAS network, the tokens are mapped to the parameters of each Inverted Residuals block through bottleneck_params_list, and the network is assembled by reading the values of bottleneck_params_list at the corresponding positions in the model structure. Note: if your task uses a model structure different from light_nas_space, take the variable parameters of your structure as the input and assemble something analogous to bottleneck_params_list. bottleneck_params_list is essentially a reorganization of the tokens, so you can regard the two as equivalent.
-
-A verified search space is defined under [PaddlePaddle/models/light_nas](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/light_nas/light_nas_space.py); ordinary users are advised to use it directly.
-
-Note: to reiterate, if your task requires a model structure other than Light-NAS, first determine which parameters of your structure are variable. Use these variable parameters as the input of create_net, i.e. the bottleneck_params_list, map bottleneck_params_list to tokens, and determine each token's range of variation to obtain range_table. The search then produces different model structures by varying the tokens (a minimal sketch follows below).
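-
-A minimal sketch of a custom search space (the three tunable parameters here are hypothetical; only the overridden method names come from the SearchSpace interface):
-
-```python
-from paddle.fluid.contrib.slim.nas import SearchSpace
-
-class TinySpace(SearchSpace):
-    """Toy space: 3 blocks, each choosing a kernel size from [3, 5, 7]."""
-
-    def init_tokens(self):
-        return [0, 0, 0]                  # start from the smallest kernels
-
-    def range_table(self):
-        return [3, 3, 3]                  # tokens[i] takes values in [0, 3)
-
-    def create_net(self, tokens=None):
-        tokens = tokens or self.init_tokens()
-        kernels = [[3, 5, 7][t] for t in tokens]
-        # ... assemble the startup/train/test fluid Programs from `kernels` ...
-        raise NotImplementedError('build fluid Programs here')
-```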
-
-When constructing the `Compressor` object, pass in the `SearchSpace` instance as follows:
-
-```
-...
-space = LightNASSpace()
-...
-com_pass = Compressor(
- place,
- fluid.global_scope(),
- train_prog,
- train_reader=train_reader,
- train_feed_list=None,
- train_fetch_list=train_fetch_list,
- eval_program=test_prog,
- eval_reader=test_reader,
- eval_feed_list=None,
- eval_fetch_list=val_fetch_list,
- train_optimizer=None,
- search_space=space)
-```
-
-#### 2.4.2 (Optional) Configuring the latency estimator for specific hardware
-
-Based on your own search space, override the method that obtains all possible ops of the search space, similar to the `get_all_ops` function in the [LightNASSpace class](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/light_nas/light_nas_space.py). The current `get_all_ops` finds all ops by brute force based on our understanding of Light-NAS; if you have a better approach, contributions of ideas and code are welcome.
-
-From all possible ops of your search space, generate the latency lookup table. The table is usually stored at a path like `LATENCY_LOOKUP_TABLE_PATH=latency_lookup_table.txt` in the [LightNASSpace class](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/light_nas/light_nas_space.py). How to generate the table is described in detail below.
-
-Subclass the [SearchSpace class](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/contrib/slim/nas/search_space.py#L19) and override the following method:
-
-- `get_model_latency`: the `program` argument is one of the searched network structures. With this method, you can generate latency lookup tables for different hardware in advance and then look up the latency of each searched network.
-
-#### 2.4.3 Configuring LightNASStrategy
-
-In the configuration file, the search strategy is configured as follows:
-```
-strategies:
- light_nas_strategy:
- class: 'LightNASStrategy'
- controller: 'sa_controller'
- target_flops: 592948064
- target_latency: 0
- end_epoch: 500
- retrain_epoch: 5
- metric_name: 'acc_top1'
- server_ip: ''
- server_port: 8871
- is_server: True
- search_steps: 100
-```
-Strategy instances are registered under the `strategies` key; the configurable parameters are:
-
-- **class:** name of the strategy class; set to LightNASStrategy for lightweight architecture search.
-- **controller:** the controller used for the search; it must be registered beforehand in the same configuration file, as described below.
-- **target_flops:** the FLOPS constraint; the FLOPS of searched architectures must not exceed this value.
-- **target_latency:** the estimated-latency constraint; the estimated latency of searched architectures must not exceed this value. 0 means no latency constraint, in which case hardware-aware search is not started.
-- **end_epoch:** the epoch at which the current client stops the search.
-- **retrain_epoch:** the number of epochs to train each architecture before evaluating it. (end_epoch - 0)/retrain_epoch is the number of architectures the current client searches.
-- **metric_name:** the metric used to evaluate model performance.
-- **server_ip:** IP of the controller server. Defaults to empty, i.e. the IP of the current machine is detected automatically.
-- **server_port:** the port on which the controller server listens.
-- **is_server:** whether the process started with this configuration file contains the controller server. A search task must have exactly one controller server.
-- **search_steps:** the number of search steps of the controller server, i.e. the number of architectures the server produces.
-
-The controller is configured as follows:
-
-```
-controllers:
- sa_controller:
- class: 'SAController'
- reduce_rate: 0.85
- init_temperature: 10.24
- max_iter_number: 300
-```
-- **class:** controller class name; currently `SAController` is the only option.
-- **reduce_rate:** float; the temperature decay rate.
-- **init_temperature:** float; the initial temperature.
-- **max_iter_number:** int; the maximum number of attempts to obtain a token sequence that satisfies the FLOPS constraint.
-
-#### 2.4.4 Distributed search
-
-Single machine, multiple tasks:
-
-This mode starts one controller server and several clients on one machine. A client fetches tokens from the controller, builds, trains, and evaluates the network defined by the tokens, and returns the reward to the controller server.
-
-When Compressor::run() executes, it first checks whether `is_server` in the configuration file is `True`, then:
-
-- True: if the file `slim_LightNASStrategy_controller_server.socket` exists in the current directory, start only a client; otherwise, start one controller server and one client.
-
-- False: start only a client.
-
-Multi-machine search:
-
-This mode starts one controller server on one machine and several clients on other machines. In the configuration file of the machine hosting the controller server, is_server must be set to True. In the configuration files of all other machines, set `is_server` to False and set `server_ip` and `server_port` to the `ip` and `port` of the controller server (a sketch of a client-side configuration follows below).
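-
-A sketch of a client machine's strategy section for multi-machine search (the IP and port are placeholders; the other fields follow the example in 2.4.3):
-
-```yaml
-strategies:
-  light_nas_strategy:
-    class: 'LightNASStrategy'
-    controller: 'sa_controller'
-    target_flops: 592948064
-    end_epoch: 500
-    retrain_epoch: 5
-    metric_name: 'acc_top1'
-    server_ip: '192.168.1.10'   # IP of the machine running the controller server
-    server_port: 8871
-    is_server: False            # this machine runs only a client
-    search_steps: 100
-```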
-
->Note: when the controller server restarts, the file `slim_LightNASStrategy_controller_server.socket` may not be cleaned up in time, so you need to delete it manually. This will be fixed in a later release.
-
-#### 2.4.5 Generating the latency estimator
-
-1. Standard format of the latency lookup table
-
-   The latency lookup table is normally stored in a .txt file. For every hardware platform, we generate a table covering all possible ops of the search space. Each line of the table corresponds to one op, in one of the following forms:
-
- - `conv flag_bias flag_relu n_in c_in h_in w_in c_out groups kernel padding stride dilation latency`
- - `activation active_type n_in c_in h_in w_in latency`
- - `batch_norm active_type n_in c_in h_in w_in latency`
- - `eltwise eltwise_type n_in c_in h_in w_in latency`
- - `pooling flag_global_pooling n_in c_in h_in w_in kernel padding stride ceil_mode pool_type latency`
- - `softmax axis n_in c_in h_in w_in latency`
-
-   Here `conv`, `activation`, `batch_norm`, `eltwise`, `pooling`, and `softmax` denote convolution, activation, batch normalization, elementwise operations, pooling, and softmax respectively; these are the ops currently supported. The parameters mean:
-
-   - active_type (string) - activation type, one of: relu, prelu, sigmoid, relu6, tanh.
-   - eltwise_type (int) - elementwise operator type: 1 for elementwise_mul, 2 for elementwise_add, 3 for elementwise_max.
-   - pool_type (int) - pooling type: 1 for pooling_max, 2 for pooling_average_include_padding, 3 for pooling_average_exclude_padding.
-   - flag_bias (int) - whether there is a bias (0: no, 1: yes).
-   - flag_global_pooling (int) - whether the pooling is global (0: no, 1: yes).
-   - flag_relu (int) - whether a relu follows (0: no, 1: yes).
-   - n_in (int) - batch size of the input Tensor.
-   - c_in (int) - number of channels of the input Tensor.
-   - h_in (int) - feature height of the input Tensor.
-   - w_in (int) - feature width of the input Tensor.
-   - c_out (int) - number of channels of the output Tensor.
-   - groups (int) - number of groups of the Conv2D layer.
-   - kernel (int) - kernel size.
-   - padding (int) - padding size.
-   - stride (int) - stride size.
-   - dilation (int) - dilation size.
-   - axis (int) - the index of the dimension along which softmax is computed, in the range [-1, rank - 1], where rank is the rank of the input variable.
-   - ceil_mode (int) - whether output height and width are computed with the ceil function: 0 uses floor, 1 uses ceil.
-   - latency (float) - the latency of this op.
-
-2. Generating the latency estimator on different hardware platforms
-
-   Android:
-
-   - Download the Android latency-estimator toolkit from [here](https://paddle-slim-models.bj.bcebos.com/Android_demo.zip).
-   - Install ADB. On macOS, for example, it can be installed with one command: `brew cask install android-platform-tools`.
-   - Connect the device and check with adb devices that it is connected correctly.
-   - Enter the toolkit directory Android_demo and run `sh push2android.sh` to push the necessary files to the device.
-   - Run `python get_latency_lookup_table.py` under `models/PaddleSlim/light_nas/` to obtain the latency lookup table `latency_lookup_table.txt` for the current search space.
-   - Alternatively, write all ops returned by the `get_all_ops` function in `models/PaddleSlim/light_nas/light_nas_space.py` into a file such as `lightnas_ops.txt`, then call the `get_latency_lookup_table.py` script in the toolkit directory `Android_demo` to produce the table.
-
-   Note 1: based on the [Paddle Mobile](https://github.com/PaddlePaddle/paddle-mobile) inference library, we wrote and compiled binaries that measure the latency of each important op and of whole models. The per-op binaries are named `get_{op}_latency`, where `{op}` is replaced by the op name; each binary prints a float giving the average latency. They are invoked as follows:
-
- - `./get_activation_latency "threads test_iter active_type n_in c_in h_in w_in"`
- - `./get_batch_norm_latency "threads test_iter active_type n_in c_in h_in w_in"`
- - `./get_conv_latency "threads test_iter flag_bias flag_relu n_in c_in h_in w_in c_out group kernel padding stride dilation"`
- - `./get_eltwise_latency "threads test_iter eltwise_type n_in c_in h_in w_in"`
- - `./get_pooling_latency "threads test_iter flag_global_pooling n_in c_in h_in w_in kernel padding stride ceil_mode pool_type"`
- - `./get_softmax_latency "threads test_iter axis n_in c_in h_in w_in"`
-
-   As shown, each binary takes a single string argument; apart from the leading `threads` and `test_iter`, the fields match the op parameters in the latency lookup table, where
-
-   - threads (int) - number of threads (at most the number supported by the phone).
-   - test_iter (int) - number of runs per measurement.
-
-   We also provide a binary that measures the latency of a whole model, named `get_net_latency`; it returns the latency of the entire model and is invoked as follows:
-
- - `./get_net_latency model_path threads test_iter`
-
-   Here `model_path` is the path of a saved PaddlePaddle model whose parameters were saved as separate files with [paddle.fluid.io.save_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/io_cn.html#save-inference-model). For how to use these binaries on their own, see [here](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/mobile/doc/development_android.md) or the similar logic in `get_latency_lookup_table.py`.
-
-   Note 2: if you need additional ops, you can develop them against Paddle Mobile's [op unit tests](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/mobile/test/operators); see [here](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/mobile/doc/development_android.md) for how to build for Android. Contributions are welcome.
-
-   Sample unit-test code for our ops can be downloaded [here](https://paddle-slim-models.bj.bcebos.com/android_op_test.zip). Run `git clone https://github.com/PaddlePaddle/Paddle-Lite.git`, place the unit tests from android_op_test under `Paddle-Lite/mobile/test/operators`, then edit `Paddle-Lite/mobile/test/CMakeList.txt` and build to produce the required binaries.
-
-   iOS:
-
-   - Download the iOS latency-estimator tool OpLatency from [here](https://paddle-slim-models.bj.bcebos.com/OpLatency.zip).
-   - Unlike on Android, before using the tool you must write all op parameters of the search space, obtained from the `get_all_ops` function in `models/PaddleSlim/light_nas/light_nas_space.py`, into a .txt file. The file looks like the latency lookup table with one op per line, just without the latency column. In LightNAS we name it `lightnas_ops.txt`.
-   - Install Xcode and connect an iOS device; simulators are currently not supported. Select the project OpLatency and update the developer information under General-->Signing.
-   - Drag the prepared `lightnas_ops.txt` into the project, making sure to tick `Add to targets` when prompted.
-   - In ViewController we call the `get_latency_lookup_table` method of the OCWrapper class; set its input and output arguments to `lightnas_ops.txt` and `latency_lookup_table.txt`.
-   - Run OpLatency. While building the phone app, the program also writes the latency lookup table `latency_lookup_table.txt` for the current search space into the app sandbox.
-   - Open Windows-->Devices and Simulators-->OpLatency-->Download Container to download the sandbox to a PC, right-click to show the package contents, and find the table under AppData-->Documents.
-
-   Note 1: we also provide a way to measure whole-model latency: call the `get_net_latency` method of OCWrapper in ViewController. Its arguments are the model and params paths; the parameters must be packed into a single file with [paddle.fluid.io.save_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/io_cn.html#save-inference-model).
-
-   Note 2: if you need additional ops, you can develop them from the sample code provided [here](https://paddle-slim-models.bj.bcebos.com/ios_op_test.zip). Usage: unzip and run `sh run.sh` to produce the packaged library `libpaddle-mobile.a` and the header `ios_op_test.h` required by OpLatency.
diff --git a/PaddleSlim/light_nas/compress.yaml b/PaddleSlim/light_nas/compress.yaml
deleted file mode 100644
index bdabdf5e6fa03ea07dccbd1d1b4821d2daf85b3a..0000000000000000000000000000000000000000
--- a/PaddleSlim/light_nas/compress.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: 1.0
-controllers:
- sa_controller:
- class: 'SAController'
- reduce_rate: 0.85
- init_temperature: 10.24
- max_iter_number: 300
-strategies:
- light_nas_strategy:
- class: 'LightNASStrategy'
- controller: 'sa_controller'
- target_flops: 592948064
- target_latency: 0
- end_epoch: 500
- retrain_epoch: 5
- metric_name: 'acc_top1'
- server_ip: ''
- server_port: 8871
- is_server: True
- search_steps: 100
-compressor:
- epoch: 500
- strategies:
- - light_nas_strategy
diff --git a/PaddleSlim/light_nas/data b/PaddleSlim/light_nas/data
deleted file mode 120000
index 4909e06efb479a01e44e67265074c726796f4959..0000000000000000000000000000000000000000
--- a/PaddleSlim/light_nas/data
+++ /dev/null
@@ -1 +0,0 @@
-../data
\ No newline at end of file
diff --git a/PaddleSlim/light_nas/get_latency_lookup_table.py b/PaddleSlim/light_nas/get_latency_lookup_table.py
deleted file mode 100644
index e0df4b94a9867e444988e0e84866b650caf20db6..0000000000000000000000000000000000000000
--- a/PaddleSlim/light_nas/get_latency_lookup_table.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Get latency lookup table."""
-from __future__ import print_function
-
-import re
-import argparse
-import subprocess
-
-from light_nas_space import get_all_ops
-
-
-def get_args():
- """Get arguments.
-
- Returns:
- Namespace, arguments.
- """
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument(
- '--latency_lookup_table_path',
- default='latency_lookup_table.txt',
- help='Output latency lookup table path.')
- parser.add_argument(
- '--platform', default='android', help='Platform: android/ios/custom.')
- parser.add_argument('--threads', type=int, default=1, help='Threads.')
- parser.add_argument(
- '--test_iter',
- type=int,
- default=100,
- help='Running times of op when estimating latency.')
- args = parser.parse_args()
- return args
-
-
-def get_op_latency(op, platform):
- """Get op latency.
-
- Args:
- op: list, a list of str represents the op and its parameters.
- platform: str, platform name.
-
- Returns:
- float, op latency.
- """
- if platform == 'android':
- commands = 'adb shell "cd /data/local/tmp/bin && LD_LIBRARY_PATH=. ./get_{}_latency \'{}\'"'.format(
- op[0], ' '.join(op[1:]))
- proc = subprocess.Popen(
- commands,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
- out = proc.communicate()[0]
- out = [_ for _ in out.decode().split('\n') if 'Latency' in _][-1]
- out = re.findall(r'\d+\.?\d*', out)[0]
- out = float(out)
- elif platform == 'ios':
- print('Please refer the usage doc to get iOS latency lookup table')
- out = 0
- else:
- print('Please define `get_op_latency` for {} platform'.format(platform))
- out = 0
- return out
-
-
-def main():
- """main."""
- args = get_args()
- ops = get_all_ops()
- fid = open(args.latency_lookup_table_path, 'w')
- for op in ops:
- op = [str(item) for item in op]
- latency = get_op_latency(
- op[:1] + [str(args.threads), str(args.test_iter)] + op[1:],
- args.platform)
- fid.write('{} {}\n'.format(' '.join(op), latency))
- fid.close()
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/light_nas/get_ops_from_program.py b/PaddleSlim/light_nas/get_ops_from_program.py
deleted file mode 100644
index 78ed335e68bd96bc4ff4e92c18ed0559a2f94f4b..0000000000000000000000000000000000000000
--- a/PaddleSlim/light_nas/get_ops_from_program.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Get ops from program."""
-
-
-def conv_op_params(blocks, current_op):
- """Getting params of conv op
- Args:
- blocks: BlockDesc, current block
- current_op: OpDesc, current op
- Returns:
- (list): op name and hyperparamters
- """
- tmp, res = [], []
- # op_name
- tmp.append('conv')
- # flag_bias
- if not current_op.input('Bias'):
- tmp.append(0)
- else:
- tmp.append(1)
- # flag_relu
- tmp.append(int(current_op.attr('fuse_relu')))
- # batch size
- tmp.append(1)
- # channels, height, width
- in_shapes = blocks.vars[current_op.input('Input')[0]].shape
- tmp = tmp + [int(in_shapes[1]), int(in_shapes[2]), int(in_shapes[3])]
-
- # output channels
- w_shapes = blocks.vars[current_op.input('Filter')[0]].shape
- tmp.append(int(w_shapes[0]))
-
- # group
- tmp.append(int(current_op.attr('groups')))
-
- # kernel size
- tmp.append(int(w_shapes[2]))
- if w_shapes[2] != w_shapes[3]:
- res.append(int(w_shapes[3]))
-
- # padding
- paddings = current_op.attr('paddings')
- tmp.append(int(paddings[0]))
- if paddings[0] != paddings[1]:
-        # record the second padding value when paddings are asymmetric
-        res.append(int(paddings[1]))
-
- # strides
- strides = current_op.attr('strides')
- tmp.append(int(strides[0]))
- if strides[0] != strides[1]:
- res.append(int(strides[1]))
-
- # dilations
- dilations = current_op.attr('dilations')
- tmp.append(int(dilations[0]))
- if dilations[0] != dilations[1]:
- res.append(int(dilations[1]))
-
- tmp = tmp + res
- return tmp
-
-
-def batch_norm_op_params(blocks, current_op):
- """Getting params of batch_norm op
- Args:
- blocks: BlockDesc, current block
- current_op: OpDesc, current op
- Returns:
- (list): op name and hyperparamters
- """
- tmp = []
- # op name
- tmp.append('batch_norm')
- # activation type
- if not current_op.attr('fuse_with_relu'):
- tmp.append('None')
- else:
- tmp.append('relu')
- # batch size
- tmp.append(1)
- # input channels, height, width
- in_shapes = blocks.vars[current_op.input("X")[0]].shape
- tmp = tmp + [int(in_shapes[1]), int(in_shapes[2]), int(in_shapes[3])]
- return tmp
-
-
-def eltwise_op_params(blocks, current_op):
- """Getting params of eltwise op
- Args:
- blocks: BlockDesc, current block
- current_op: OpDesc, current op
- Returns:
- (list): op name and hyperparamters
- """
- # op name
- tmp = ['eltwise']
- # elementwise type, TODO: add more ops
- if current_op.type == 'elementwise_mul':
- tmp.append(1)
- elif current_op.type == 'elementwise_add':
- tmp.append(2)
- else:
- tmp.append(3)
- # batch size
- tmp.append(1)
- # input channels, height, width
- in_shapes = blocks.vars[current_op.input('X')[0]].shape
- while len(in_shapes) < 4:
- in_shapes = in_shapes + (1, )
-
- for i in range(1, len(in_shapes)):
- tmp.append(int(in_shapes[i]))
- return tmp
-
-
-def activation_op_params(blocks, current_op):
- """Getting params of activation op
- Args:
- blocks: BlockDesc, current block
- current_op: OpDesc, current op
- Returns:
- (list): op name and hyperparamters
- """
- tmp = []
- # op name
- tmp.append('activation')
- # activation type
- tmp.append(current_op.type)
- # batch size
- tmp.append(1)
- # input channels, height, width
- in_shapes = blocks.vars[current_op.input('X')[0]].shape
- while len(in_shapes) < 4:
- in_shapes = in_shapes + (1, )
-
- for i in range(1, len(in_shapes)):
- tmp.append(int(in_shapes[i]))
- return tmp
-
-
-def pooling_op_params(blocks, current_op):
- """Getting params of pooling op
- Args:
- blocks: BlockDesc, current block
- current_op: OpDesc, current op
- Returns:
- (list): op name and hyperparamters
- """
- tmp, res = [], []
- # op name
- tmp.append('pooling')
- # global pooling
- tmp.append(int(current_op.attr('global_pooling')))
- # batch size
- tmp.append(1)
- # channels, height, width
- in_shapes = blocks.vars[current_op.input('X')[0]].shape
- tmp = tmp + [int(in_shapes[1]), int(in_shapes[2]), int(in_shapes[3])]
- # kernel size
- ksize = current_op.attr('ksize')
- tmp.append(int(ksize[0]))
- if ksize[0] != ksize[1]:
- res.append(int(ksize[1]))
-
- # padding
- paddings = current_op.attr('paddings')
- tmp.append(int(paddings[0]))
- if paddings[0] != paddings[1]:
- res.append(int(paddings[1]))
-
- # stride
- strides = current_op.attr('strides')
- tmp.append(int(strides[0]))
- if strides[0] != strides[1]:
- res.append(int(strides[1]))
-
- # ceil mode
- tmp.append(int(current_op.attr('ceil_mode')))
-
- # pool type
- pool_type = current_op.attr('pooling_type')
- exclusive = current_op.attr('exclusive')
-    # `exclusive` only distinguishes the two average-pooling variants;
-    # max pooling always maps to type 1
-    if pool_type == 'max':
-        tmp.append(1)
-    elif not exclusive:
-        tmp.append(2)
-    else:
-        tmp.append(3)
-
- tmp = tmp + res
- return tmp
-
-
-def softmax_op_params(blocks, current_op):
- """Getting params of softmax op
- Args:
- blocks: BlockDesc, current block
- current_op: OpDesc, current op
- Returns:
- (list): op name and hyperparamters
- """
- # op name
- tmp = ['softmax']
- # axis
- tmp.append(current_op.attr('axis'))
- # batch size
- tmp.append(1)
- # input channels, height, width
- in_shapes = blocks.vars[current_op.input('X')[0]].shape
- while len(in_shapes) < 4:
- in_shapes = in_shapes + (1, )
-
- for i in range(1, len(in_shapes)):
- tmp.append(int(in_shapes[i]))
-
- return tmp
-
-
-def fc_op_params(blocks, current_op):
- """Getting params of fc op
- Note:
- fc op is converted to conv op with 1x1 kernels
- Args:
- blocks: BlockDesc, current block
- current_op: OpDesc, current op
- Returns:
- (list): op name and hyperparamters
- """
- # op name
- tmp = ['conv']
- # flag bias
- tmp.append(0)
- # flag relu
- tmp.append(0)
- # batch size
- tmp.append(1)
- # input channels, height, width
- channels = 1
- in_shape = blocks.vars[current_op.input('X')[0]].shape
- for i in range(1, len(in_shape)):
- channels *= in_shape[i]
- tmp = tmp + [int(channels), 1, 1]
- # output channels
- tmp.append(int(blocks.vars[current_op.output('Out')[0]].shape[1]))
- # groups, kernel size, padding, stride, dilation
- tmp = tmp + [1, 1, 0, 1, 1]
- return tmp
-
-
-def get_ops_from_program(program):
- """Getting ops params from a paddle program
- Args:
- program(Program): The program to get ops.
- Returns:
- (list): ops.
- """
- blocks = program.global_block()
- ops = []
- i = 0
- while i < len(blocks.ops):
- current_op = blocks.ops[i]
- if current_op.type in ['conv2d', 'depthwise_conv2d']:
- tmp = conv_op_params(blocks, current_op)
- elif current_op.type in [
- 'elementwise_add', 'elementwise_mul', 'elementwise_max'
- ]:
- tmp = eltwise_op_params(blocks, current_op)
- elif current_op.type in [
- 'relu', 'prelu', 'sigmoid', 'relu6', 'elu', 'brelu',
- 'leaky_relu'
- ]:
- tmp = activation_op_params(blocks, current_op)
- elif current_op.type == 'batch_norm':
- tmp = batch_norm_op_params(blocks, current_op)
-        elif current_op.type == 'pool2d':
-            tmp = pooling_op_params(blocks, current_op)
- elif current_op.type == 'softmax':
- tmp = softmax_op_params(blocks, current_op)
- elif current_op.type == 'mul':
- tmp = fc_op_params(blocks, current_op)
- else:
- tmp = None
- if tmp:
- ops.append(tmp)
- i += 1
- return ops
diff --git a/PaddleSlim/light_nas/light_nas_space.py b/PaddleSlim/light_nas/light_nas_space.py
deleted file mode 100644
index b99bb2ac5dd37a4c8f9a11db42d535d30deca58a..0000000000000000000000000000000000000000
--- a/PaddleSlim/light_nas/light_nas_space.py
+++ /dev/null
@@ -1,473 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Light-NAS space."""
-import sys
-import math
-from paddle.fluid.contrib.slim.nas import SearchSpace
-import paddle.fluid as fluid
-import paddle
-sys.path.append('..')
-from models import LightNASNet
-import reader
-from get_ops_from_program import get_ops_from_program
-
-total_images = 1281167
-IMAGENET1000 = 1281167  # ImageNet-1k train-set size; fallback used by optimizer_setting
-lr = 0.1
-num_epochs = 240
-batch_size = 512
-lr_strategy = "cosine_decay"
-l2_decay = 4e-5
-momentum_rate = 0.9
-image_shape = [3, 224, 224]
-class_dim = 1000
-
-__all__ = ['LightNASSpace']
-
-NAS_FILTER_SIZE = [[18, 24, 30], [24, 32, 40], [48, 64, 80], [72, 96, 120],
- [120, 160, 192]]
-NAS_LAYERS_NUMBER = [[1, 2, 3], [2, 3, 4], [3, 4, 5], [2, 3, 4], [2, 3, 4]]
-NAS_KERNEL_SIZE = [3, 5]
-NAS_FILTERS_MULTIPLIER = [3, 4, 5, 6]
-NAS_SHORTCUT = [0, 1]
-NAS_SE = [0, 1]
-LATENCY_LOOKUP_TABLE_PATH = None
-
-
-def get_bottleneck_params_list(var):
- """Get bottleneck_params_list from var.
- Args:
- var: list, variable list.
- Returns:
- list, bottleneck_params_list.
- """
- params_list = [
- 1, 16, 1, 1, 3, 1, 0, \
- 6, 24, 2, 2, 3, 1, 0, \
- 6, 32, 3, 2, 3, 1, 0, \
- 6, 64, 4, 2, 3, 1, 0, \
- 6, 96, 3, 1, 3, 1, 0, \
- 6, 160, 3, 2, 3, 1, 0, \
- 6, 320, 1, 1, 3, 1, 0, \
- ]
- for i in range(5):
- params_list[i * 7 + 7] = NAS_FILTERS_MULTIPLIER[var[i * 6]]
- params_list[i * 7 + 8] = NAS_FILTER_SIZE[i][var[i * 6 + 1]]
- params_list[i * 7 + 9] = NAS_LAYERS_NUMBER[i][var[i * 6 + 2]]
- params_list[i * 7 + 11] = NAS_KERNEL_SIZE[var[i * 6 + 3]]
- params_list[i * 7 + 12] = NAS_SHORTCUT[var[i * 6 + 4]]
- params_list[i * 7 + 13] = NAS_SE[var[i * 6 + 5]]
- return params_list
-
-
-def ops_of_inverted_residual_unit(in_c,
- in_shape,
- expansion,
- kernels,
- num_filters,
- s,
- ifshortcut=True,
- ifse=True):
- """Get ops of possible repeated inverted residual unit
- Args:
- in_c: list, a list of numbers of input channels
- in_shape: int, size of input feature map
- expansion: int, expansion factor
- kernels: list, a list of possible kernel size
- s: int, stride of depthwise conv
- ifshortcut: bool
- ifse: bool
- Returns:
- op_params: list, a list of op params
- """
- op_params = []
- for c in in_c:
- for t in expansion:
- # expansion
- op_params.append(('conv', 0, 0, 1, c, in_shape, in_shape, c * t, 1,
- 1, 0, 1, 1))
- op_params.append(('batch_norm', 'None', 1, c * t, in_shape,
- in_shape))
- op_params.append(('activation', 'relu6', 1, c * t, in_shape,
- in_shape))
-
- # depthwise
- for k in kernels:
- op_params.append(('conv', 0, 0, 1, c * t, in_shape, in_shape,
- c * t, c * t, k, int(int(k - 1) / 2), s, 1))
- op_params.append(('batch_norm', 'None', 1, c * t, int(in_shape / s),
- int(in_shape / s)))
- op_params.append(('activation', 'relu6', 1, c * t,
- int(in_shape / s), int(in_shape / s)))
-
- # shrink
- for out_c in num_filters:
- op_params.append(('conv', 0, 0, 1, c * t, int(in_shape / s),
- int(in_shape / s), out_c, 1, 1, 0, 1, 1))
- op_params.append(('batch_norm', 'None', 1, out_c,
- int(in_shape / s), int(in_shape / s)))
-
- # shortcut
- if ifshortcut:
- op_params.append(('eltwise', 2, 1, out_c, int(in_shape / s),
- int(in_shape / s)))
- if ifse:
- op_params.append(('pooling', 1, 1, out_c, int(in_shape / s),
- int(in_shape / s), 0, 0, 1, 0, 3))
- op_params.append(('conv', 0, 0, 1, out_c, 1, 1,
- int(out_c / 4), 1, 1, 0, 1, 1))
- op_params.append(('eltwise', 2, 1, int(out_c / 4), 1, 1))
- op_params.append(
- ('activation', 'relu', 1, int(out_c / 4), 1, 1))
- op_params.append(('conv', 0, 0, 1, int(out_c / 4), 1, 1,
- out_c, 1, 1, 0, 1, 1))
- op_params.append(('eltwise', 2, 1, out_c, 1, 1))
- op_params.append(('activation', 'sigmoid', 1, out_c, 1, 1))
- op_params.append(('eltwise', 1, 1, out_c, int(in_shape / s),
- int(in_shape / s)))
- op_params.append(('activation', 'relu', 1, out_c,
- int(in_shape / s), int(in_shape / s)))
-
- return op_params
-
-
-def get_all_ops(ifshortcut=True, ifse=True, strides=[1, 2, 2, 2, 1, 2, 1]):
- """Get all possible ops of current search space
- Args:
- ifshortcut: bool, shortcut or not
- ifse: bool, se or not
- strides: list, list of strides for bottlenecks
- Returns:
- op_params: list, a list of all possible params
- """
- op_params = []
- # conv1_1
- op_params.append(('conv', 0, 0, 1, image_shape[0], image_shape[1],
- image_shape[2], 32, 1, 3, 1, 2, 1))
- op_params.append(('batch_norm', 'None', 1, 32, int(image_shape[1] / 2),
- int(image_shape[2] / 2)))
- op_params.append(('activation', 'relu6', 1, 32, int(image_shape[1] / 2),
- int(image_shape[2] / 2)))
-
- # bottlenecks, TODO: different h and w for images
- in_c, in_shape = [32], int(image_shape[1] / 2)
- for i in range(len(NAS_FILTER_SIZE) + 2):
- if i == 0:
- expansion, kernels, num_filters, s = [1], [3], [16], strides[i]
- elif i == len(NAS_FILTER_SIZE) + 1:
- expansion, kernels, num_filters, s = [6], [3], [320], strides[i]
- else:
- expansion, kernels, num_filters, s = NAS_FILTERS_MULTIPLIER, \
- NAS_KERNEL_SIZE, \
- NAS_FILTER_SIZE[i-1], \
- strides[i]
-
- # first block
- tmp_ops = ops_of_inverted_residual_unit(
- in_c, in_shape, expansion, kernels, num_filters, s, False, ifse)
- op_params = op_params + tmp_ops
-
- in_c, in_shape = num_filters, int(in_shape / s)
-
- # repeated block: possibly more ops, but it is ok
- tmp_ops = ops_of_inverted_residual_unit(in_c, in_shape, expansion,
- kernels, num_filters, 1,
- ifshortcut, ifse)
- op_params = op_params + tmp_ops
-
- # last conv
- op_params.append(('conv', 0, 0, 1, 320, in_shape, in_shape, 1280, 1, 1, 0,
- 1, 1))
- op_params.append(('batch_norm', 'None', 1, 1280, in_shape, in_shape))
- op_params.append(('activation', 'relu6', 1, 1280, in_shape, in_shape))
- op_params.append(('pooling', 1, 1, 1280, in_shape, in_shape, in_shape, 0, 1,
- 0, 3))
- # fc, converted to 1x1 conv
- op_params.append(('conv', 0, 0, 1, 1280, 1, 1, class_dim, 1, 1, 0, 1, 1))
- op_params.append(('eltwise', 2, 1, 1000, 1, 1))
-
- op_params.append(('softmax', -1, 1, 1000, 1, 1))
- op_params.append(('eltwise', 1, 1, 1, 1, 1))
- op_params.append(('eltwise', 2, 1, 1, 1, 1))
- return list(set(op_params))
-
-
-class LightNASSpace(SearchSpace):
- def __init__(self):
- super(LightNASSpace, self).__init__()
- if LATENCY_LOOKUP_TABLE_PATH:
- self.init_latency_lookup_table(LATENCY_LOOKUP_TABLE_PATH)
-
- def init_latency_lookup_table(self, latency_lookup_table_path):
- """Init lookup table.
- Args:
- latency_lookup_table_path: str, latency lookup table path.
- """
- self._latency_lookup_table = dict()
- for line in open(latency_lookup_table_path):
- line = line.split()
- self._latency_lookup_table[tuple(line[:-1])] = float(line[-1])
-
- def init_tokens(self):
- """Get init tokens in search space.
- """
- return [
- 3, 1, 1, 0, 1, 0, 3, 1, 1, 0, 1, 0, 3, 1, 1, 0, 1, 0, 3, 1, 1, 0, 1,
- 0, 3, 1, 1, 0, 1, 0
- ]
-
- def range_table(self):
- """Get range table of current search space.
- """
- # [NAS_FILTER_SIZE, NAS_LAYERS_NUMBER, NAS_KERNEL_SIZE, NAS_FILTERS_MULTIPLIER, NAS_SHORTCUT, NAS_SE] * 5
- return [
- 4, 3, 3, 2, 2, 2, 4, 3, 3, 2, 2, 2, 4, 3, 3, 2, 2, 2, 4, 3, 3, 2, 2,
- 2, 4, 3, 3, 2, 2, 2
- ]
-
- def get_model_latency(self, program):
- """Get model latency according to program.
- Args:
- program(Program): The program to get latency.
- Return:
- (float): model latency.
- """
- ops = get_ops_from_program(program)
- latency = sum(
- [self._latency_lookup_table[tuple(map(str, op))] for op in ops])
- return latency
-
- def create_net(self, tokens=None):
- """Create a network for training by tokens.
- """
- if tokens is None:
- tokens = self.init_tokens()
-
- bottleneck_params_list = get_bottleneck_params_list(tokens)
-
- startup_prog = fluid.Program()
- train_prog = fluid.Program()
- test_prog = fluid.Program()
- train_py_reader, train_cost, train_acc1, train_acc5, global_lr = build_program(
- is_train=True,
- main_prog=train_prog,
- startup_prog=startup_prog,
- bottleneck_params_list=bottleneck_params_list)
- test_py_reader, test_cost, test_acc1, test_acc5 = build_program(
- is_train=False,
- main_prog=test_prog,
- startup_prog=startup_prog,
- bottleneck_params_list=bottleneck_params_list)
- test_prog = test_prog.clone(for_test=True)
-        train_batch_size = batch_size // 4  # integer division so paddle.batch gets an int
- test_batch_size = batch_size
- train_reader = paddle.batch(
- reader.train(), batch_size=train_batch_size, drop_last=True)
- test_reader = paddle.batch(reader.val(), batch_size=test_batch_size)
-
- with fluid.program_guard(train_prog, startup_prog):
- train_py_reader.decorate_paddle_reader(train_reader)
-
- with fluid.program_guard(test_prog, startup_prog):
- test_py_reader.decorate_paddle_reader(test_reader)
- return startup_prog, train_prog, test_prog, (
- train_cost, train_acc1, train_acc5,
- global_lr), (test_cost, test_acc1,
- test_acc5), train_py_reader, test_py_reader
-
-
-def build_program(is_train,
- main_prog,
- startup_prog,
- bottleneck_params_list=None):
- with fluid.program_guard(main_prog, startup_prog):
- py_reader = fluid.layers.py_reader(
- capacity=16,
- shapes=[[-1] + image_shape, [-1, 1]],
- lod_levels=[0, 0],
- dtypes=["float32", "int64"],
- use_double_buffer=True)
- with fluid.unique_name.guard():
- image, label = fluid.layers.read_file(py_reader)
- model = LightNASNet()
- avg_cost, acc_top1, acc_top5 = net_config(
- image,
- label,
- model,
- class_dim=class_dim,
- bottleneck_params_list=bottleneck_params_list,
- scale_loss=1.0)
-
- avg_cost.persistable = True
- acc_top1.persistable = True
- acc_top5.persistable = True
- if is_train:
- params = model.params
- params["total_images"] = total_images
- params["lr"] = lr
- params["num_epochs"] = num_epochs
- params["learning_strategy"]["batch_size"] = batch_size
- params["learning_strategy"]["name"] = lr_strategy
- params["l2_decay"] = l2_decay
- params["momentum_rate"] = momentum_rate
- optimizer = optimizer_setting(params)
- optimizer.minimize(avg_cost)
- global_lr = optimizer._global_learning_rate()
-
- if is_train:
- return py_reader, avg_cost, acc_top1, acc_top5, global_lr
- else:
- return py_reader, avg_cost, acc_top1, acc_top5
-
-
-def net_config(image,
- label,
- model,
- class_dim=1000,
- bottleneck_params_list=None,
- scale_loss=1.0):
- bottleneck_params_list = [
- bottleneck_params_list[i:i + 7]
- for i in range(0, len(bottleneck_params_list), 7)
- ]
- out = model.net(input=image,
- bottleneck_params_list=bottleneck_params_list,
- class_dim=class_dim)
- cost, pred = fluid.layers.softmax_with_cross_entropy(
- out, label, return_softmax=True)
- if scale_loss > 1:
- avg_cost = fluid.layers.mean(x=cost) * float(scale_loss)
- else:
- avg_cost = fluid.layers.mean(x=cost)
- acc_top1 = fluid.layers.accuracy(input=pred, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=pred, label=label, k=5)
- return avg_cost, acc_top1, acc_top5
-
-
-def optimizer_setting(params):
- """optimizer setting.
- Args:
- params: dict, params.
- """
- ls = params["learning_strategy"]
- l2_decay = params["l2_decay"]
- momentum_rate = params["momentum_rate"]
- if ls["name"] == "piecewise_decay":
- if "total_images" not in params:
- total_images = IMAGENET1000
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
- step = int(total_images / batch_size + 1)
- bd = [step * e for e in ls["epochs"]]
- base_lr = params["lr"]
- lr = []
- lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
- optimizer = fluid.optimizer.Momentum(
- learning_rate=fluid.layers.piecewise_decay(
- boundaries=bd, values=lr),
- momentum=momentum_rate,
- regularization=fluid.regularizer.L2Decay(l2_decay))
- elif ls["name"] == "cosine_decay":
- if "total_images" not in params:
- total_images = IMAGENET1000
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
- step = int(total_images / batch_size + 1)
- lr = params["lr"]
- num_epochs = params["num_epochs"]
- optimizer = fluid.optimizer.Momentum(
- learning_rate=fluid.layers.cosine_decay(
- learning_rate=lr, step_each_epoch=step, epochs=num_epochs),
- momentum=momentum_rate,
- regularization=fluid.regularizer.L2Decay(l2_decay))
- elif ls["name"] == "cosine_warmup_decay":
- if "total_images" not in params:
- total_images = IMAGENET1000
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
- l2_decay = params["l2_decay"]
- momentum_rate = params["momentum_rate"]
- step = int(math.ceil(float(total_images) / batch_size))
- lr = params["lr"]
- num_epochs = params["num_epochs"]
- optimizer = fluid.optimizer.Momentum(
- learning_rate=cosine_decay_with_warmup(
- learning_rate=lr, step_each_epoch=step, epochs=num_epochs),
- momentum=momentum_rate,
- regularization=fluid.regularizer.L2Decay(l2_decay))
- elif ls["name"] == "exponential_decay":
- if "total_images" not in params:
- total_images = IMAGENET1000
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
- num_epochs = params["num_epochs"]
- start_lr = params["lr"]
- total_step = int((total_images / batch_size) * num_epochs)
- decay_step = int((total_images / batch_size) * 2.4)
- lr = start_lr
- lr = fluid.layers.exponential_decay(
- learning_rate=start_lr,
- decay_steps=decay_step,
- decay_rate=0.97,
- staircase=True)
- optimizer = fluid.optimizer.SGDOptimizer(learning_rate=lr)
- elif ls["name"] == "exponential_decay_with_RMSProp":
- if "total_images" not in params:
- total_images = IMAGENET1000
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
- step = int(math.ceil(float(total_images) / batch_size))
- decay_step = int(2.4 * step)
- lr = params["lr"]
- num_epochs = params["num_epochs"]
- optimizer = fluid.optimizer.RMSProp(
- learning_rate=fluid.layers.exponential_decay(
- learning_rate=lr,
- decay_steps=decay_step,
- decay_rate=0.97,
- staircase=False),
- regularization=fluid.regularizer.L2Decay(l2_decay),
- momentum=0.9,
- rho=0.9,
- epsilon=0.001)
- elif ls["name"] == "linear_decay":
- if "total_images" not in params:
- total_images = IMAGENET1000
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
- num_epochs = params["num_epochs"]
- start_lr = params["lr"]
- end_lr = 0
- total_step = int((total_images / batch_size) * num_epochs)
- lr = fluid.layers.polynomial_decay(
- start_lr, total_step, end_lr, power=1)
- optimizer = fluid.optimizer.Momentum(
- learning_rate=lr,
- momentum=momentum_rate,
- regularization=fluid.regularizer.L2Decay(l2_decay))
- elif ls["name"] == "adam":
- lr = params["lr"]
- optimizer = fluid.optimizer.Adam(learning_rate=lr)
- else:
- lr = params["lr"]
- optimizer = fluid.optimizer.Momentum(
- learning_rate=lr,
- momentum=momentum_rate,
- regularization=fluid.regularizer.L2Decay(l2_decay))
- return optimizer
diff --git a/PaddleSlim/light_nas/run.sh b/PaddleSlim/light_nas/run.sh
deleted file mode 100644
index 396247cfe26bb2139a64eb2cd10b39d639349824..0000000000000000000000000000000000000000
--- a/PaddleSlim/light_nas/run.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-# enable GC strategy
-export FLAGS_fast_eager_deletion_mode=1
-export FLAGS_eager_delete_tensor_gb=0.0
-export CUDA_VISIBLE_DEVICES=0,1,2,3
-python search.py
diff --git a/PaddleSlim/light_nas/search.py b/PaddleSlim/light_nas/search.py
deleted file mode 100644
index d0eacc34291f9ba15bc6fedf6b3c10079e6254ee..0000000000000000000000000000000000000000
--- a/PaddleSlim/light_nas/search.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle.fluid as fluid
-from paddle.fluid.contrib.slim.core import Compressor
-from light_nas_space import LightNASSpace
-
-
-def search():
- if not fluid.core.is_compiled_with_cuda():
- return
-
- space = LightNASSpace()
-
- startup_prog, train_prog, test_prog, train_metrics, test_metrics, train_reader, test_reader = space.create_net(
- )
- train_cost, train_acc1, train_acc5, global_lr = train_metrics
- test_cost, test_acc1, test_acc5 = test_metrics
-
- place = fluid.CUDAPlace(0)
- exe = fluid.Executor(place)
- exe.run(startup_prog)
-
- val_fetch_list = [('acc_top1', test_acc1.name), ('acc_top5',
- test_acc5.name)]
- train_fetch_list = [('loss', train_cost.name)]
-
- com_pass = Compressor(
- place,
- fluid.global_scope(),
- train_prog,
- train_reader=train_reader,
- train_feed_list=None,
- train_fetch_list=train_fetch_list,
- eval_program=test_prog,
- eval_reader=test_reader,
- eval_feed_list=None,
- eval_fetch_list=val_fetch_list,
- train_optimizer=None,
- search_space=space)
- com_pass.config('./compress.yaml')
- eval_graph = com_pass.run()
-
-
-if __name__ == '__main__':
- search()
diff --git a/PaddleSlim/models/__init__.py b/PaddleSlim/models/__init__.py
deleted file mode 100644
index 2a51d7467360e8a007be5ccaee956ec7f5e6c0f5..0000000000000000000000000000000000000000
--- a/PaddleSlim/models/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .mobilenet import MobileNet
-from .resnet import ResNet50, ResNet101, ResNet152
-from .googlenet import GoogleNet
-from .light_nasnet import LightNASNet
diff --git a/PaddleSlim/models/googlenet.py b/PaddleSlim/models/googlenet.py
deleted file mode 100644
index adc423b7d723f95d42b6fb410f6784cd22659ebe..0000000000000000000000000000000000000000
--- a/PaddleSlim/models/googlenet.py
+++ /dev/null
@@ -1,233 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-
-__all__ = ['GoogleNet']
-
-train_parameters = {
- "input_size": [3, 224, 224],
- "input_mean": [0.485, 0.456, 0.406],
- "input_std": [0.229, 0.224, 0.225],
- "learning_strategy": {
- "name": "piecewise_decay",
- "batch_size": 256,
- "epochs": [10, 16, 30],
- "steps": [0.1, 0.01, 0.001, 0.0001]
- }
-}
-
-
-class GoogleNet():
- def __init__(self):
- self.params = train_parameters
-
- def conv_layer(self,
- input,
- num_filters,
- filter_size,
- stride=1,
- groups=1,
- act=None,
- name=None):
- channels = input.shape[1]
- stdv = (3.0 / (filter_size**2 * channels))**0.5
- param_attr = ParamAttr(
- initializer=fluid.initializer.Uniform(-stdv, stdv),
- name=name + "_weights")
- conv = fluid.layers.conv2d(
- input=input,
- num_filters=num_filters,
- filter_size=filter_size,
- stride=stride,
- padding=(filter_size - 1) // 2,
- groups=groups,
- act=act,
- param_attr=param_attr,
- bias_attr=False,
- name=name)
- return conv
-
- def xavier(self, channels, filter_size, name):
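-        # uniform initializer with bound sqrt(3 / (filter_size^2 * channels)),
-        # the same bound used in conv_layer above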
- stdv = (3.0 / (filter_size**2 * channels))**0.5
- param_attr = ParamAttr(
- initializer=fluid.initializer.Uniform(-stdv, stdv),
- name=name + "_weights")
-
- return param_attr
-
- def inception(self,
- input,
- channels,
- filter1,
- filter3R,
- filter3,
- filter5R,
- filter5,
- proj,
- name=None):
- conv1 = self.conv_layer(
- input=input,
- num_filters=filter1,
- filter_size=1,
- stride=1,
- act=None,
- name="inception_" + name + "_1x1")
- conv3r = self.conv_layer(
- input=input,
- num_filters=filter3R,
- filter_size=1,
- stride=1,
- act=None,
- name="inception_" + name + "_3x3_reduce")
- conv3 = self.conv_layer(
- input=conv3r,
- num_filters=filter3,
- filter_size=3,
- stride=1,
- act=None,
- name="inception_" + name + "_3x3")
- conv5r = self.conv_layer(
- input=input,
- num_filters=filter5R,
- filter_size=1,
- stride=1,
- act=None,
- name="inception_" + name + "_5x5_reduce")
- conv5 = self.conv_layer(
- input=conv5r,
- num_filters=filter5,
- filter_size=5,
- stride=1,
- act=None,
- name="inception_" + name + "_5x5")
- pool = fluid.layers.pool2d(
- input=input,
- pool_size=3,
- pool_stride=1,
- pool_padding=1,
- pool_type='max')
- convprj = fluid.layers.conv2d(
- input=pool,
- filter_size=1,
- num_filters=proj,
- stride=1,
- padding=0,
- name="inception_" + name + "_3x3_proj",
- param_attr=ParamAttr(
- name="inception_" + name + "_3x3_proj_weights"),
- bias_attr=False)
- cat = fluid.layers.concat(input=[conv1, conv3, conv5, convprj], axis=1)
- cat = fluid.layers.relu(cat)
- return cat
-
- def net(self, input, class_dim=1000):
- conv = self.conv_layer(
- input=input,
- num_filters=64,
- filter_size=7,
- stride=2,
- act=None,
- name="conv1")
- pool = fluid.layers.pool2d(
- input=conv, pool_size=3, pool_type='max', pool_stride=2)
-
- conv = self.conv_layer(
- input=pool,
- num_filters=64,
- filter_size=1,
- stride=1,
- act=None,
- name="conv2_1x1")
- conv = self.conv_layer(
- input=conv,
- num_filters=192,
- filter_size=3,
- stride=1,
- act=None,
- name="conv2_3x3")
- pool = fluid.layers.pool2d(
- input=conv, pool_size=3, pool_type='max', pool_stride=2)
-
- ince3a = self.inception(pool, 192, 64, 96, 128, 16, 32, 32, "ince3a")
- ince3b = self.inception(ince3a, 256, 128, 128, 192, 32, 96, 64,
- "ince3b")
- pool3 = fluid.layers.pool2d(
- input=ince3b, pool_size=3, pool_type='max', pool_stride=2)
-
- ince4a = self.inception(pool3, 480, 192, 96, 208, 16, 48, 64, "ince4a")
- ince4b = self.inception(ince4a, 512, 160, 112, 224, 24, 64, 64,
- "ince4b")
- ince4c = self.inception(ince4b, 512, 128, 128, 256, 24, 64, 64,
- "ince4c")
- ince4d = self.inception(ince4c, 512, 112, 144, 288, 32, 64, 64,
- "ince4d")
- ince4e = self.inception(ince4d, 528, 256, 160, 320, 32, 128, 128,
- "ince4e")
- pool4 = fluid.layers.pool2d(
- input=ince4e, pool_size=3, pool_type='max', pool_stride=2)
-
- ince5a = self.inception(pool4, 832, 256, 160, 320, 32, 128, 128,
- "ince5a")
- ince5b = self.inception(ince5a, 832, 384, 192, 384, 48, 128, 128,
- "ince5b")
- pool5 = fluid.layers.pool2d(
- input=ince5b, pool_size=7, pool_type='avg', pool_stride=7)
- dropout = fluid.layers.dropout(x=pool5, dropout_prob=0.4)
- out = fluid.layers.fc(input=dropout,
- size=class_dim,
- act='softmax',
- param_attr=self.xavier(1024, 1, "out"),
- name="out",
- bias_attr=ParamAttr(name="out_offset"))
-
- pool_o1 = fluid.layers.pool2d(
- input=ince4a, pool_size=5, pool_type='avg', pool_stride=3)
- conv_o1 = self.conv_layer(
- input=pool_o1,
- num_filters=128,
- filter_size=1,
- stride=1,
- act=None,
- name="conv_o1")
- fc_o1 = fluid.layers.fc(input=conv_o1,
- size=1024,
- act='relu',
- param_attr=self.xavier(2048, 1, "fc_o1"),
- name="fc_o1",
- bias_attr=ParamAttr(name="fc_o1_offset"))
- dropout_o1 = fluid.layers.dropout(x=fc_o1, dropout_prob=0.7)
- out1 = fluid.layers.fc(input=dropout_o1,
- size=class_dim,
- act='softmax',
- param_attr=self.xavier(1024, 1, "out1"),
- name="out1",
- bias_attr=ParamAttr(name="out1_offset"))
-
- pool_o2 = fluid.layers.pool2d(
- input=ince4d, pool_size=5, pool_type='avg', pool_stride=3)
- conv_o2 = self.conv_layer(
- input=pool_o2,
- num_filters=128,
- filter_size=1,
- stride=1,
- act=None,
- name="conv_o2")
- fc_o2 = fluid.layers.fc(input=conv_o2,
- size=1024,
- act='relu',
- param_attr=self.xavier(2048, 1, "fc_o2"),
- name="fc_o2",
- bias_attr=ParamAttr(name="fc_o2_offset"))
- dropout_o2 = fluid.layers.dropout(x=fc_o2, dropout_prob=0.7)
- out2 = fluid.layers.fc(input=dropout_o2,
- size=class_dim,
- act='softmax',
- param_attr=self.xavier(1024, 1, "out2"),
- name="out2",
- bias_attr=ParamAttr(name="out2_offset"))
-
- # last fc layer is "out"
- return out, out1, out2
diff --git a/PaddleSlim/models/light_nasnet.py b/PaddleSlim/models/light_nasnet.py
deleted file mode 100644
index 3c741e90e869b8c1a7dad5695d17578032cec770..0000000000000000000000000000000000000000
--- a/PaddleSlim/models/light_nasnet.py
+++ /dev/null
@@ -1,339 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""LightNASNet."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import math
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-
-__all__ = ['LightNASNet']
-
-train_parameters = {
- "input_size": [3, 224, 224],
- "input_mean": [0.485, 0.456, 0.406],
- "input_std": [0.229, 0.224, 0.225],
- "learning_strategy": {
- "name": "piecewise_decay",
- "batch_size": 256,
- "epochs": [30, 60, 90],
- "steps": [0.1, 0.01, 0.001, 0.0001]
- }
-}
-
-
-class LightNASNet(object):
- """LightNASNet."""
-
- def __init__(self):
- self.params = train_parameters
-
- def net(self, input, bottleneck_params_list=None, class_dim=1000,
- scale=1.0):
- """Build network.
- Args:
- input: Variable, input.
- class_dim: int, class dim.
- scale: float, scale.
- Returns:
- Variable, network output.
- """
- if bottleneck_params_list is None:
- # MobileNetV2
- # bottleneck_params_list = [
- # (1, 16, 1, 1, 3, 1, 0),
- # (6, 24, 2, 2, 3, 1, 0),
- # (6, 32, 3, 2, 3, 1, 0),
- # (6, 64, 4, 2, 3, 1, 0),
- # (6, 96, 3, 1, 3, 1, 0),
- # (6, 160, 3, 2, 3, 1, 0),
- # (6, 320, 1, 1, 3, 1, 0),
- # ]
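-            # each tuple: (expansion t, out channels c, repeat n, stride s,
-            #              kernel k, use shortcut, use squeeze-and-excitation)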
- bottleneck_params_list = [
- (1, 16, 1, 1, 3, 1, 0),
- (3, 24, 3, 2, 3, 1, 0),
- (3, 40, 3, 2, 5, 1, 0),
- (6, 80, 3, 2, 5, 1, 0),
- (6, 96, 2, 1, 3, 1, 0),
- (6, 192, 4, 2, 5, 1, 0),
- (6, 320, 1, 1, 3, 1, 0),
- ]
-
- #conv1
- input = self.conv_bn_layer(
- input,
- num_filters=int(32 * scale),
- filter_size=3,
- stride=2,
- padding=1,
- if_act=True,
- name='conv1_1')
-
- # bottleneck sequences
- i = 1
- in_c = int(32 * scale)
- for layer_setting in bottleneck_params_list:
- t, c, n, s, k, ifshortcut, ifse = layer_setting
- i += 1
- input = self.invresi_blocks(
- input=input,
- in_c=in_c,
- t=t,
- c=int(c * scale),
- n=n,
- s=s,
- k=k,
- ifshortcut=ifshortcut,
- ifse=ifse,
- name='conv' + str(i))
- in_c = int(c * scale)
- #last_conv
- input = self.conv_bn_layer(
- input=input,
- num_filters=int(1280 * scale) if scale > 1.0 else 1280,
- filter_size=1,
- stride=1,
- padding=0,
- if_act=True,
- name='conv9')
-
- input = fluid.layers.pool2d(
- input=input,
- pool_size=7,
- pool_stride=1,
- pool_type='avg',
- global_pooling=True)
-
- output = fluid.layers.fc(input=input,
- size=class_dim,
- param_attr=ParamAttr(name='fc10_weights'),
- bias_attr=ParamAttr(name='fc10_offset'))
- return output
-
- def conv_bn_layer(self,
- input,
- filter_size,
- num_filters,
- stride,
- padding,
- num_groups=1,
- if_act=True,
- name=None,
- use_cudnn=True):
- """Build convolution and batch normalization layers.
- Args:
- input: Variable, input.
- filter_size: int, filter size.
- num_filters: int, number of filters.
- stride: int, stride.
- padding: int, padding.
- num_groups: int, number of groups.
- if_act: bool, whether using activation.
- name: str, name.
- use_cudnn: bool, whether use cudnn.
- Returns:
- Variable, layers output.
- """
- conv = fluid.layers.conv2d(
- input=input,
- num_filters=num_filters,
- filter_size=filter_size,
- stride=stride,
- padding=padding,
- groups=num_groups,
- act=None,
- use_cudnn=use_cudnn,
- param_attr=ParamAttr(name=name + '_weights'),
- bias_attr=False)
- bn_name = name + '_bn'
- bn = fluid.layers.batch_norm(
- input=conv,
- param_attr=ParamAttr(name=bn_name + "_scale"),
- bias_attr=ParamAttr(name=bn_name + "_offset"),
- moving_mean_name=bn_name + '_mean',
- moving_variance_name=bn_name + '_variance')
- if if_act:
- return fluid.layers.relu6(bn)
- else:
- return bn
-
- def shortcut(self, input, data_residual):
- """Build shortcut layer.
- Args:
- input: Variable, input.
- data_residual: Variable, residual layer.
- Returns:
- Variable, layer output.
- """
- return fluid.layers.elementwise_add(input, data_residual)
-
- def squeeze_excitation(self,
- input,
- num_channels,
- reduction_ratio,
- name=None):
- """Build squeeze excitation layers.
- Args:
- input: Variable, input.
- num_channels: int, number of channels.
- reduction_ratio: float, reduction ratio.
- name: str, name.
- Returns:
- Variable, layers output.
- """
- pool = fluid.layers.pool2d(
- input=input, pool_size=0, pool_type='avg', global_pooling=True)
- stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
- squeeze = fluid.layers.fc(
- input=pool,
- size=num_channels // reduction_ratio,
- act='relu',
- param_attr=fluid.param_attr.ParamAttr(
- initializer=fluid.initializer.Uniform(-stdv, stdv),
- name=name + '_sqz_weights'),
- bias_attr=ParamAttr(name=name + '_sqz_offset'))
- stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
- excitation = fluid.layers.fc(
- input=squeeze,
- size=num_channels,
- act='sigmoid',
- param_attr=fluid.param_attr.ParamAttr(
- initializer=fluid.initializer.Uniform(-stdv, stdv),
- name=name + '_exc_weights'),
- bias_attr=ParamAttr(name=name + '_exc_offset'))
- scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
- return scale
-
- def inverted_residual_unit(self,
- input,
- num_in_filter,
- num_filters,
- ifshortcut,
- ifse,
- stride,
- filter_size,
- expansion_factor,
- reduction_ratio=4,
- name=None):
- """Build inverted residual unit.
- Args:
- input: Variable, input.
- num_in_filter: int, number of in filters.
- num_filters: int, number of filters.
- ifshortcut: bool, whether using shortcut.
- stride: int, stride.
- filter_size: int, filter size.
- padding: int, padding.
- expansion_factor: float, expansion factor.
- name: str, name.
- Returns:
- Variable, layers output.
- """
- num_expfilter = int(round(num_in_filter * expansion_factor))
- channel_expand = self.conv_bn_layer(
- input=input,
- num_filters=num_expfilter,
- filter_size=1,
- stride=1,
- padding=0,
- num_groups=1,
- if_act=True,
- name=name + '_expand')
-
- bottleneck_conv = self.conv_bn_layer(
- input=channel_expand,
- num_filters=num_expfilter,
- filter_size=filter_size,
- stride=stride,
- padding=int((filter_size - 1) / 2),
- num_groups=num_expfilter,
- if_act=True,
- name=name + '_dwise',
- use_cudnn=False)
-
- linear_out = self.conv_bn_layer(
- input=bottleneck_conv,
- num_filters=num_filters,
- filter_size=1,
- stride=1,
- padding=0,
- num_groups=1,
- if_act=False,
- name=name + '_linear')
- out = linear_out
- if ifshortcut:
- out = self.shortcut(input=input, data_residual=out)
- if ifse:
- scale = self.squeeze_excitation(
- input=linear_out,
- num_channels=num_filters,
- reduction_ratio=reduction_ratio,
- name=name + '_fc')
- out = fluid.layers.elementwise_add(x=out, y=scale, act='relu')
- return out
-
- def invresi_blocks(self,
- input,
- in_c,
- t,
- c,
- n,
- s,
- k,
- ifshortcut,
- ifse,
- name=None):
- """Build inverted residual blocks.
- Args:
- input: Variable, input.
- in_c: int, number of in filters.
- t: float, expansion factor.
- c: int, number of filters.
- n: int, number of layers.
- s: int, stride.
- k: int, filter size.
- ifshortcut: bool, if adding shortcut layers or not.
- ifse: bool, if adding squeeze excitation layers or not.
- name: str, name.
- Returns:
- Variable, layers output.
- """
- first_block = self.inverted_residual_unit(
- input=input,
- num_in_filter=in_c,
- num_filters=c,
- ifshortcut=False,
- ifse=ifse,
- stride=s,
- filter_size=k,
- expansion_factor=t,
- name=name + '_1')
-
- last_residual_block = first_block
- last_c = c
-
- for i in range(1, n):
- last_residual_block = self.inverted_residual_unit(
- input=last_residual_block,
- num_in_filter=last_c,
- num_filters=c,
- ifshortcut=ifshortcut,
- ifse=ifse,
- stride=1,
- filter_size=k,
- expansion_factor=t,
- name=name + '_' + str(i + 1))
- return last_residual_block
diff --git a/PaddleSlim/models/mobilenet.py b/PaddleSlim/models/mobilenet.py
deleted file mode 100644
index 921d6226ca2a65d5c9b57e27bf6607c7376c51f6..0000000000000000000000000000000000000000
--- a/PaddleSlim/models/mobilenet.py
+++ /dev/null
@@ -1,197 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import paddle.fluid as fluid
-from paddle.fluid.initializer import MSRA
-from paddle.fluid.param_attr import ParamAttr
-
-__all__ = ['MobileNet']
-
-train_parameters = {
- "input_size": [3, 224, 224],
- "input_mean": [0.485, 0.456, 0.406],
- "input_std": [0.229, 0.224, 0.225],
- "learning_strategy": {
- "name": "piecewise_decay",
- "batch_size": 256,
- "epochs": [10, 16, 30],
- "steps": [0.1, 0.01, 0.001, 0.0001]
- }
-}
-
-
-class MobileNet():
- def __init__(self):
- self.params = train_parameters
-
- def net(self, input, class_dim=1000, scale=1.0):
- # conv1: 112x112
- input = self.conv_bn_layer(
- input,
- filter_size=3,
- channels=3,
- num_filters=int(32 * scale),
- stride=2,
- padding=1,
- name="conv1")
-
- # 56x56
- input = self.depthwise_separable(
- input,
- num_filters1=32,
- num_filters2=64,
- num_groups=32,
- stride=1,
- scale=scale,
- name="conv2_1")
-
- input = self.depthwise_separable(
- input,
- num_filters1=64,
- num_filters2=128,
- num_groups=64,
- stride=2,
- scale=scale,
- name="conv2_2")
-
- # 28x28
- input = self.depthwise_separable(
- input,
- num_filters1=128,
- num_filters2=128,
- num_groups=128,
- stride=1,
- scale=scale,
- name="conv3_1")
-
- input = self.depthwise_separable(
- input,
- num_filters1=128,
- num_filters2=256,
- num_groups=128,
- stride=2,
- scale=scale,
- name="conv3_2")
-
- # 14x14
- input = self.depthwise_separable(
- input,
- num_filters1=256,
- num_filters2=256,
- num_groups=256,
- stride=1,
- scale=scale,
- name="conv4_1")
-
- input = self.depthwise_separable(
- input,
- num_filters1=256,
- num_filters2=512,
- num_groups=256,
- stride=2,
- scale=scale,
- name="conv4_2")
-
- # 14x14
- for i in range(5):
- input = self.depthwise_separable(
- input,
- num_filters1=512,
- num_filters2=512,
- num_groups=512,
- stride=1,
- scale=scale,
- name="conv5" + "_" + str(i + 1))
- # 7x7
- input = self.depthwise_separable(
- input,
- num_filters1=512,
- num_filters2=1024,
- num_groups=512,
- stride=2,
- scale=scale,
- name="conv5_6")
-
- input = self.depthwise_separable(
- input,
- num_filters1=1024,
- num_filters2=1024,
- num_groups=1024,
- stride=1,
- scale=scale,
- name="conv6")
-
- input = fluid.layers.pool2d(
- input=input,
- pool_size=0,
- pool_stride=1,
- pool_type='avg',
- global_pooling=True)
-
- output = fluid.layers.fc(input=input,
- size=class_dim,
- act='softmax',
- param_attr=ParamAttr(
- initializer=MSRA(), name="fc7_weights"),
- bias_attr=ParamAttr(name="fc7_offset"))
-
- return output
-
- def conv_bn_layer(self,
- input,
- filter_size,
- num_filters,
- stride,
- padding,
- channels=None,
- num_groups=1,
- act='relu',
- use_cudnn=True,
- name=None):
- conv = fluid.layers.conv2d(
- input=input,
- num_filters=num_filters,
- filter_size=filter_size,
- stride=stride,
- padding=padding,
- groups=num_groups,
- act=None,
- use_cudnn=use_cudnn,
- param_attr=ParamAttr(
- initializer=MSRA(), name=name + "_weights"),
- bias_attr=False)
- bn_name = name + "_bn"
- return fluid.layers.batch_norm(
- input=conv,
- act=act,
- param_attr=ParamAttr(name=bn_name + "_scale"),
- bias_attr=ParamAttr(name=bn_name + "_offset"),
- moving_mean_name=bn_name + '_mean',
- moving_variance_name=bn_name + '_variance')
-
- def depthwise_separable(self,
- input,
- num_filters1,
- num_filters2,
- num_groups,
- stride,
- scale,
- name=None):
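-        # a 3x3 depthwise conv (groups == input channels) followed by
-        # a 1x1 pointwise conv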
- depthwise_conv = self.conv_bn_layer(
- input=input,
- filter_size=3,
- num_filters=int(num_filters1 * scale),
- stride=stride,
- padding=1,
- num_groups=int(num_groups * scale),
- use_cudnn=False,
- name=name + "_dw")
-
- pointwise_conv = self.conv_bn_layer(
- input=depthwise_conv,
- filter_size=1,
- num_filters=int(num_filters2 * scale),
- stride=1,
- padding=0,
- name=name + "_sep")
- return pointwise_conv
diff --git a/PaddleSlim/models/resnet.py b/PaddleSlim/models/resnet.py
deleted file mode 100644
index a27bd52db3882c169778141a66b9752976e3a82d..0000000000000000000000000000000000000000
--- a/PaddleSlim/models/resnet.py
+++ /dev/null
@@ -1,165 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import paddle
-import paddle.fluid as fluid
-import math
-from paddle.fluid.param_attr import ParamAttr
-
-__all__ = ["ResNet", "ResNet50", "ResNet101", "ResNet152"]
-
-train_parameters = {
- "input_size": [3, 224, 224],
- "input_mean": [0.485, 0.456, 0.406],
- "input_std": [0.229, 0.224, 0.225],
- "learning_strategy": {
- "name": "piecewise_decay",
- "batch_size": 256,
- "epochs": [10, 16, 30],
- "steps": [0.1, 0.01, 0.001, 0.0001]
- }
-}
-
-
-class ResNet():
- def __init__(self, layers=50):
- self.params = train_parameters
- self.layers = layers
-
- def net(self, input, class_dim=1000, conv1_name='conv1', fc_name=None):
- layers = self.layers
- supported_layers = [50, 101, 152]
-        assert layers in supported_layers, \
-            "supported layers are {}, but got layers = {}".format(supported_layers, layers)
-
- if layers == 50:
- depth = [3, 4, 6, 3]
- elif layers == 101:
- depth = [3, 4, 23, 3]
- elif layers == 152:
- depth = [3, 8, 36, 3]
- num_filters = [64, 128, 256, 512]
-
- # TODO(wanghaoshuang@baidu.com):
- # fix name("conv1") conflict between student and teacher in distillation.
- conv = self.conv_bn_layer(
- input=input,
- num_filters=64,
- filter_size=7,
- stride=2,
- act='relu',
- name=conv1_name)
- conv = fluid.layers.pool2d(
- input=conv,
- pool_size=3,
- pool_stride=2,
- pool_padding=1,
- pool_type='max')
-
- for block in range(len(depth)):
- for i in range(depth[block]):
- if layers in [101, 152] and block == 2:
- if i == 0:
- conv_name = "res" + str(block + 2) + "a"
- else:
- conv_name = "res" + str(block + 2) + "b" + str(i)
- else:
- conv_name = "res" + str(block + 2) + chr(97 + i)
- conv = self.bottleneck_block(
- input=conv,
- num_filters=num_filters[block],
- stride=2 if i == 0 and block != 0 else 1,
- name=conv_name)
-
- pool = fluid.layers.pool2d(
- input=conv, pool_size=7, pool_type='avg', global_pooling=True)
- stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
- out = fluid.layers.fc(input=pool,
- size=class_dim,
- act='softmax',
- name=fc_name,
- param_attr=fluid.param_attr.ParamAttr(
- initializer=fluid.initializer.Uniform(-stdv,
- stdv)))
- return out
-
- def conv_bn_layer(self,
- input,
- num_filters,
- filter_size,
- stride=1,
- groups=1,
- act=None,
- name=None):
- conv = fluid.layers.conv2d(
- input=input,
- num_filters=num_filters,
- filter_size=filter_size,
- stride=stride,
- padding=(filter_size - 1) // 2,
- groups=groups,
- act=None,
- param_attr=ParamAttr(name=name + "_weights"),
- bias_attr=False,
- name=name + '.conv2d.output.1')
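-        # derive BN parameter names from the conv name:
-        # "conv1" -> "bn_conv1", "res2a_branch2a" -> "bn2a_branch2a"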
- if name == "conv1":
- bn_name = "bn_" + name
- else:
- bn_name = "bn" + name[3:]
- return fluid.layers.batch_norm(
- input=conv,
- act=act,
- name=bn_name + '.output.1',
- param_attr=ParamAttr(name=bn_name + '_scale'),
- bias_attr=ParamAttr(bn_name + '_offset'),
- moving_mean_name=bn_name + '_mean',
- moving_variance_name=bn_name + '_variance', )
-
- def shortcut(self, input, ch_out, stride, name):
- ch_in = input.shape[1]
- if ch_in != ch_out or stride != 1:
- return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
- else:
- return input
-
- def bottleneck_block(self, input, num_filters, stride, name):
- conv0 = self.conv_bn_layer(
- input=input,
- num_filters=num_filters,
- filter_size=1,
- act='relu',
- name=name + "_branch2a")
- conv1 = self.conv_bn_layer(
- input=conv0,
- num_filters=num_filters,
- filter_size=3,
- stride=stride,
- act='relu',
- name=name + "_branch2b")
- conv2 = self.conv_bn_layer(
- input=conv1,
- num_filters=num_filters * 4,
- filter_size=1,
- act=None,
- name=name + "_branch2c")
-
- short = self.shortcut(
- input, num_filters * 4, stride, name=name + "_branch1")
-
- return fluid.layers.elementwise_add(
- x=short, y=conv2, act='relu', name=name + ".add.output.5")
-
-
-def ResNet50():
- model = ResNet(layers=50)
- return model
-
-
-def ResNet101():
- model = ResNet(layers=101)
- return model
-
-
-def ResNet152():
- model = ResNet(layers=152)
- return model
diff --git a/PaddleSlim/quant_low_level_api/README.md b/PaddleSlim/quant_low_level_api/README.md
deleted file mode 100644
index afd9818c6431bd568cb147553bc8e9b09486961c..0000000000000000000000000000000000000000
--- a/PaddleSlim/quant_low_level_api/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
----
-Model quantization represents the weights and activations of a neural network with fewer bits. It speeds up inference, shrinks the stored model, and lowers power consumption.
-
-Quantization currently falls into two families: quantization-aware training (QAT) and post-training quantization (PTQ). QAT models the quantization during training to determine the quantization parameters, which yields higher accuracy for complex models. PTQ computes the quantization scale factors from sample data, for example with KL divergence; it requires no retraining and produces a quantized model quickly.
-
-For the theory behind model quantization and the usage of the Low-Level APIs, see:
-* [Model quantization theory](../docs/tutorial.md)
-* [QAT Low-Level API usage and examples](./quantization_aware_training.md)
-* [Post-training quantization Low-Level API usage and examples](./post_training_quantization.md)
diff --git a/PaddleSlim/quant_low_level_api/post_training_quantization.md b/PaddleSlim/quant_low_level_api/post_training_quantization.md
deleted file mode 100644
index ff9d5d56c6cf19fdb00efccec7a68d633897b32a..0000000000000000000000000000000000000000
--- a/PaddleSlim/quant_low_level_api/post_training_quantization.md
+++ /dev/null
@@ -1,147 +0,0 @@
-
-
----
-# Post-Training Quantization Low-Level API: Usage and Examples
-
-## Contents
-
-- [How to use post-training quantization](#1-how-to-use-post-training-quantization)
-- [Post-training quantization example](#2-post-training-quantization-example)
-
-## 1. How to use post-training quantization
-
-1) **Prepare the model and calibration data**
-
-First, prepare a trained FP32 inference model, i.e. a model saved by `save_inference_model()`. Post-training quantization runs forward passes over calibration data, so a calibration dataset is also needed. It should be a representative subset of the test set (or training set), for example randomly drawn samples, so that accurate quantization scale factors can be computed; 100 to 500 samples are recommended.
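-
-For reference, an FP32 model in this format might be produced as follows (a minimal sketch; `image`, `out`, `exe`, and `infer_prog` are placeholders for your own inference network, not part of the API):
-``` python
-import paddle.fluid as fluid
-
-# persist the trained FP32 network in the save_inference_model() format
-fluid.io.save_inference_model(
-    dirname='mobilenetv1_fp32_model',  # hypothetical output directory
-    feeded_var_names=[image.name],     # input variable names
-    target_vars=[out],                 # output variables
-    executor=exe,
-    main_program=infer_prog)
-```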
-
-2) **Configure the calibration data generator**
-
-Post-training quantization reads the calibration data through asynchronous data loading, so you only need to configure a sample_generator that matches the model inputs. sample_generator is a Python generator used as the data source of `DataLoader.set_sample_generator()`, and it **must return a single sample at a time**. See the official documentation on [asynchronous data loading](https://www.paddlepaddle.org.cn/documentation/docs/zh/user_guides/howto/prepare_data/use_py_reader.html).
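-
-As an illustration, a sample_generator over an image/label list file could look like this (a minimal sketch; the file layout, image size, and helper names are assumptions, not part of the API):
-``` python
-import os
-import numpy as np
-from PIL import Image
-
-def calib_sample_generator(image_dir, list_file, size=224):
-    """Build a generator that yields one (image, label) sample per call."""
-    def generator():
-        with open(list_file) as f:
-            for line in f:
-                path, label = line.strip().split()
-                img = Image.open(os.path.join(image_dir, path)).convert('RGB')
-                img = img.resize((size, size), Image.LANCZOS)
-                # HWC uint8 -> CHW float32 in [0, 1]
-                chw = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
-                yield chw, int(label)
-    return generator
-```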
-
-3) **Run post-training quantization**
-
-Install a PaddlePaddle wheel built from the develop branch, then call PostTrainingQuantization to run post-training quantization. The API is described in detail below.
-
-``` python
-class PostTrainingQuantization(
- executor,
- sample_generator,
- model_dir,
- model_filename=None,
- params_filename=None,
- batch_size=10,
- batch_nums=None,
- scope=None,
- algo="KL",
- quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
- is_full_quantize=False,
- is_use_cache_file=False,
- cache_dir="./temp_post_training")
-```
-Call the API above with the parameters required for post-training quantization. Parameters:
-* executor: the executor that runs the model, on CPU or GPU.
-* sample_generator: the calibration data generator configured in step 2.
-* model_dir: path of the model to be quantized, holding the model file and weight files.
-* model_filename: name of the model file; set it when the model file is not named `__model__`.
-* params_filename: name of the weights file; set it when all weights are saved into a single file.
-* batch_size: number of samples read per batch.
-* batch_nums: number of batches to read. If None, every sample from sample_generator is used for post-training quantization; otherwise `batch_size*batch_nums` samples are read.
-* scope: the scope the model runs in; defaults to None, in which case global_scope() is used.
-* algo: method for computing the quantization scale factors of the activation tensors. `KL` uses the KL-divergence method and `direct` uses the abs-max method; the default is `KL`.
-* quantizable_op_type: op types to quantize; defaults to `["conv2d", "depthwise_conv2d", "mul"]`, and the list may contain any op type that supports quantization.
-* is_full_quantize: whether to apply full quantization. If True, every op in the model that supports quantization is quantized; if False, only the op types in `quantizable_op_type`. The currently supported types are: 'conv2d', 'depthwise_conv2d', 'mul', "pool2d", "elementwise_add", "concat", "softmax", "argmax", "transpose", "equal", "gather", "greater_equal", "greater_than", "less_equal", "less_than", "mean", "not_equal", "reshape", "reshape2", "bilinear_interp", "nearest_interp", "trilinear_interp", "slice", "squeeze", "elementwise_sub".
-* is_use_cache_file: whether to use cache files. If True, the data sampled during post-training quantization is written to files on disk; if False, it is all kept in memory. Setting is_use_cache_file to True is recommended when the model or the calibration dataset is large; the default is False.
-* cache_dir: directory the sampled data is written to when is_use_cache_file is True; the temporary files there are removed automatically once quantization finishes.
-
-```
-PostTrainingQuantization.quantize()
-```
-Call this method to start post-training quantization. The runtime depends on the number of samples, the model size, and the op types being quantized; for example, post-training quantization of `MobileNetV1` with 100 images from the ImageNet2012 dataset takes roughly one minute.
-
-```
-PostTrainingQuantization.save_quantized_model(save_model_path)
-```
-Call this method to save the post-training quantized model, where save_model_path is the output path.
-
-**Post-training quantization supports partial quantization**
-* Option 1: set quantizable_op_type, so that only the op types listed there are quantized and every other op type in the model stays unquantized.
-* Option 2: when building the network, define the specific ops that should not be quantized inside a name_scope called `skip_quant`; those ops are then skipped, as in the example below.
-```python
-with fluid.name_scope('skip_quant'):
- pool = fluid.layers.pool2d(input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
-    # the pool2d op above will not be quantized
-```
-
-## 2. Post-training quantization example
-
-The following uses MobileNetV1 to demonstrate the post-training quantization Low-Level API.
-
-> The code for this example lives under [models/PaddleSlim/quant_low_level_api/](https://github.com/PaddlePaddle/models/tree/develop/PaddleSlim/quant_low_level_api). To run it, first clone [models](https://github.com/PaddlePaddle/models.git), then execute the [run_post_training_quanzation.sh](run_post_training_quanzation.sh) script; the quantized model ends up in the `mobilenetv1_int8_model` directory.
-
-1) **Prepare the model and calibration data**
-
-Install the latest PaddlePaddle and prepare a trained FP32 inference model.
-
-Prepare the calibration data with the layout below: the val folder contains 100 images, and val_list.txt contains their labels.
-```bash
-samples_100
-├──val
-└──val_list.txt
-```
-
-2) **Configure the calibration data generator**
-
-MobileNetV1 takes an image and a label as input, so the sample_generator returns one image and one label per call. The full code is in [models/PaddleSlim/reader.py](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/reader.py).
-
-3) **Run post-training quantization**
-
-The core call is shown below; the full script is in [post_training_quantization.py](post_training_quantization.py).
-``` python
-place = fluid.CUDAPlace(0) if args.use_gpu == "True" else fluid.CPUPlace()
-exe = fluid.Executor(place)
-sample_generator = reader.val(data_dir=args.data_path)
-
-ptq = PostTrainingQuantization(
- executor=exe,
- sample_generator=sample_generator,
- model_dir=args.model_dir,
- model_filename=args.model_filename,
- params_filename=args.params_filename,
- batch_size=args.batch_size,
- batch_nums=args.batch_nums,
- algo=args.algo,
- is_full_quantize=args.is_full_quantize == "True")
-quantized_program = ptq.quantize()
-ptq.save_quantized_model(args.save_model_path)
-```
-4) **Evaluate the accuracy of the post-training quantized model**
-
-Using 100 images from the ImageNet2012 test set as the calibration dataset, we apply post-training quantization to `conv2d`, `depthwise_conv2d`, `mul`, `pool2d`, `elementwise_add`, and `concat`, then evaluate on the ImageNet2012 validation set. The table below compares the accuracy of common classification models before and after post-training quantization.
-
-Model | FP32 Top1 | FP32 Top5 | INT8 Top1 | INT8 Top5 | Top1 Diff | Top5 Diff
--|:-:|:-:|:-:|:-:|:-:|:-:
-googlenet | 70.50% | 89.59% | 70.12% | 89.38% | -0.38% | -0.21%
-mobilenetv1 | 70.91% | 89.54% | 70.24% | 89.03% | -0.67% | -0.51%
-mobilenetv2 | 71.90% | 90.56% | 71.36% | 90.17% | -0.54% | -0.39%
-resnet50 | 76.35% | 92.80% | 76.26% | 92.81% | -0.09% | +0.01%
-resnet101 | 77.49% | 93.57% | 75.44% | 92.56% | -2.05% | -1.01%
-vgg16 | 72.08% | 90.63% | 71.93% | 90.64% | -0.15% | +0.01%
-vgg19 | 72.56% | 90.83% | 72.55% | 90.77% | -0.01% | -0.06%
diff --git a/PaddleSlim/quant_low_level_api/post_training_quantization.py b/PaddleSlim/quant_low_level_api/post_training_quantization.py
deleted file mode 100644
index 9b68500882794e8d9e8010359da483479266bef6..0000000000000000000000000000000000000000
--- a/PaddleSlim/quant_low_level_api/post_training_quantization.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import sys
-import os
-import six
-import numpy as np
-import argparse
-import paddle.fluid as fluid
-sys.path.append('..')
-import reader
-from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
- "--model_dir", type=str, default="", help="path/to/fp32_model_params")
-parser.add_argument(
- "--data_path", type=str, default="/dataset/ILSVRC2012/", help="")
-parser.add_argument("--save_model_path", type=str, default="")
-parser.add_argument(
- "--model_filename",
- type=str,
- default=None,
- help="The name of file to load the inference program, If it is None, the default filename __model__ will be used"
-)
-parser.add_argument(
- "--params_filename",
- type=str,
- default=None,
- help="The name of file to load all parameters, If parameters were saved in separate files, set it as None"
-)
-parser.add_argument(
- "--algo",
- type=str,
- default="KL",
- help="use KL or direct method to quantize the activation tensor, set it as KL or direct"
-)
-parser.add_argument("--is_full_quantize", type=str, default="False", help="")
-parser.add_argument("--batch_size", type=int, default=10, help="")
-parser.add_argument("--batch_nums", type=int, default=10, help="")
-parser.add_argument("--use_gpu", type=str, default="False", help="")
-args = parser.parse_args()
-
-print("-------------------args----------------------")
-for arg, value in sorted(six.iteritems(vars(args))):
- print("%s: %s" % (arg, value))
-print("---------------------------------------------")
-
-place = fluid.CUDAPlace(0) if args.use_gpu == "True" else fluid.CPUPlace()
-exe = fluid.Executor(place)
-sample_generator = reader.val(data_dir=args.data_path)
-
-ptq = PostTrainingQuantization(
- executor=exe,
- sample_generator=sample_generator,
- model_dir=args.model_dir,
- model_filename=args.model_filename,
- params_filename=args.params_filename,
- batch_size=args.batch_size,
- batch_nums=args.batch_nums,
- algo=args.algo,
- is_full_quantize=args.is_full_quantize == "True")
-quantized_program = ptq.quantize()
-ptq.save_quantized_model(args.save_model_path)
-
-print("post training quantization finish.\n")
diff --git a/PaddleSlim/quant_low_level_api/quantization_aware_training.md b/PaddleSlim/quant_low_level_api/quantization_aware_training.md
deleted file mode 100644
index 7ea6946f50912f6345bcec6ee0da7a64f81545c3..0000000000000000000000000000000000000000
--- a/PaddleSlim/quant_low_level_api/quantization_aware_training.md
+++ /dev/null
@@ -1,202 +0,0 @@
-
-
----
-# Quantization-Aware Training Low-Level API Examples
-
-## Contents
-
-- [Overview of the QAT Low-Level APIs](#1-overview-of-the-qat-low-level-apis)
-- [QAT with the Low-Level APIs](#2-qat-with-the-low-level-apis)
-## 1. Overview of the QAT Low-Level APIs
-The QAT Low-Level APIs are built on five IrPasses in the PaddlePaddle framework: `QuantizationTransformPass`, `AddQuantDequantPass`, `QuantizationFreezePass`, `ConvertToInt8Pass`, and `TransformForMobilePass`. Their roles are as follows:
-
-* `QuantizationTransformPass`: inserts back-to-back quantize and dequantize ops before each input of operators such as `conv2d`, `depthwise_conv2d`, and `mul` in the IrGraph, and rewires certain inputs of the corresponding backward operators; see Figure 1.
-
-*(Figure 1: the graph after applying QuantizationTransformPass)*
-
-QuantizationTransformPass can restrict quantization to specific op types via the `quantizable_op_type` argument, which defaults to `quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul']`. With `quantizable_op_type=['conv2d']`, for instance, the pass only quantizes the `conv2d` ops in the model. Note that whatever `quantizable_op_type` is set on QuantizationTransformPass must also be passed to QuantizationFreezePass and ConvertToInt8Pass, as sketched below.
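-
-A minimal sketch of keeping the three passes consistent (this assumes all three constructors accept the `quantizable_op_type` keyword, which holds for recent Paddle 1.x releases):
-```python
-import paddle.fluid as fluid
-from paddle.fluid.contrib.slim.quantization import (
-    QuantizationTransformPass, QuantizationFreezePass, ConvertToInt8Pass)
-
-op_types = ['conv2d']  # one shared list for all three passes
-place = fluid.CPUPlace()
-scope = fluid.global_scope()
-
-transform_pass = QuantizationTransformPass(
-    scope=scope, place=place, quantizable_op_type=op_types)
-freeze_pass = QuantizationFreezePass(
-    scope=scope, place=place, quantizable_op_type=op_types)
-convert_int8_pass = ConvertToInt8Pass(
-    scope=scope, place=place, quantizable_op_type=op_types)
-```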
-
-QuantizationTransformPass can also leave individual ops unquantized, as in the following example: first define `skip_pattern`; then, while building the model, create the ops that should not be quantized (here, `conv1`) inside a name_scope matching skip_pattern; finally, pass the configured `skip_pattern` argument when constructing QuantizationTransformPass, so that `conv1` is left unquantized.
-
-```
-# define network
-skip_pattern=['skip_quant']
-......
-with fluid.name_scope(skip_pattern[0]):
- conv1 = fluid.layers.conv2d(
- input=input,
- filter_size=filter_size,
- num_filters=ch_out,
- stride=stride,
- padding=padding,
- act=None,
- bias_attr=bias_attr)
-......
-# init QuantizationTransformPass and set skip_pattern
-transform_pass = QuantizationTransformPass(
- scope=fluid.global_scope(),
- place=place,
- activation_quantize_type=activation_quant_type,
- weight_quantize_type=weight_quantize_type,
- skip_pattern=skip_pattern)
-# apply QuantizationTransformPass
-```
-
-* `AddQuantDequantPass`: inserts `QuantDequant` ops before each input of operators such as `elementwise_add` and `pool2d` in the IrGraph, so that the quantization `scale` of the inputs of the ops to be quantized is collected during QAT. It is used much like QuantizationTransformPass, and likewise supports quantizing only specific op types and skipping individual ops. Note that Paddle Lite does not yet provide int8 kernels for `elementwise_add` and `pool2d`. A usage sketch follows.
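-
-A minimal sketch (this assumes the constructor accepts `scope`, `place`, and `quantizable_op_type`, and that `main_graph`/`test_graph` are the IrGraphs built as in section 2):
-```python
-from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
-
-add_qd_pass = AddQuantDequantPass(
-    scope=fluid.global_scope(),
-    place=place,
-    quantizable_op_type=['elementwise_add', 'pool2d'])
-# insert QuantDequant ops into both the training and the test graphs
-add_qd_pass.apply(main_graph)
-add_qd_pass.apply(test_graph)
-```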
-
-* `QuantizationFreezePass`: reorders the quantize and dequantize ops in the IrGraph, turning the layout of Figure 1 into that of Figure 2. It additionally quantizes the weights of `conv2d`, `depthwise_conv2d`, `mul`, etc. offline to values within the int8_t range (the data type remains float32), which removes weight-quantization work at inference time; see Figure 2:
-
-*(Figure 2: the graph after applying QuantizationFreezePass)*
-
-* `ConvertToInt8Pass`: must run after QuantizationFreezePass. It changes the weight type of the output of QuantizationFreezePass from `FP32` to `INT8`. In other words, the quantized weights can be saved either as float32 (by skipping ConvertToInt8Pass) or as int8_t (by running it); see Figure 3:
-
-*(Figure 3: the graph after applying ConvertToInt8Pass)*
-
-* `TransformForMobilePass`: after this pass, the model is compatible with the [paddle-mobile](https://github.com/PaddlePaddle/paddle-mobile) inference library. In paddle-mobile the quantize and dequantize ops are named `quantize` and `dequantize`: the `quantize` op behaves like the `fake_quantize_abs_max` op family in the PaddlePaddle framework, and the `dequantize` op matches the `fake_dequantize_max_abs` op family. To run a QAT model with paddle-mobile, ops such as `fake_quantize_abs_max` must therefore be rewritten as `quantize` ops, and ops such as `fake_dequantize_max_abs` as `dequantize` ops; see Figure 4:
-
-*(Figure 4: the graph after applying TransformForMobilePass)*
-
-## 2. QAT with the Low-Level APIs
-This section uses ResNet50 and MobileNetV1 to walk through QAT with the PaddlePaddle Low-Level APIs:
-
-1) Clone the [Paddle models repo](https://github.com/PaddlePaddle/models):
-```bash
-git clone https://github.com/PaddlePaddle/models.git
-```
-
-2) Prepare the datasets (a training set and a validation set). For the ILSVRC2012 dataset, the layout should be:
-```bash
-data
-└──ILSVRC2012
- ├── train
- ├── train_list.txt
- ├── val
- └── val_list.txt
-```
-3) Switch to the `models/PaddleSlim/quant_low_level_api` directory, edit `run_quantization_aware_training.sh` so that **data_dir** points at the dataset prepared in step 2), and run the `run_quantization_aware_training.sh` script to start QAT.
-
-### 2.1 QAT Low-Level API summary
-
-* Following [quantization_aware_training.py](quantization_aware_training.py), the Low-Level API workflow can be summarized as:
-```python
-#startup_program = fluid.Program()
-#train_program = fluid.Program()
-#train_cost = build_program(
-# main_prog=train_program,
-# startup_prog=startup_program,
-# is_train=True)
-#build_program(
-# main_prog=test_program,
-# startup_prog=startup_program,
-# is_train=False)
-#test_program = test_program.clone(for_test=True)
-# The above pseudo code is used to build up the model.
-# ---------------------------------------------------------------------------------
-# The following code are part of Quantization Aware Training logic:
-# 0) Convert Programs to IrGraphs.
-main_graph = IrGraph(core.Graph(train_program.desc), for_test=False)
-test_graph = IrGraph(core.Graph(test_program.desc), for_test=True)
-# 1) Make some quantization transforms in the graph before training and testing.
-# According to the weight and activation quantization type, the graph will be added
-# some fake quantize operators and fake dequantize operators.
-transform_pass = QuantizationTransformPass(
- scope=fluid.global_scope(), place=place,
- activation_quantize_type=activation_quant_type,
- weight_quantize_type=weight_quant_type)
-transform_pass.apply(main_graph)
-transform_pass.apply(test_graph)
-# Compile the train_graph for training.
-binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
- loss_name=train_cost.name, build_strategy=build_strategy)
-# Convert the transformed test_graph to test program for testing.
-test_prog = test_graph.to_program()
-# For training
-exe.run(binary, fetch_list=train_fetch_list)
-# For testing
-exe.run(program=test_prog, fetch_list=test_fetch_list)
-# 2) Freeze the graph after training by adjusting the quantize
-# operators' order for the inference.
-freeze_pass = QuantizationFreezePass(
- scope=fluid.global_scope(),
- place=place,
- weight_quantize_type=weight_quant_type)
-freeze_pass.apply(test_graph)
-# 3) Convert the weights into int8_t type.
-# [This step is optional.]
-convert_int8_pass = ConvertToInt8Pass(scope=fluid.global_scope(), place=place)
-convert_int8_pass.apply(test_graph)
-# 4) Convert the frozen graph for paddle-mobile execution.
-# [This step is optional, but if you execute it, you must also execute step 3).]
-mobile_pass = TransformForMobilePass()
-mobile_pass.apply(test_graph)
-```
-* The options configured in the [run_quantization_aware_training.sh](run_quantization_aware_training.sh) script:
-
-```bash
- --model: the model to train, e.g. MobileNet or ResNet50.
- --pretrained_fp32_model: location of the pretrained float32 parameters.
- --checkpoint: checkpoint path for resuming training. If a checkpoint path is given, pretrained_fp32_model should not be set.
- --use_gpu: whether to train on GPU.
- --data_dir: location of the training and validation datasets.
- --batch_size: training batch size.
- --total_images: total number of training images.
- --class_dim: total number of classes.
- --image_shape: input image shape.
- --model_save_dir: directory the models are saved to.
- --lr_strategy: learning rate decay strategy.
- --num_epochs: total number of training epochs.
- --lr: initial learning rate; when fine-tuning from pretrained parameters, a small value is usually used.
- --act_quant_type: activation quantization type, one of moving_average_abs_max, range_abs_max, abs_max.
- --wt_quant_type: weight quantization type, one of abs_max, channel_wise_abs_max.
-```
-
-> **Note:** after QAT finishes, three directories appear under the chosen model save path: float, int8, and mobile. Their contents are:
-> - **float:** a quantized model whose parameters lie in the int8 range but are stored as float32.
-> - **int8:** a quantized model whose parameters lie in the int8 range and are stored as int8.
-> - **mobile:** a quantized model with the same parameters as the int8 directory, packaged to be compatible with [paddle-mobile](https://github.com/PaddlePaddle/paddle-mobile).
->
-> **Caution:** on the server side, the PaddlePaddle framework currently only supports inference with the quantized model in the float directory.
-
-### 2.2 QAT model accuracy
-
-Models are trained on the ImageNet2012 training set and evaluated on the ImageNet2012 validation set. We quantize `conv2d`, `depthwise_conv2d`, `mul`, `pool2d`, `elementwise_add`, and `concat`, training for 5 epochs. The table below lists the accuracy of common classification models before and after QAT.
-
-Model | FP32 Top1 | FP32 Top5 | INT8 Top1 | INT8 Top5 | Top1 Diff | Top5 Diff
--|:-:|:-:|:-:|:-:|:-:|:-:
-googlenet | 70.50% | 89.59% | 69.96% | 89.18% | -0.54% | -0.41%
-mobilenetv1 | 70.91% | 89.54% | 70.50% | 89.42% | -0.41% | -0.12%
-mobilenetv2 | 71.90% | 90.56% | 72.05% | 90.56% | +0.15% | -0.00%
-resnet50 | 76.35% | 92.80% | 76.52% | 92.93% | +0.17% | +0.13%
-resnet101 | 77.49% | 93.57% | 77.80% | 93.78% | +0.31% | +0.21%
-vgg16 | 72.08% | 90.63% | 71.53% | 89.70% | -0.55% | -0.93%
-vgg19 | 72.56% | 90.83% | 71.99% | 89.93% | -0.57% | -0.90%
diff --git a/PaddleSlim/quant_low_level_api/quantization_aware_training.py b/PaddleSlim/quant_low_level_api/quantization_aware_training.py
deleted file mode 100644
index b25fa367f2cf4801575535237a84088f31e87370..0000000000000000000000000000000000000000
--- a/PaddleSlim/quant_low_level_api/quantization_aware_training.py
+++ /dev/null
@@ -1,392 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import os
-import numpy as np
-import time
-import functools
-import paddle
-import paddle.fluid as fluid
-from paddle.fluid.framework import IrGraph
-from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
-from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
-from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
-from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
-from paddle.fluid import core
-import argparse
-import subprocess
-import sys
-sys.path.append('..')
-import reader
-import models
-from utility import add_arguments, print_arguments
-from utility import save_persistable_nodes, load_persistable_nodes
-
-parser = argparse.ArgumentParser(description=__doc__)
-add_arg = functools.partial(add_arguments, argparser=parser)
-# yapf: disable
-add_arg('batch_size', int, 256, "Minibatch size.")
-add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
-add_arg('total_images', int, 1281167, "Training image number.")
-add_arg('num_epochs', int, 120, "number of epochs.")
-add_arg('class_dim', int, 1000, "Class number.")
-add_arg('image_shape', str, "3,224,224", "input image size")
-add_arg('model_save_dir', str, "output", "model save directory")
-add_arg('pretrained_fp32_model', str, None, "Whether to use the pretrained float32 model to initialize the weights.")
-add_arg('checkpoint', str, None, "Whether to resume the training process from the checkpoint.")
-add_arg('lr', float, 0.1, "set learning rate.")
-add_arg('lr_strategy', str, "piecewise_decay", "Set the learning rate decay strategy.")
-add_arg('model', str, "SE_ResNeXt50_32x4d", "Set the network to use.")
-add_arg('data_dir', str, "./data/ILSVRC2012", "The ImageNet dataset root dir.")
-add_arg('act_quant_type', str, "abs_max", "quantization type for activation, valid value:'abs_max','range_abs_max', 'moving_average_abs_max'" )
-add_arg('wt_quant_type', str, "abs_max", "quantization type for weight, valid value:'abs_max','channel_wise_abs_max'" )
-# yapf: enable
-
-def optimizer_setting(params):
- ls = params["learning_strategy"]
- if ls["name"] == "piecewise_decay":
- if "total_images" not in params:
- total_images = 1281167
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
- step = int(total_images / batch_size + 1)
-
- bd = [step * e for e in ls["epochs"]]
- print("decay list:{}".format(bd))
- base_lr = params["lr"]
-        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
- optimizer = fluid.optimizer.Momentum(
- learning_rate=fluid.layers.piecewise_decay(
- boundaries=bd, values=lr),
- momentum=0.9,
- regularization=fluid.regularizer.L2Decay(1e-4))
-
- elif ls["name"] == "cosine_decay":
- if "total_images" not in params:
- total_images = 1281167
- else:
- total_images = params["total_images"]
-
- batch_size = ls["batch_size"]
- step = int(total_images / batch_size + 1)
-
- lr = params["lr"]
- num_epochs = params["num_epochs"]
-
- optimizer = fluid.optimizer.Momentum(
- learning_rate=fluid.layers.cosine_decay(
- learning_rate=lr, step_each_epoch=step, epochs=num_epochs),
- momentum=0.9,
- regularization=fluid.regularizer.L2Decay(4e-5))
- elif ls["name"] == "exponential_decay":
- if "total_images" not in params:
- total_images = 1281167
- else:
- total_images = params["total_images"]
- batch_size = ls["batch_size"]
-        step = int(total_images / batch_size + 1)
-        lr = params["lr"]
-        num_epochs = params["num_epochs"]
-        learning_decay_rate_factor = ls["learning_decay_rate_factor"]
-        num_epochs_per_decay = ls["num_epochs_per_decay"]
-        NUM_GPUS = 1
-
-        optimizer = fluid.optimizer.Momentum(
-            learning_rate=fluid.layers.exponential_decay(
-                learning_rate=lr * NUM_GPUS,
-                decay_steps=step * num_epochs_per_decay / NUM_GPUS,
-                decay_rate=learning_decay_rate_factor),
-            momentum=0.9,
-            regularization=fluid.regularizer.L2Decay(4e-5))
-
- else:
- lr = params["lr"]
- optimizer = fluid.optimizer.Momentum(
- learning_rate=lr,
- momentum=0.9,
- regularization=fluid.regularizer.L2Decay(1e-4))
-
- return optimizer
-
-def net_config(image, label, model, args):
- model_list = [m for m in dir(models) if "__" not in m]
-    assert args.model in model_list, "{} is not in list: {}".format(
-        args.model, model_list)
-
- class_dim = args.class_dim
- model_name = args.model
-
- if model_name == "GoogleNet":
- out0, out1, out2 = model.net(input=image, class_dim=class_dim)
- cost0 = fluid.layers.cross_entropy(input=out0, label=label)
- cost1 = fluid.layers.cross_entropy(input=out1, label=label)
- cost2 = fluid.layers.cross_entropy(input=out2, label=label)
- avg_cost0 = fluid.layers.mean(x=cost0)
- avg_cost1 = fluid.layers.mean(x=cost1)
- avg_cost2 = fluid.layers.mean(x=cost2)
-
- avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
- acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=out0, label=label, k=5)
- out = out0
- else:
- out = model.net(input=image, class_dim=class_dim)
- cost = fluid.layers.cross_entropy(input=out, label=label)
-
- avg_cost = fluid.layers.mean(x=cost)
- acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
- acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
-
- return out, avg_cost, acc_top1, acc_top5
-
-
-def build_program(is_train, main_prog, startup_prog, args):
- image_shape = [int(m) for m in args.image_shape.split(",")]
- model_name = args.model
- model_list = [m for m in dir(models) if "__" not in m]
-    assert model_name in model_list, "{} is not in list: {}".format(
-        args.model, model_list)
- model = models.__dict__[model_name]()
- with fluid.program_guard(main_prog, startup_prog):
- py_reader = fluid.layers.py_reader(
- capacity=16,
- shapes=[[-1] + image_shape, [-1, 1]],
- lod_levels=[0, 0],
- dtypes=["float32", "int64"],
- use_double_buffer=True)
- with fluid.unique_name.guard():
- image, label = fluid.layers.read_file(py_reader)
- out, avg_cost, acc_top1, acc_top5 = net_config(image, label, model, args)
- avg_cost.persistable = True
- acc_top1.persistable = True
- acc_top5.persistable = True
- if is_train:
- params = model.params
- params["total_images"] = args.total_images
- params["lr"] = args.lr
- params["num_epochs"] = args.num_epochs
- params["learning_strategy"]["batch_size"] = args.batch_size
- params["learning_strategy"]["name"] = args.lr_strategy
-
- optimizer = optimizer_setting(params)
- optimizer.minimize(avg_cost)
- global_lr = optimizer._global_learning_rate()
- if is_train:
- return image, out, py_reader, avg_cost, acc_top1, acc_top5, global_lr
- else:
- return image, out, py_reader, avg_cost, acc_top1, acc_top5
-
-def train(args):
- # parameters from arguments
- model_name = args.model
- pretrained_fp32_model = args.pretrained_fp32_model
- checkpoint = args.checkpoint
- model_save_dir = args.model_save_dir
- data_dir = args.data_dir
- activation_quant_type = args.act_quant_type
- weight_quant_type = args.wt_quant_type
- print("Using %s as the actiavtion quantize type." % activation_quant_type)
- print("Using %s as the weight quantize type." % weight_quant_type)
-
- startup_prog = fluid.Program()
- train_prog = fluid.Program()
- test_prog = fluid.Program()
-
- _, _, train_py_reader, train_cost, train_acc1, train_acc5, global_lr = build_program(
- is_train=True,
- main_prog=train_prog,
- startup_prog=startup_prog,
- args=args)
- image, out, test_py_reader, test_cost, test_acc1, test_acc5 = build_program(
- is_train=False,
- main_prog=test_prog,
- startup_prog=startup_prog,
- args=args)
- test_prog = test_prog.clone(for_test=True)
-
- place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(startup_prog)
- main_graph = IrGraph(core.Graph(train_prog.desc), for_test=False)
- test_graph = IrGraph(core.Graph(test_prog.desc), for_test=True)
-
- if pretrained_fp32_model:
- def if_exist(var):
- return os.path.exists(os.path.join(pretrained_fp32_model, var.name))
- fluid.io.load_vars(
- exe, pretrained_fp32_model, main_program=train_prog, predicate=if_exist)
-
- if args.use_gpu:
- visible_device = os.getenv('CUDA_VISIBLE_DEVICES')
- if visible_device:
- device_num = len(visible_device.split(','))
- else:
- device_num = subprocess.check_output(
- ['nvidia-smi', '-L']).decode().count('\n')
- else:
- device_num = 1
-
-    train_batch_size = int(args.batch_size / device_num)
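-    # with 'abs_max' activation quantization, scales are computed online from
-    # the running batch, so evaluation uses batch size 1 (presumed rationale)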
- test_batch_size = 1 if activation_quant_type == 'abs_max' else 8
- train_reader = paddle.batch(
- reader.train(data_dir=data_dir), batch_size=train_batch_size, drop_last=True)
- test_reader = paddle.batch(reader.val(data_dir=data_dir), batch_size=test_batch_size)
-
- train_py_reader.decorate_paddle_reader(train_reader)
- test_py_reader.decorate_paddle_reader(test_reader)
-
- train_fetch_list = [train_cost.name, train_acc1.name, train_acc5.name, global_lr.name]
- test_fetch_list = [test_cost.name, test_acc1.name, test_acc5.name]
-
- # 1. Make some quantization transforms in the graph before training and testing.
- # According to the weight and activation quantization type, the graph will be added
- # some fake quantize operators and fake dequantize operators.
- transform_pass = QuantizationTransformPass(
- scope=fluid.global_scope(), place=place,
- activation_quantize_type=activation_quant_type,
- weight_quantize_type=weight_quant_type)
- transform_pass.apply(main_graph)
- transform_pass.apply(test_graph)
-
- if checkpoint:
- load_persistable_nodes(exe, checkpoint, main_graph)
-
- build_strategy = fluid.BuildStrategy()
- build_strategy.memory_optimize = False
- build_strategy.enable_inplace = False
- build_strategy.fuse_all_reduce_ops = False
- binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
- loss_name=train_cost.name, build_strategy=build_strategy)
- test_prog = test_graph.to_program()
- params = models.__dict__[args.model]().params
- for pass_id in range(params["num_epochs"]):
-
- train_py_reader.start()
-
- train_info = [[], [], []]
- test_info = [[], [], []]
- train_time = []
- batch_id = 0
- try:
- while True:
- t1 = time.time()
- loss, acc1, acc5, lr = exe.run(binary, fetch_list=train_fetch_list)
- t2 = time.time()
- period = t2 - t1
- loss = np.mean(np.array(loss))
- acc1 = np.mean(np.array(acc1))
- acc5 = np.mean(np.array(acc5))
- train_info[0].append(loss)
- train_info[1].append(acc1)
- train_info[2].append(acc5)
- lr = np.mean(np.array(lr))
- train_time.append(period)
- if batch_id % 10 == 0:
- print("Pass {0}, trainbatch {1}, loss {2}, \
- acc1 {3}, acc5 {4}, lr {5}, time {6}"
- .format(pass_id, batch_id, loss, acc1, acc5, "%.6f" %
- lr, "%2.2f sec" % period))
- sys.stdout.flush()
- batch_id += 1
- except fluid.core.EOFException:
- train_py_reader.reset()
-
- train_loss = np.array(train_info[0]).mean()
- train_acc1 = np.array(train_info[1]).mean()
- train_acc5 = np.array(train_info[2]).mean()
-
- test_py_reader.start()
-
- test_batch_id = 0
- try:
- while True:
- t1 = time.time()
- loss, acc1, acc5 = exe.run(program=test_prog,
- fetch_list=test_fetch_list)
- t2 = time.time()
- period = t2 - t1
- loss = np.mean(loss)
- acc1 = np.mean(acc1)
- acc5 = np.mean(acc5)
- test_info[0].append(loss)
- test_info[1].append(acc1)
- test_info[2].append(acc5)
- if test_batch_id % 10 == 0:
- print("Pass {0},testbatch {1},loss {2}, \
- acc1 {3},acc5 {4},time {5}"
- .format(pass_id, test_batch_id, loss, acc1, acc5,
- "%2.2f sec" % period))
- sys.stdout.flush()
- test_batch_id += 1
- except fluid.core.EOFException:
- test_py_reader.reset()
-
- test_loss = np.array(test_info[0]).mean()
- test_acc1 = np.array(test_info[1]).mean()
- test_acc5 = np.array(test_info[2]).mean()
-
- print("End pass {0}, train_loss {1}, train_acc1 {2}, train_acc5 {3}, "
- "test_loss {4}, test_acc1 {5}, test_acc5 {6}".format(
- pass_id, train_loss, train_acc1, train_acc5, test_loss,
- test_acc1, test_acc5))
- sys.stdout.flush()
-
- save_checkpoint_path = os.path.join(model_save_dir, model_name, str(pass_id))
- if not os.path.isdir(save_checkpoint_path):
- os.makedirs(save_checkpoint_path)
- save_persistable_nodes(exe, save_checkpoint_path, main_graph)
-
- model_path = os.path.join(model_save_dir, model_name, args.act_quant_type)
- float_path = os.path.join(model_path, 'float')
- int8_path = os.path.join(model_path, 'int8')
- mobile_path = os.path.join(model_path, 'mobile')
- if not os.path.isdir(model_path):
- os.makedirs(model_path)
-
- # 2. Freeze the graph after training by adjusting the quantize
- # operators' order for the inference.
- freeze_pass = QuantizationFreezePass(
- scope=fluid.global_scope(),
- place=place,
- weight_quantize_type=weight_quant_type)
- freeze_pass.apply(test_graph)
- server_program = test_graph.to_program()
- fluid.io.save_inference_model(
- dirname=float_path,
- feeded_var_names=[image.name],
- target_vars=[out], executor=exe,
- main_program=server_program)
-
- # 3. Convert the weights into int8_t type.
- # (This step is optional.)
- convert_int8_pass = ConvertToInt8Pass(scope=fluid.global_scope(), place=place)
- convert_int8_pass.apply(test_graph)
- server_int8_program = test_graph.to_program()
- fluid.io.save_inference_model(
- dirname=int8_path,
- feeded_var_names=[image.name],
- target_vars=[out], executor=exe,
- main_program=server_int8_program)
-
-    # 4. Convert the frozen graph for paddle-mobile execution.
- # (This step is optional.)
- mobile_pass = TransformForMobilePass()
- mobile_pass.apply(test_graph)
- mobile_program = test_graph.to_program()
- fluid.io.save_inference_model(
- dirname=mobile_path,
- feeded_var_names=[image.name],
- target_vars=[out], executor=exe,
- main_program=mobile_program)
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
- train(args)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/quant_low_level_api/run_post_training_quanzation.sh b/PaddleSlim/quant_low_level_api/run_post_training_quanzation.sh
deleted file mode 100644
index 1d9ea31f93b6034d95580ac398fdc6688858905d..0000000000000000000000000000000000000000
--- a/PaddleSlim/quant_low_level_api/run_post_training_quanzation.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-export CUDA_VISIBLE_DEVICES=0
-
-root_url="https://paddle-inference-dist.bj.bcebos.com/int8"
-mobilenetv1="mobilenetv1_fp32_model"
-samples="samples_100"
-if [ ! -d ${mobilenetv1} ]; then
- wget ${root_url}/${mobilenetv1}.tgz
- tar zxf ${mobilenetv1}.tgz
-fi
-if [ ! -d ${samples} ]; then
- wget ${root_url}/${samples}.tgz
- tar zxf ${samples}.tgz
-fi
-
-python post_training_quantization.py \
- --model_dir=${mobilenetv1} \
- --data_path=${samples} \
- --save_model_path="mobilenetv1_int8_model" \
- --algo="KL" \
- --is_full_quantize=False \
- --batch_size=10 \
- --batch_nums=10 \
-    --use_gpu=True
diff --git a/PaddleSlim/quant_low_level_api/run_quantization_aware_training.sh b/PaddleSlim/quant_low_level_api/run_quantization_aware_training.sh
deleted file mode 100644
index 1996e7adeb5abc332c15ccd81a6755820bc57f72..0000000000000000000000000000000000000000
--- a/PaddleSlim/quant_low_level_api/run_quantization_aware_training.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-
-# download pretrain model
-root_url="https://paddle-inference-dist.bj.bcebos.com/int8/pretrain"
-MobileNetV1="MobileNetV1_pretrained.zip"
-ResNet50="ResNet50_pretrained.zip"
-GoogleNet="GoogleNet_pretrained.tar"
-data_dir='Your image dataset path, e.g. ILSVRC2012'
-pretrain_dir='../pretrain'
-
-if [ ! -d ${pretrain_dir} ]; then
- mkdir ${pretrain_dir}
-fi
-
-cd ${pretrain_dir}
-
-if [ ! -f ${MobileNetV1} ]; then
- wget ${root_url}/${MobileNetV1}
- unzip ${MobileNetV1}
-fi
-
-if [ ! -f ${ResNet50} ]; then
- wget ${root_url}/${ResNet50}
- unzip ${ResNet50}
-fi
-
-if [ ! -f ${GoogleNet} ]; then
- wget ${root_url}/${GoogleNet}
- tar xf ${GoogleNet}
-fi
-
-cd -
-
-
-export CUDA_VISIBLE_DEVICES=0,1,2,3
-
-#MobileNet v1:
-python quantization_aware_training.py \
- --model=MobileNet \
- --pretrained_fp32_model=${pretrain_dir}/MobileNetV1_pretrained \
- --use_gpu=True \
- --data_dir=${data_dir} \
- --batch_size=256 \
- --total_images=1281167 \
- --class_dim=1000 \
- --image_shape=3,224,224 \
- --model_save_dir=output/ \
- --lr_strategy=piecewise_decay \
- --num_epochs=20 \
- --lr=0.0001 \
- --act_quant_type=abs_max \
- --wt_quant_type=abs_max
-
-
-#ResNet50:
-#python quantization_aware_training.py \
-# --model=ResNet50 \
-# --pretrained_fp32_model=${pretrain_dir}/ResNet50_pretrained \
-# --use_gpu=True \
-# --data_dir=${data_dir} \
-# --batch_size=128 \
-# --total_images=1281167 \
-# --class_dim=1000 \
-# --image_shape=3,224,224 \
-# --model_save_dir=output/ \
-# --lr_strategy=piecewise_decay \
-# --num_epochs=20 \
-# --lr=0.0001 \
-# --act_quant_type=abs_max \
-# --wt_quant_type=abs_max
-
diff --git a/PaddleSlim/reader.py b/PaddleSlim/reader.py
deleted file mode 100644
index e7dc21b7024458d0bdbe5a5f58cece3a148055f5..0000000000000000000000000000000000000000
--- a/PaddleSlim/reader.py
+++ /dev/null
@@ -1,191 +0,0 @@
-import os
-import math
-import random
-import functools
-import numpy as np
-import paddle
-from PIL import Image, ImageEnhance
-
-random.seed(0)
-np.random.seed(0)
-
-DATA_DIM = 224
-
-THREAD = 16
-BUF_SIZE = 10240
-
-DATA_DIR = 'data/ILSVRC2012'
-
-img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
-img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
-
-
-def resize_short(img, target_size):
- percent = float(target_size) / min(img.size[0], img.size[1])
- resized_width = int(round(img.size[0] * percent))
- resized_height = int(round(img.size[1] * percent))
- img = img.resize((resized_width, resized_height), Image.LANCZOS)
- return img
-
-
-def crop_image(img, target_size, center):
- width, height = img.size
- size = target_size
-    if center:
-        w_start = (width - size) // 2
-        h_start = (height - size) // 2
- else:
- w_start = np.random.randint(0, width - size + 1)
- h_start = np.random.randint(0, height - size + 1)
- w_end = w_start + size
- h_end = h_start + size
- img = img.crop((w_start, h_start, w_end, h_end))
- return img
-
-
-def random_crop(img, size, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
- aspect_ratio = math.sqrt(np.random.uniform(*ratio))
- w = 1. * aspect_ratio
- h = 1. / aspect_ratio
-
- bound = min((float(img.size[0]) / img.size[1]) / (w**2),
- (float(img.size[1]) / img.size[0]) / (h**2))
- scale_max = min(scale[1], bound)
- scale_min = min(scale[0], bound)
-
- target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,
- scale_max)
- target_size = math.sqrt(target_area)
- w = int(target_size * w)
- h = int(target_size * h)
-
- i = np.random.randint(0, img.size[0] - w + 1)
- j = np.random.randint(0, img.size[1] - h + 1)
-
- img = img.crop((i, j, i + w, j + h))
- img = img.resize((size, size), Image.LANCZOS)
- return img
-
-
-def rotate_image(img):
- angle = np.random.randint(-10, 11)
- img = img.rotate(angle)
- return img
-
-
-def distort_color(img):
- def random_brightness(img, lower=0.5, upper=1.5):
- e = np.random.uniform(lower, upper)
- return ImageEnhance.Brightness(img).enhance(e)
-
- def random_contrast(img, lower=0.5, upper=1.5):
- e = np.random.uniform(lower, upper)
- return ImageEnhance.Contrast(img).enhance(e)
-
- def random_color(img, lower=0.5, upper=1.5):
- e = np.random.uniform(lower, upper)
- return ImageEnhance.Color(img).enhance(e)
-
- ops = [random_brightness, random_contrast, random_color]
- np.random.shuffle(ops)
-
- img = ops[0](img)
- img = ops[1](img)
- img = ops[2](img)
-
- return img
-
-
-def process_image(sample, mode, color_jitter, rotate):
- img_path = sample[0]
-
- img = Image.open(img_path)
- if mode == 'train':
- if rotate: img = rotate_image(img)
- img = random_crop(img, DATA_DIM)
- else:
- img = resize_short(img, target_size=256)
- img = crop_image(img, target_size=DATA_DIM, center=True)
- if mode == 'train':
- if color_jitter:
- img = distort_color(img)
- if np.random.randint(0, 2) == 1:
- img = img.transpose(Image.FLIP_LEFT_RIGHT)
-
- if img.mode != 'RGB':
- img = img.convert('RGB')
-
- img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
- img -= img_mean
- img /= img_std
-
- if mode == 'train' or mode == 'val':
- return img, sample[1]
- elif mode == 'test':
- return [img]
-
-
-def _reader_creator(file_list,
- mode,
- shuffle=False,
- color_jitter=False,
- rotate=False,
- data_dir=DATA_DIR,
- batch_size=1):
- def reader():
- try:
- with open(file_list) as flist:
- full_lines = [line.strip() for line in flist]
- if shuffle:
- np.random.shuffle(full_lines)
- if mode == 'train' and os.getenv('PADDLE_TRAINING_ROLE'):
- # distributed mode if the env var `PADDLE_TRAINING_ROLE` exists
- trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
- trainer_count = int(os.getenv("PADDLE_TRAINERS", "1"))
- per_node_lines = len(full_lines) // trainer_count
- lines = full_lines[trainer_id * per_node_lines:(
- trainer_id + 1) * per_node_lines]
- print(
- "read images from %d, length: %d, lines length: %d, total: %d"
- % (trainer_id * per_node_lines, per_node_lines,
- len(lines), len(full_lines)))
- else:
- lines = full_lines
-
- for line in lines:
- if mode == 'train' or mode == 'val':
- img_path, label = line.split()
- img_path = os.path.join(data_dir, img_path)
- yield img_path, int(label)
- elif mode == 'test':
- img_path = os.path.join(data_dir, line)
- yield [img_path]
- except Exception as e:
- print("Reader failed!\n{}".format(str(e)))
- os._exit(1)
-
- mapper = functools.partial(
- process_image, mode=mode, color_jitter=color_jitter, rotate=rotate)
-
- return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE)
-
-
-def train(data_dir=DATA_DIR):
- file_list = os.path.join(data_dir, 'train_list.txt')
- return _reader_creator(
- file_list,
- 'train',
- shuffle=True,
- color_jitter=False,
- rotate=False,
- data_dir=data_dir)
-
-
-def val(data_dir=DATA_DIR):
- file_list = os.path.join(data_dir, 'val_list.txt')
- return _reader_creator(file_list, 'val', shuffle=False, data_dir=data_dir)
-
-
-def test(data_dir=DATA_DIR):
- file_list = os.path.join(data_dir, 'val_list.txt')
- return _reader_creator(file_list, 'test', shuffle=False, data_dir=data_dir)
diff --git a/PaddleSlim/run.sh b/PaddleSlim/run.sh
deleted file mode 100644
index 89bffa4d0ec26cecd67c04ccf92b01683d4630ad..0000000000000000000000000000000000000000
--- a/PaddleSlim/run.sh
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env bash
-
-# download pretrain model
-root_url="http://paddle-imagenet-models-name.bj.bcebos.com"
-MobileNetV1="MobileNetV1_pretrained.tar"
-ResNet50="ResNet50_pretrained.tar"
-pretrain_dir='./pretrain'
-
-if [ ! -d ${pretrain_dir} ]; then
- mkdir ${pretrain_dir}
-fi
-
-cd ${pretrain_dir}
-
-if [ ! -f ${MobileNetV1} ]; then
- wget ${root_url}/${MobileNetV1}
- tar xf ${MobileNetV1}
-fi
-
-if [ ! -f ${ResNet50} ]; then
- wget ${root_url}/${ResNet50}
- tar xf ${ResNet50}
-fi
-
-cd -
-
-# enable GC strategy
-export FLAGS_fast_eager_deletion_mode=1
-export FLAGS_eager_delete_tensor_gb=0.0
-
-# for distillation
-#-----------------
-export CUDA_VISIBLE_DEVICES=0,1,2,3
-
-
-# Fixing name conflicts in distillation
-cd ${pretrain_dir}/ResNet50_pretrained
-mv conv1_weights res_conv1_weights
-mv fc_0.w_0 res_fc.w_0
-mv fc_0.b_0 res_fc.b_0
-cd -
-python compress.py \
---model "MobileNet" \
---teacher_model "ResNet50" \
---teacher_pretrained_model ./pretrain/ResNet50_pretrained \
---compress_config ./configs/mobilenetv1_resnet50_distillation.yaml
-
-cd ${pretrain_dir}/ResNet50_pretrained
-mv res_conv1_weights conv1_weights
-mv res_fc.w_0 fc_0.w_0
-mv res_fc.b_0 fc_0.b_0
-cd -
-
-# for sensitivity filter pruning
-#-------------------------------
-#export CUDA_VISIBLE_DEVICES=0
-#python compress.py \
-#--model "MobileNet" \
-#--pretrained_model ./pretrain/MobileNetV1_pretrained \
-#--compress_config ./configs/filter_pruning_sen.yaml
-
-# for uniform filter pruning
-#---------------------------
-#export CUDA_VISIBLE_DEVICES=0
-#python compress.py \
-#--model "MobileNet" \
-#--pretrained_model ./pretrain/MobileNetV1_pretrained \
-#--compress_config ./configs/filter_pruning_uniform.yaml
-
-# for auto filter pruning
-#---------------------------
-#export CUDA_VISIBLE_DEVICES=0
-#python compress.py \
-#--model "MobileNet" \
-#--pretrained_model ./pretrain/MobileNetV1_pretrained \
-#--compress_config ./configs/auto_prune.yaml
-
-# for quantization
-#-----------------
-#export CUDA_VISIBLE_DEVICES=0
-#python compress.py \
-#--batch_size 64 \
-#--model "MobileNet" \
-#--pretrained_model ./pretrain/MobileNetV1_pretrained \
-#--compress_config ./configs/quantization.yaml \
-#--quant_only True
-
-# for distillation with quantization
-#-----------------------------------
-#export CUDA_VISIBLE_DEVICES=4,5,6,7
-#
-## Fixing name conflicts in distillation
-#cd ${pretrain_dir}/ResNet50_pretrained
-#mv conv1_weights res_conv1_weights
-#mv fc_0.w_0 res_fc.w_0
-#mv fc_0.b_0 res_fc.b_0
-#cd -
-#
-#python compress.py \
-#--model "MobileNet" \
-#--teacher_model "ResNet50" \
-#--teacher_pretrained_model ./pretrain/ResNet50_pretrained \
-#--compress_config ./configs/quantization_dist.yaml
-#
-#cd ${pretrain_dir}/ResNet50_pretrained
-#mv res_conv1_weights conv1_weights
-#mv res_fc.w_0 fc_0.w_0
-#mv res_fc.b_0 fc_0.b_0
-#cd -
-
-# for uniform filter pruning with quantization
-#---------------------------------------------
-#export CUDA_VISIBLE_DEVICES=0
-#python compress.py \
-#--model "MobileNet" \
-#--pretrained_model ./pretrain/MobileNetV1_pretrained \
-#--compress_config ./configs/quantization_pruning.yaml
-
diff --git a/PaddleSlim/ssd/README.md b/PaddleSlim/ssd/README.md
deleted file mode 100644
index 797184addf03848c8446ba20c935190156da4190..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-This example compresses [MobileNetV1-SSD](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ssd), mainly by pruning the number of convolution channels in the backbone network.
-
-## Step 1: Inspect the network structure
-
-The backbone of this model is MobileNetV1, which mainly contains two kinds of convolutions: depthwise convolutions and ordinary 1x1 convolutions. Given the special nature of depthwise convolutions, we only prune the ordinary 1x1 convolutions.
-
-First, we need the names of the parameters (filter weights) of the ordinary 1x1 convolutions in the backbone. In the current implementation the network is defined in `fluid.default_main_program`, so the names and shapes of all parameters in the network can be printed as follows:
-
-```
-for param in fluid.default_main_program().global_block().all_parameters():
- print("{}: {}".format(param.name, param.shape))
-```
-
-The code above prints the parameter names in the order the network is defined, as shown below:
-
-```
-conv2d_0.w_0 (32L, 3L, 3L, 3L)
-depthwise_conv2d_0.w_0 (32L, 1L, 3L, 3L)
-conv2d_1.w_0 (64L, 32L, 1L, 1L)
-depthwise_conv2d_1.w_0 (64L, 1L, 3L, 3L)
-conv2d_2.w_0 (128L, 64L, 1L, 1L)
-depthwise_conv2d_2.w_0 (128L, 1L, 3L, 3L)
-conv2d_3.w_0 (128L, 128L, 1L, 1L)
-depthwise_conv2d_3.w_0 (128L, 1L, 3L, 3L)
-conv2d_4.w_0 (256L, 128L, 1L, 1L)
-depthwise_conv2d_4.w_0 (256L, 1L, 3L, 3L)
-conv2d_5.w_0 (256L, 256L, 1L, 1L)
-depthwise_conv2d_5.w_0 (256L, 1L, 3L, 3L)
-conv2d_6.w_0 (512L, 256L, 1L, 1L)
-depthwise_conv2d_6.w_0 (512L, 1L, 3L, 3L)
-conv2d_7.w_0 (512L, 512L, 1L, 1L)
-depthwise_conv2d_7.w_0 (512L, 1L, 3L, 3L)
-conv2d_8.w_0 (512L, 512L, 1L, 1L)
-depthwise_conv2d_8.w_0 (512L, 1L, 3L, 3L)
-conv2d_9.w_0 (512L, 512L, 1L, 1L)
-depthwise_conv2d_9.w_0 (512L, 1L, 3L, 3L)
-conv2d_10.w_0 (512L, 512L, 1L, 1L)
-depthwise_conv2d_10.w_0 (512L, 1L, 3L, 3L)
-conv2d_11.w_0 (512L, 512L, 1L, 1L)
-depthwise_conv2d_11.w_0 (512L, 1L, 3L, 3L)
-conv2d_12.w_0 (1024L, 512L, 1L, 1L)
-depthwise_conv2d_12.w_0 (1024L, 1L, 3L, 3L)
-
-```
-
-From this output, the ordinary 1x1 convolutions are named `conv2d_1.w_0` through `conv2d_12.w_0`, which can be expressed with the regular expression:
-
-```
-"conv2d_[1-9].w_0|conv2d_1[0-2].w_0"
-```
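-
-The following is a quick sanity-check sketch (not part of the original tutorial; it assumes full-string matching semantics, which should be confirmed against PaddleSlim's actual matching behavior). It applies the pattern to the parameter names printed above and verifies that only the intended 1x1 convolution weights match:
-
-```
-import re
-
-# The pruning pattern from above. Note the unescaped "." matches any
-# character, which happens to be harmless for these parameter names.
-pattern = re.compile(r"conv2d_[1-9].w_0|conv2d_1[0-2].w_0")
-
-# Names taken from the printed parameter list.
-names = (["conv2d_%d.w_0" % i for i in range(13)] +
-         ["depthwise_conv2d_%d.w_0" % i for i in range(13)])
-
-matched = [n for n in names if pattern.fullmatch(n)]
-print(matched)  # expect conv2d_1.w_0 ... conv2d_12.w_0 and nothing else
-```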
-
-## Step 2: Write the configuration file
-
-Taking uniform pruning as an example, pay particular attention to the following options:
-
-- target_ratio: the fraction of FLOPs to prune away. When setting it, also consider how large a share of the whole network the backbone parameters account for: if the value is too high, all channels of some convolution layers may be pruned away. To avoid this, try several values first, observe how the layers are pruned, and then settle on a suitable one. This example uses 0.2.
-
-- pruned_params: the names of the parameters to prune; regular expressions are supported. Make sure the expression does not match any parameter you do not want pruned. The safest approach is a form like `param_1_name|param_2_name|param_3_name`, which strictly matches the listed names. Based on Step 1, this example uses `conv2d_[1-9].w_0|conv2d_1[0-2].w_0`.
-
-
-## Step 3: Write the compression script
-
-The compression script in this example is adapted from [ssd/train.py](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/ssd/train.py).
-
-Note the following points:
-
-### fluid.metrics.DetectionMAP
-
-PaddleSlim does not yet support fluid.metrics or fluid.evaluator, so metrics.DetectionMAP is rewritten here as:
-
-```
-gt_label = fluid.layers.cast(x=gt_label, dtype=gt_box.dtype)
-if difficult:
- difficult = fluid.layers.cast(x=difficult, dtype=gt_box.dtype)
- gt_label = fluid.layers.reshape(gt_label, [-1, 1])
- difficult = fluid.layers.reshape(difficult, [-1, 1])
- label = fluid.layers.concat([gt_label, difficult, gt_box], axis=1)
-else:
- label = fluid.layers.concat([gt_label, gt_box], axis=1)
-
-map_var = fluid.layers.detection.detection_map(
- nmsed_out,
- label,
- class_num,
- background_label=0,
- overlap_threshold=0.5,
- evaluate_difficult=False,
- ap_version=ap_version)
-```
-
-### data reader
-
-Note that when constructing the Compressor, both train_reader and eval_reader are given py_readers.
-Because py_readers are used, there is no need to provide a feed_list.
-
-```
- compressor = Compressor(
- place,
- fluid.global_scope(),
- train_prog,
-        train_reader=train_py_reader,  # NOTE: a py_reader, not a plain reader
-        train_feed_list=None,  # NOTE: no feed list is needed with py_reader
-        train_fetch_list=train_fetch_list,
-        eval_program=test_prog,
-        eval_reader=test_py_reader,  # NOTE: a py_reader, not a plain reader
-        eval_feed_list=None,  # NOTE: no feed list is needed with py_reader
- eval_fetch_list=val_fetch_list,
- train_optimizer=None)
-```
-
-## Step 4: Save the pruned model
-
-The following code saves the model after pruning:
-
-```
-com_pass = Compressor(...)
-com_pass.config(args.compress_config)
-com_pass.run()
-
-pruned_prog = com_pass.eval_graph.program
-
-fluid.io.save_inference_model("./pruned_model/", [image.name, label.name], [acc_top1], exe, main_program=pruned_prog)
-
-# check the shape of parameters
-for param in pruned_prog.global_block().all_parameters():
- print("name: {}; shape: {}".format(param.name, param.shape))
-```
-
-For the save_inference_model API, see: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/io_cn.html#save-inference-model
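-
-As a usage sketch (not from the original tutorial; the dummy input shapes and dtypes below are assumptions and must match how the model was actually saved), the pruned inference model can be loaded back and run:
-
-```
-import numpy as np
-import paddle.fluid as fluid
-
-place = fluid.CPUPlace()
-exe = fluid.Executor(place)
-
-# load_inference_model returns the pruned program together with the names
-# of the feed variables and the fetch targets that were saved above.
-[program, feed_names, fetch_targets] = fluid.io.load_inference_model(
-    "./pruned_model/", exe)
-
-image_data = np.random.random((1, 3, 300, 300)).astype('float32')  # hypothetical input shape
-label_data = np.zeros((1, 1)).astype('int64')                      # hypothetical label dtype
-outs = exe.run(program,
-               feed={feed_names[0]: image_data, feed_names[1]: label_data},
-               fetch_list=fetch_targets)
-print(outs)
-```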
diff --git a/PaddleSlim/ssd/compress.yaml b/PaddleSlim/ssd/compress.yaml
deleted file mode 100644
index 3888a135e6e786ea97127c1b6f33e14f76bb2fa3..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/compress.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: 1.0
-pruners:
- pruner_1:
- class: 'StructurePruner'
- pruning_axis:
- '*': 0
- criterions:
- '*': 'l1_norm'
-strategies:
- uniform_pruning_strategy:
- class: 'UniformPruneStrategy'
- pruner: 'pruner_1'
- start_epoch: 0
- target_ratio: 0.2
- pruned_params: 'conv2d_[1-9].w_0|conv2d_1[0-2].w_0'
-compressor:
- epoch: 200
- #init_model: './checkpoints/0' # Please enable this option for loading checkpoint.
- checkpoint_path: './checkpoints/'
- strategies:
- - uniform_pruning_strategy
diff --git a/PaddleSlim/ssd/image_util.py b/PaddleSlim/ssd/image_util.py
deleted file mode 120000
index f543a7108ccd46b2833d99d6c79c4a1229fe268c..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/image_util.py
+++ /dev/null
@@ -1 +0,0 @@
-../../PaddleCV/ssd/image_util.py
\ No newline at end of file
diff --git a/PaddleSlim/ssd/mobilenet_ssd.py b/PaddleSlim/ssd/mobilenet_ssd.py
deleted file mode 120000
index 4091d19215f5268cb6f4b212f997cb169738d4a9..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/mobilenet_ssd.py
+++ /dev/null
@@ -1 +0,0 @@
-../../PaddleCV/ssd/mobilenet_ssd.py
\ No newline at end of file
diff --git a/PaddleSlim/ssd/reader.py b/PaddleSlim/ssd/reader.py
deleted file mode 120000
index 7e98191f89e6d60261be62a670eab2ac420406ab..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/reader.py
+++ /dev/null
@@ -1 +0,0 @@
-../../PaddleCV/ssd/reader.py
\ No newline at end of file
diff --git a/PaddleSlim/ssd/run.sh b/PaddleSlim/ssd/run.sh
deleted file mode 100644
index 8a35453d1076870f1707b3fa2af81524137b6707..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/run.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-export CUDA_VISIBLE_DEVICES=2
-python train.py \
---dataset="coco2014" \
---data_dir="./data/coco" \
- > ./run.log 2>&1 &
-
-
-tail -f run.log
diff --git a/PaddleSlim/ssd/train.py b/PaddleSlim/ssd/train.py
deleted file mode 100644
index 96810ae04bb08bb63da6668434121aeaf412c116..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/train.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import os
-import time
-import numpy as np
-import argparse
-import functools
-import shutil
-import math
-import multiprocessing
-from paddle.fluid.contrib.slim import Compressor
-
-
-def set_paddle_flags(**kwargs):
- for key, value in kwargs.items():
- if os.environ.get(key, None) is None:
- os.environ[key] = str(value)
-
-
-# NOTE(paddle-dev): All of these flags should be
-# set before `import paddle`. Otherwise, it would
-# not take any effect.
-set_paddle_flags(
- FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
-)
-
-import paddle
-import paddle.fluid as fluid
-import reader
-from mobilenet_ssd import build_mobilenet_ssd
-from utility import add_arguments, print_arguments, check_cuda
-
-parser = argparse.ArgumentParser(description=__doc__)
-add_arg = functools.partial(add_arguments, argparser=parser)
-# yapf: disable
-add_arg('learning_rate', float, 0.001, "Learning rate.")
-add_arg('batch_size', int, 64, "Minibatch size of all devices.")
-add_arg('epoc_num', int, 120, "Epoch number.")
-add_arg('use_gpu', bool, True, "Whether use GPU.")
-add_arg('parallel', bool, True, "Whether train in parallel on multi-devices.")
-add_arg('dataset', str, 'pascalvoc', "dataset can be coco2014, coco2017, or pascalvoc.")
-add_arg('model_save_dir', str, 'model', "The path to save model.")
-add_arg('pretrained_model', str, 'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
-add_arg('ap_version', str, '11point', "mAP version can be integral or 11point.")
-add_arg('image_shape', str, '3,300,300', "Input image shape.")
-add_arg('mean_BGR', str, '127.5,127.5,127.5', "Mean value for B,G,R channel which will be subtracted.")
-add_arg('data_dir', str, 'data/pascalvoc', "Data directory.")
-add_arg('use_multiprocess', bool, True, "Whether use multi-process for data preprocessing.")
-add_arg('enable_ce', bool, False, "Whether use CE to evaluate the model.")
-#yapf: enable
-
-train_parameters = {
- "pascalvoc": {
- "train_images": 16551,
- "image_shape": [3, 300, 300],
- "class_num": 21,
- "batch_size": 64,
- "lr": 0.001,
- "lr_epochs": [40, 60, 80, 100],
- "lr_decay": [1, 0.5, 0.25, 0.1, 0.01],
- "ap_version": '11point',
- },
- "coco2014": {
- "train_images": 82783,
- "image_shape": [3, 300, 300],
- "class_num": 91,
- "batch_size": 64,
- "lr": 0.001,
- "lr_epochs": [12, 19],
- "lr_decay": [1, 0.5, 0.25],
- "ap_version": 'integral', # should use eval_coco_map.py to test model
- },
- "coco2017": {
- "train_images": 118287,
- "image_shape": [3, 300, 300],
- "class_num": 91,
- "batch_size": 64,
- "lr": 0.001,
- "lr_epochs": [12, 19],
- "lr_decay": [1, 0.5, 0.25],
- "ap_version": 'integral', # should use eval_coco_map.py to test model
- }
-}
-
-def optimizer_setting(train_params):
- batch_size = train_params["batch_size"]
- iters = train_params["train_images"] // batch_size
- lr = train_params["lr"]
- boundaries = [i * iters for i in train_params["lr_epochs"]]
- values = [ i * lr for i in train_params["lr_decay"]]
-
- optimizer = fluid.optimizer.RMSProp(
- learning_rate=fluid.layers.piecewise_decay(boundaries, values),
- regularization=fluid.regularizer.L2Decay(0.00005), )
-
- return optimizer
-
-
-def build_program(main_prog, startup_prog, train_params, is_train):
- image_shape = train_params['image_shape']
- class_num = train_params['class_num']
- ap_version = train_params['ap_version']
- outs = []
- with fluid.program_guard(main_prog, startup_prog):
- py_reader = fluid.layers.py_reader(
- capacity=64,
- shapes=[[-1] + image_shape, [-1, 4], [-1, 1], [-1, 1]],
- lod_levels=[0, 1, 1, 1],
- dtypes=["float32", "float32", "int32", "int32"],
- use_double_buffer=True)
- with fluid.unique_name.guard():
- image, gt_box, gt_label, difficult = fluid.layers.read_file(py_reader)
- locs, confs, box, box_var = build_mobilenet_ssd(image, class_num, image_shape)
- if is_train:
- with fluid.unique_name.guard("train"):
- loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
- box_var)
- loss = fluid.layers.reduce_sum(loss)
- optimizer = optimizer_setting(train_params)
- optimizer.minimize(loss)
- outs = [py_reader, loss]
- else:
- with fluid.unique_name.guard("inference"):
- nmsed_out = fluid.layers.detection_output(
- locs, confs, box, box_var, nms_threshold=0.45)
-
- gt_label = fluid.layers.cast(x=gt_label, dtype=gt_box.dtype)
- if difficult:
- difficult = fluid.layers.cast(x=difficult, dtype=gt_box.dtype)
- gt_label = fluid.layers.reshape(gt_label, [-1, 1])
- difficult = fluid.layers.reshape(difficult, [-1, 1])
- label = fluid.layers.concat([gt_label, difficult, gt_box], axis=1)
- else:
- label = fluid.layers.concat([gt_label, gt_box], axis=1)
-
- map_var = fluid.layers.detection.detection_map(
- nmsed_out,
- label,
- class_num,
- background_label=0,
- overlap_threshold=0.5,
- evaluate_difficult=False,
- ap_version=ap_version)
-
- # nmsed_out and image are used to save the model for inference
- outs = [py_reader, map_var, nmsed_out, image]
- return outs
-
-
-def train(args,
- data_args,
- train_params,
- train_file_list,
- val_file_list):
-
- model_save_dir = args.model_save_dir
- pretrained_model = args.pretrained_model
- use_gpu = args.use_gpu
- parallel = args.parallel
- enable_ce = args.enable_ce
- is_shuffle = True
-
- if not use_gpu:
- devices_num = int(os.environ.get('CPU_NUM',
- multiprocessing.cpu_count()))
- else:
- devices_num = fluid.core.get_cuda_device_count()
-
- batch_size = train_params['batch_size']
- epoc_num = train_params['epoc_num']
- batch_size_per_device = batch_size // devices_num
- num_workers = 8
-
- startup_prog = fluid.Program()
- train_prog = fluid.Program()
- test_prog = fluid.Program()
-
-
- train_py_reader, loss = build_program(
- main_prog=train_prog,
- startup_prog=startup_prog,
- train_params=train_params,
- is_train=True)
- test_py_reader, map_var, _, _ = build_program(
- main_prog=test_prog,
- startup_prog=startup_prog,
- train_params=train_params,
- is_train=False)
-
- test_prog = test_prog.clone(for_test=True)
-
- for param in train_prog.global_block().all_parameters():
- if 'conv' in param.name:
-            print(param.name, param.shape)
- place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(startup_prog)
-
- if pretrained_model:
- def if_exist(var):
- return os.path.exists(os.path.join(pretrained_model, var.name))
- fluid.io.load_vars(exe, pretrained_model, main_program=train_prog,
- predicate=if_exist)
-
-
- test_reader = reader.test(data_args, val_file_list, batch_size)
- test_py_reader.decorate_paddle_reader(test_reader)
- train_reader = reader.train(data_args,
- train_file_list,
- batch_size_per_device,
- shuffle=is_shuffle,
- use_multiprocess=args.use_multiprocess,
- num_workers=num_workers,
- enable_ce=enable_ce)
- train_py_reader.decorate_paddle_reader(train_reader)
-
- train_fetch_list=[("loss", loss.name)]
- val_fetch_list=[("map", map_var.name)]
- compressor = Compressor(
- place,
- fluid.global_scope(),
- train_prog,
- train_reader=train_py_reader,
- train_feed_list=None,
- train_fetch_list=train_fetch_list,
- eval_program=test_prog,
- eval_reader=test_py_reader,
- eval_feed_list=None,
- eval_fetch_list=val_fetch_list,
- train_optimizer=None)
- compressor.config('./compress.yaml')
- compressor.run()
-
-
-def main():
- args = parser.parse_args()
- print_arguments(args)
-
- check_cuda(args.use_gpu)
-
- data_dir = args.data_dir
- dataset = args.dataset
- assert dataset in ['pascalvoc', 'coco2014', 'coco2017']
-
- # for pascalvoc
- label_file = 'label_list'
- train_file_list = 'trainval.txt'
- val_file_list = 'test.txt'
-
- if dataset == 'coco2014':
- train_file_list = 'annotations/instances_train2014.json'
- val_file_list = 'annotations/instances_val2014.json'
- elif dataset == 'coco2017':
- train_file_list = 'annotations/instances_train2017.json'
- val_file_list = 'annotations/instances_val2017.json'
-
- mean_BGR = [float(m) for m in args.mean_BGR.split(",")]
- image_shape = [int(m) for m in args.image_shape.split(",")]
- train_parameters[dataset]['image_shape'] = image_shape
- train_parameters[dataset]['batch_size'] = args.batch_size
- train_parameters[dataset]['lr'] = args.learning_rate
- train_parameters[dataset]['epoc_num'] = args.epoc_num
- train_parameters[dataset]['ap_version'] = args.ap_version
-
- data_args = reader.Settings(
- dataset=args.dataset,
- data_dir=data_dir,
- label_file=label_file,
- resize_h=image_shape[1],
- resize_w=image_shape[2],
- mean_value=mean_BGR,
- apply_distort=True,
- apply_expand=True,
- ap_version = args.ap_version)
- train(args,
- data_args,
- train_parameters[dataset],
- train_file_list=train_file_list,
- val_file_list=val_file_list)
-
-
-if __name__ == '__main__':
- main()
diff --git a/PaddleSlim/ssd/utility.py b/PaddleSlim/ssd/utility.py
deleted file mode 120000
index 1b55d37db5b18bab1a9e1bcc335ef3403eb1ad9d..0000000000000000000000000000000000000000
--- a/PaddleSlim/ssd/utility.py
+++ /dev/null
@@ -1 +0,0 @@
-../../PaddleCV/ssd/utility.py
\ No newline at end of file
diff --git a/PaddleSlim/utility.py b/PaddleSlim/utility.py
deleted file mode 100644
index 90a5ffe75fa1bbb752c0bcfc25ea08e9f966f3be..0000000000000000000000000000000000000000
--- a/PaddleSlim/utility.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""Contains common utility functions."""
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
-#
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import distutils.util
-import os
-import numpy as np
-import six
-import logging
-import paddle.fluid as fluid
-import paddle.compat as cpt
-from paddle.fluid import core
-from paddle.fluid.framework import Program
-
-logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
-_logger = logging.getLogger(__name__)
-_logger.setLevel(logging.INFO)
-
-def print_arguments(args):
- """Print argparse's arguments.
-
- Usage:
-
- .. code-block:: python
-
- parser = argparse.ArgumentParser()
- parser.add_argument("name", default="Jonh", type=str, help="User name.")
- args = parser.parse_args()
- print_arguments(args)
-
- :param args: Input argparse.Namespace for printing.
- :type args: argparse.Namespace
- """
- print("----------- Configuration Arguments -----------")
- for arg, value in sorted(six.iteritems(vars(args))):
- print("%s: %s" % (arg, value))
- print("------------------------------------------------")
-
-
-def add_arguments(argname, type, default, help, argparser, **kwargs):
- """Add argparse's argument.
-
- Usage:
-
- .. code-block:: python
-
- parser = argparse.ArgumentParser()
- add_argument("name", str, "Jonh", "User name.", parser)
- args = parser.parse_args()
- """
- type = distutils.util.strtobool if type == bool else type
- argparser.add_argument(
- "--" + argname,
- default=default,
- type=type,
- help=help + ' Default: %(default)s.',
- **kwargs)
-
-
-def save_persistable_nodes(executor, dirname, graph):
- """
- Save persistable nodes to the given directory by the executor.
-
- Args:
- executor(Executor): The executor to run for saving node values.
- dirname(str): The directory path.
- graph(IrGraph): All the required persistable nodes in the graph will be saved.
- """
- persistable_node_names = set()
- persistable_nodes = []
- all_persistable_nodes = graph.all_persistable_nodes()
- for node in all_persistable_nodes:
- name = cpt.to_text(node.name())
- if name not in persistable_node_names:
- persistable_node_names.add(name)
- persistable_nodes.append(node)
- program = Program()
- var_list = []
- for node in persistable_nodes:
- var_desc = node.var()
- if var_desc.type() == core.VarDesc.VarType.RAW or \
- var_desc.type() == core.VarDesc.VarType.READER:
- continue
- var = program.global_block().create_var(
- name=var_desc.name(),
- shape=var_desc.shape(),
- dtype=var_desc.dtype(),
- type=var_desc.type(),
- lod_level=var_desc.lod_level(),
- persistable=var_desc.persistable())
- var_list.append(var)
- fluid.io.save_vars(executor=executor, dirname=dirname, vars=var_list)
-
-
-def load_persistable_nodes(executor, dirname, graph):
- """
- Load persistable node values from the given directory by the executor.
-
- Args:
- executor(Executor): The executor to run for loading node values.
- dirname(str): The directory path.
- graph(IrGraph): All the required persistable nodes in the graph will be loaded.
- """
- persistable_node_names = set()
- persistable_nodes = []
- all_persistable_nodes = graph.all_persistable_nodes()
- for node in all_persistable_nodes:
- name = cpt.to_text(node.name())
- if name not in persistable_node_names:
- persistable_node_names.add(name)
- persistable_nodes.append(node)
- program = Program()
- var_list = []
-
- def _exist(var):
- return os.path.exists(os.path.join(dirname, var.name))
-
- def _load_var(name, scope):
- return np.array(scope.find_var(name).get_tensor())
-
- def _store_var(name, array, scope, place):
- tensor = scope.find_var(name).get_tensor()
- tensor.set(array, place)
-
- for node in persistable_nodes:
- var_desc = node.var()
- if var_desc.type() == core.VarDesc.VarType.RAW or \
- var_desc.type() == core.VarDesc.VarType.READER:
- continue
- var = program.global_block().create_var(
- name=var_desc.name(),
- shape=var_desc.shape(),
- dtype=var_desc.dtype(),
- type=var_desc.type(),
- lod_level=var_desc.lod_level(),
- persistable=var_desc.persistable())
- if _exist(var):
- var_list.append(var)
- else:
- _logger.info("Cannot find the var %s!!!" %(node.name()))
- fluid.io.load_vars(executor=executor, dirname=dirname, vars=var_list)
diff --git a/README.md b/README.md
index 88cc0af6dc4134a4456c8861614e053eff3d7c09..0f87eeda7621d160270cb1bee67281e0d30acf6c 100644
--- a/README.md
+++ b/README.md
@@ -28,34 +28,34 @@ PaddlePaddle provides a rich set of computing units, enabling users to adopt modular
### Image classification
-[Image classification](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) distinguishes images of different categories by their semantic content. It is a fundamental problem in computer vision and underpins higher-level vision tasks such as object detection, image segmentation, object tracking, behavior analysis, and face recognition. It is widely applied in many fields, e.g. face recognition and intelligent video analysis in security, traffic scene recognition in transportation, content-based image retrieval and automatic album organization on the internet, and image recognition in medicine.
+[Image classification](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) distinguishes images of different categories by their semantic content. It is a fundamental problem in computer vision and underpins higher-level vision tasks such as object detection, image segmentation, object tracking, behavior analysis, and face recognition. It is widely applied in many fields, e.g. face recognition and intelligent video analysis in security, traffic scene recognition in transportation, content-based image retrieval and automatic album organization on the internet, and image recognition in medicine.
| **Model** | **Description** | **Dataset** | **Metric: top-1/top-5 accuracy** |
| - | - | - | - |
-| [AlexNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | The first CNN to successfully apply ReLU, Dropout, and LRN, using GPUs to accelerate computation | ImageNet-2012 validation set | 56.72%/79.17% |
-| [VGG19](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Builds on AlexNet with small 3*3 convolution kernels and greater network depth, giving strong generalization | ImageNet-2012 validation set | 72.56%/90.93% |
-| [GoogLeNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Increases network depth and width without adding computational load, for superior performance | ImageNet-2012 validation set | 70.70%/89.66% |
-| [ResNet50](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Residual Network; introduces a new residual structure that solves the accuracy degradation seen as networks get deeper | ImageNet-2012 validation set | 76.50%/93.00% |
-| [ResNet200_vd](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Combines multiple ResNet improvement strategies; ResNet200_vd reaches 80.93% top-1 accuracy | ImageNet-2012 validation set | 80.93%/95.33% |
-| [Inceptionv4](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Combines Inception modules with residual connections, greatly accelerating training and improving performance via the ResNet structure | ImageNet-2012 validation set | 80.77%/95.26% |
-| [MobileNetV1](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Restructures traditional convolutions into a two-layer convolution structure, greatly reducing computation time with essentially no loss in accuracy; well suited to mobile and embedded vision applications | ImageNet-2012 validation set | 70.99%/89.68% |
-| [MobileNetV2](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | A refinement of MobileNet: adding skip connections directly on the thinner bottleneck layers and dropping the ReLU non-linearity on bottleneck layers gives better results | ImageNet-2012 validation set | 72.15%/90.65% |
-| [SENet154_vd](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Adds SE (Squeeze-and-Excitation) modules on top of ResNeXt, improving recognition accuracy; took first place in the ILSVRC 2017 classification task | ImageNet-2012 validation set | 81.40%/95.48% |
-| [ShuffleNetV2](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | ECCV 2018; a lightweight CNN that strikes a good balance between speed and accuracy. At the same complexity it is more accurate than ShuffleNet and MobileNetV2, making it well suited to mobile and autonomous-vehicle applications | ImageNet-2012 validation set | 70.03%/89.17% |
-| [efficientNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Scales model resolution, channel width, and depth jointly, reaching SOTA accuracy with very few parameters. | ImageNet-2012 validation set | 77.38%/93.31% |
-| [xception71](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | An improvement on Inception-v3: replaces ordinary convolutions with depthwise separable convolutions, reducing parameters while improving accuracy. | ImageNet-2012 validation set | 81.11%/95.45% |
-| [dpn107](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Combines the strengths of DenseNet and ResNeXt. | ImageNet-2012 validation set | 80.89%/95.32% |
-| [mobilenetV3_small_x1_0](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Adds SE modules on top of v2 and uses the hard-swish activation; performs well on classification, detection, segmentation, and other vision tasks. | ImageNet-2012 validation set | 67.46%/87.12% |
-| [DarkNet53](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | The backbone used by the YOLOv3 detection framework; performs well on both classification and detection. | ImageNet-2012 validation set | 78.04%/94.05% |
-| [DenseNet161](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Proposes a densely connected network structure that better facilitates the flow of information. | ImageNet-2012 validation set | 78.57%/94.14% |
-| [ResNeXt152_vd_64x4d](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Introduces the concept of cardinality as an additional measure of model complexity and uses it to effectively improve accuracy. | ImageNet-2012 validation set | 81.08%/95.34% |
-| [SqueezeNet1_1](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Proposes the new Fire Module architecture, compressing the model by reducing parameters. | ImageNet-2012 validation set | 60.08%/81.85% |
-
-For more image classification models, see [Image Classification](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification).
+| [AlexNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | The first CNN to successfully apply ReLU, Dropout, and LRN, using GPUs to accelerate computation | ImageNet-2012 validation set | 56.72%/79.17% |
+| [VGG19](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Builds on AlexNet with small 3*3 convolution kernels and greater network depth, giving strong generalization | ImageNet-2012 validation set | 72.56%/90.93% |
+| [GoogLeNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Increases network depth and width without adding computational load, for superior performance | ImageNet-2012 validation set | 70.70%/89.66% |
+| [ResNet50](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Residual Network; introduces a new residual structure that solves the accuracy degradation seen as networks get deeper | ImageNet-2012 validation set | 76.50%/93.00% |
+| [ResNet200_vd](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Combines multiple ResNet improvement strategies; ResNet200_vd reaches 80.93% top-1 accuracy | ImageNet-2012 validation set | 80.93%/95.33% |
+| [Inceptionv4](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Combines Inception modules with residual connections, greatly accelerating training and improving performance via the ResNet structure | ImageNet-2012 validation set | 80.77%/95.26% |
+| [MobileNetV1](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Restructures traditional convolutions into a two-layer convolution structure, greatly reducing computation time with essentially no loss in accuracy; well suited to mobile and embedded vision applications | ImageNet-2012 validation set | 70.99%/89.68% |
+| [MobileNetV2](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | A refinement of MobileNet: adding skip connections directly on the thinner bottleneck layers and dropping the ReLU non-linearity on bottleneck layers gives better results | ImageNet-2012 validation set | 72.15%/90.65% |
+| [SENet154_vd](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Adds SE (Squeeze-and-Excitation) modules on top of ResNeXt, improving recognition accuracy; took first place in the ILSVRC 2017 classification task | ImageNet-2012 validation set | 81.40%/95.48% |
+| [ShuffleNetV2](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | ECCV 2018; a lightweight CNN that strikes a good balance between speed and accuracy. At the same complexity it is more accurate than ShuffleNet and MobileNetV2, making it well suited to mobile and autonomous-vehicle applications | ImageNet-2012 validation set | 70.03%/89.17% |
+| [efficientNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Scales model resolution, channel width, and depth jointly, reaching SOTA accuracy with very few parameters. | ImageNet-2012 validation set | 77.38%/93.31% |
+| [xception71](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | An improvement on Inception-v3: replaces ordinary convolutions with depthwise separable convolutions, reducing parameters while improving accuracy. | ImageNet-2012 validation set | 81.11%/95.45% |
+| [dpn107](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Combines the strengths of DenseNet and ResNeXt. | ImageNet-2012 validation set | 80.89%/95.32% |
+| [mobilenetV3_small_x1_0](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Adds SE modules on top of v2 and uses the hard-swish activation; performs well on classification, detection, segmentation, and other vision tasks. | ImageNet-2012 validation set | 67.46%/87.12% |
+| [DarkNet53](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | The backbone used by the YOLOv3 detection framework; performs well on both classification and detection. | ImageNet-2012 validation set | 78.04%/94.05% |
+| [DenseNet161](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Proposes a densely connected network structure that better facilitates the flow of information. | ImageNet-2012 validation set | 78.57%/94.14% |
+| [ResNeXt152_vd_64x4d](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Introduces the concept of cardinality as an additional measure of model complexity and uses it to effectively improve accuracy. | ImageNet-2012 validation set | 81.08%/95.34% |
+| [SqueezeNet1_1](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification) | Proposes the new Fire Module architecture, compressing the model by reducing parameters. | ImageNet-2012 validation set | 60.08%/81.85% |
+
+For more image classification models, see [Image Classification](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/image_classification).
### Object detection
-The goal of object detection is, given an image or a video frame, to have the computer find the locations of all objects in it and give the specific category of each. What a computer "sees" are the numbers an image is encoded into; it is hard for it to understand high-level semantic concepts such as a person or an object appearing in an image or video frame, and harder still to localize the region of the image where a target appears. For object detection models, see [PaddleDetection](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleDetection).
+The goal of object detection is, given an image or a video frame, to have the computer find the locations of all objects in it and give the specific category of each. What a computer "sees" are the numbers an image is encoded into; it is hard for it to understand high-level semantic concepts such as a person or an object appearing in an image or video frame, and harder still to localize the region of the image where a target appears. For object detection models, see [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection).
| Model | Description | Dataset | Metric: mAP |
| ------------------------------------------------------------ | ------------------------------------------------------------ | ---------- | ------------------------------------------------------- |
@@ -64,7 +64,7 @@ PaddlePaddle provides a rich set of computing units, enabling users to adopt modular
| [Mask-RCNN](https://github.com/PaddlePaddle/PaddleDetection) | A classic two-stage framework; adds a segmentation branch to Faster R-CNN to produce mask results, decoupling mask and category prediction and yielding pixel-level detection results. | MS-COCO | ResNet 50 based, Mask mAP(0.50: 0.95) = 31.4% |
| [RetinaNet](https://github.com/PaddlePaddle/PaddleDetection) | A classic one-stage framework composed of a backbone network, an FPN structure, and two subnetworks that respectively regress object locations and predict object categories. Focal Loss is used during training to address the foreground/background class imbalance of traditional one-stage detectors, further improving one-stage detector accuracy. | MS-COCO | ResNet 50 based, mAP (0.50: 0.95) = 36% |
| [YOLOv3](https://github.com/PaddlePaddle/PaddleDetection) | An object detection network that balances speed and accuracy. Compared with the original author's YOLO v3 in darknet, the PaddlePaddle implementation follows the paper [Bag of Tricks for Image Classification with Convolutional Neural Networks](https://arxiv.org/pdf/1812.01187.pdf) and adds mixup, label smoothing, and other tricks, improving accuracy (mAP(0.50: 0.95)) by 4.7 absolute percentage points over the original; with synchronized batch normalization on top, the final accuracy is 5.9 absolute percentage points above the original. | MS-COCO | DarkNet based, mAP(0.50: 0.95) = 38.9% |
-| [PyramidBox](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/face_detection) | **PyramidBox** **is a face detection model developed in-house by Baidu** that uses contextual information to tackle the detection of hard faces, with strong representational power and robustness. It took first place on the WIDER Face dataset in March 2018 | WIDER FACE | mAP (Easy/Medium/Hard set) = 96.0% / 94.8% / 88.8% |
+| [PyramidBox](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/face_detection) | **PyramidBox** **is a face detection model developed in-house by Baidu** that uses contextual information to tackle the detection of hard faces, with strong representational power and robustness. It took first place on the WIDER Face dataset in March 2018 | WIDER FACE | mAP (Easy/Medium/Hard set) = 96.0% / 94.8% / 88.8% |
| [Cascade RCNN](https://github.com/PaddlePaddle/PaddleDetection) | Within the Faster R-CNN framework, Cascade R-CNN cascades multiple detectors with different IoU thresholds selected during training, progressively improving localization accuracy and thereby achieving excellent detection performance. | MS-COCO | ResNet 50 based, mAP (0.50: 0.95) = 40.9% |
| [Faceboxes](https://github.com/PaddlePaddle/PaddleDetection) | A classic face detection network, known as a "high-accuracy real-time CPU face detector". It uses components such as CReLU and density_prior_box to balance and improve the model's accuracy and speed. Compared with PyramidBox, it predicts and computes faster with a smaller model while keeping accuracy at a high level. | WIDER FACE | mAP (Easy/Medium/Hard Set) = 0.898/0.872/0.752 |
| [BlazeFace](https://github.com/PaddlePaddle/PaddleDetection) | A fast face detection network built from 5 single and 6 double BlazeBlocks plus an SSD architecture. It is lightweight but performs well, and is tailor-made for mobile GPU inference. | WIDER FACE | mAP Easy/Medium/Hard Set = 0.915/0.892/0.797 |
@@ -75,34 +75,35 @@ PaddlePaddle provides a rich set of computing units, enabling users to adopt modular
| Model | Description | Dataset | Metric |
| ------------------------------------------------------------ | ------------------------------------------------------------ | --------- | --------------- |
-| [ICNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/icnet) | Mainly for real-time semantic segmentation; balances speed and accuracy and is easy to deploy in production | Cityscapes | Mean IoU=67.0% |
-| [DeepLab V3+](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/deeplabv3%2B) | Fuses multi-scale information through an encoder-decoder while retaining the original dilated convolutions and ASPP layer; its Xception backbone improves the robustness and speed of semantic segmentation | Cityscapes | Mean IoU=78.81% |
-| [PSPNet (res101)](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/Research/SemSegPaddle) | Enhances semantic segmentation quality by exploiting contextual information from different sub-regions and the whole image, and proposes a deeply supervised auxiliary loss to improve optimization | Cityscapes | Mean IoU = 78.1 |
-| [GloRe (res101)](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/Research/SemSegPaddle) | Proposes GloRe, a lightweight, end-to-end trainable global reasoning unit for efficiently reasoning about relations between image regions, strengthening the model's context modeling | Cityscapes | Mean IoU = 78.4 |
-| [PSPNet (res101)](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/Research/SemSegPaddle) | - | PASCAL Context | Mean IoU = 48.9 |
-| [GloRe (res101)](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/Research/SemSegPaddle) | - | PASCAL Context | Mean IoU = 48.4 |
+| [ICNet](https://github.com/PaddlePaddle/PaddleSeg) | Mainly for real-time semantic segmentation; balances speed and accuracy and is easy to deploy in production | Cityscapes | Mean IoU=67.0% |
+| [DeepLab V3+](https://github.com/PaddlePaddle/PaddleSeg) | Fuses multi-scale information through an encoder-decoder while retaining the original dilated convolutions and ASPP layer; its Xception backbone improves the robustness and speed of semantic segmentation | Cityscapes | Mean IoU=78.81% |
+| [PSPNet (res101)](https://github.com/PaddlePaddle/Research/tree/master/CV/SemSegPaddle) | Enhances semantic segmentation quality by exploiting contextual information from different sub-regions and the whole image, and proposes a deeply supervised auxiliary loss to improve optimization | Cityscapes | Mean IoU = 78.1 |
+| [GloRe (res101)](https://github.com/PaddlePaddle/Research/tree/master/CV/SemSegPaddle) | Proposes GloRe, a lightweight, end-to-end trainable global reasoning unit for efficiently reasoning about relations between image regions, strengthening the model's context modeling | Cityscapes | Mean IoU = 78.4 |
+| [PSPNet (res101)](https://github.com/PaddlePaddle/Research/tree/master/CV/SemSegPaddle) | - | PASCAL Context | Mean IoU = 48.9 |
+| [GloRe (res101)](https://github.com/PaddlePaddle/Research/tree/master/CV/SemSegPaddle) | - | PASCAL Context | Mean IoU = 48.4 |
+
### Keypoint detection
Human skeletal keypoint detection (pose estimation) detects key points of the human body, such as joints and facial features, and describes the human skeleton through these keypoints. It is essential for describing human pose and predicting human behavior, and underlies many computer vision tasks such as action classification, abnormal behavior detection, and autonomous driving.
| Model | Description | Dataset | Metric |
| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------ | ------------ |
-| [Simple Baselines](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/human_pose_estimation) | Runner-up solution of the COCO 2018 keypoint detection track; a very simple network structure that reaches state-of-the-art performance | COCO val2017 | AP = 72.7% |
+| [Simple Baselines](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/human_pose_estimation) | Runner-up solution of the COCO 2018 keypoint detection track; a very simple network structure that reaches state-of-the-art performance | COCO val2017 | AP = 72.7% |
### Image generation
-Image generation means generating a target image from an input vector, which may be random noise or a user-specified condition vector. Typical application scenarios include handwriting generation, face synthesis, style transfer, and image inpainting. [PaddleGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) contains multiple models related to image generation.
+Image generation means generating a target image from an input vector, which may be random noise or a user-specified condition vector. Typical application scenarios include handwriting generation, face synthesis, style transfer, and image inpainting. [gan](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) contains multiple models related to image generation.
| Model | Description | Dataset |
| ------------------------------------------------------------ | ------------------------------------------------------------ | ---------- |
-| [CGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Conditional GAN; a GAN with conditional constraints that uses extra information to condition the model and guide the data generation process | Mnist |
-| [DCGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Deep convolutional GAN; combines GANs with convolutional networks to address unstable GAN training | Mnist |
-| [Pix2Pix](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Image-to-image translation; converts images of one class into another via paired images; usable for style transfer | Cityscapes |
-| [CycleGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Image-to-image translation; converts images of one class into another via unpaired images; usable for style transfer | Cityscapes |
-| [StarGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Multi-domain attribute transfer; introduces an auxiliary classifier so a single discriminator can judge multiple attributes; usable for facial attribute editing | Celeba |
-| [AttGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Uses classification and reconstruction losses to ensure that only the specified attributes change; usable for editing specific facial attributes | Celeba |
-| [STGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Specific facial attribute editing; takes only the changed labels as input and introduces a GRU structure to better select the attributes to change | Celeba |
-| [SPADE](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | Proposes a normalization method that accounts for spatial semantic information, better preserving semantics and generating more realistic images; usable for image translation. | Cityscapes |
+| [CGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Conditional GAN; a GAN with conditional constraints that uses extra information to condition the model and guide the data generation process | Mnist |
+| [DCGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Deep convolutional GAN; combines GANs with convolutional networks to address unstable GAN training | Mnist |
+| [Pix2Pix](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Image-to-image translation; converts images of one class into another via paired images; usable for style transfer | Cityscapes |
+| [CycleGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Image-to-image translation; converts images of one class into another via unpaired images; usable for style transfer | Cityscapes |
+| [StarGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Multi-domain attribute transfer; introduces an auxiliary classifier so a single discriminator can judge multiple attributes; usable for facial attribute editing | Celeba |
+| [AttGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Uses classification and reconstruction losses to ensure that only the specified attributes change; usable for editing specific facial attributes | Celeba |
+| [STGAN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Specific facial attribute editing; takes only the changed labels as input and introduces a GRU structure to better select the attributes to change | Celeba |
+| [SPADE](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/gan) | Proposes a normalization method that accounts for spatial semantic information, better preserving semantics and generating more realistic images; usable for image translation. | Cityscapes |
### Scene text recognition
@@ -110,8 +111,8 @@ PaddlePaddle provides a rich set of computing units, enabling users to adopt modular
| Model | Description | Dataset | Metric |
| ------------------------------------------------------------ | ------------------------------------------------------------ | -------------------------- | -------------- |
-| [CRNN-CTC](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ocr_recognition) | Uses a CTC model to recognize single-line English text in images; an end-to-end method for text-line image recognition | Images of single-line, variable-length English strings | Error rate = 22.3% |
-| [OCR Attention](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ocr_recognition) | Uses attention to recognize single-line English text in images, for end-to-end natural scene text recognition | Images of single-line, variable-length English strings | Error rate = 15.8% |
+| [CRNN-CTC](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/ocr_recognition) | Uses a CTC model to recognize single-line English text in images; an end-to-end method for text-line image recognition | Images of single-line, variable-length English strings | Error rate = 22.3% |
+| [OCR Attention](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/ocr_recognition) | Uses attention to recognize single-line English text in images, for end-to-end natural scene text recognition | Images of single-line, variable-length English strings | Error rate = 15.8% |
### Metric learning
@@ -119,11 +120,11 @@ PaddlePaddle provides a rich set of computing units, enabling users to adopt modular
| Model | Description | Dataset | Metric: Recall@Rank-1 (trained with arcmargin) |
| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------ | ------------------------------------------- |
-| [ResNet50, not fine-tuned](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | Feature model trained with arcmargin loss | Stanford Online Product(SOP) | 78.11% |
-| [ResNet50, fine-tuned with triplet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | Feature model fine-tuned with triplet loss on top of arcmargin loss | Stanford Online Product(SOP) | 79.21% |
-| [ResNet50, fine-tuned with quadruplet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | Feature model fine-tuned with quadruplet loss on top of arcmargin loss | Stanford Online Product(SOP) | 79.59% |
-| [ResNet50, fine-tuned with eml](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | Feature model fine-tuned with eml loss on top of arcmargin loss | Stanford Online Product(SOP) | 80.11% |
-| [ResNet50, fine-tuned with npairs](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | Feature model fine-tuned with npairs loss on top of arcmargin loss | Stanford Online Product(SOP) | 79.81% |
+| [ResNet50, not fine-tuned](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/metric_learning) | Feature model trained with arcmargin loss | Stanford Online Product(SOP) | 78.11% |
+| [ResNet50, fine-tuned with triplet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/metric_learning) | Feature model fine-tuned with triplet loss on top of arcmargin loss | Stanford Online Product(SOP) | 79.21% |
+| [ResNet50, fine-tuned with quadruplet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/metric_learning) | Feature model fine-tuned with quadruplet loss on top of arcmargin loss | Stanford Online Product(SOP) | 79.59% |
+| [ResNet50, fine-tuned with eml](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/metric_learning) | Feature model fine-tuned with eml loss on top of arcmargin loss | Stanford Online Product(SOP) | 80.11% |
+| [ResNet50, fine-tuned with npairs](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/metric_learning) | Feature model fine-tuned with npairs loss on top of arcmargin loss | Stanford Online Product(SOP) | 79.81% |
### Video classification and action localization
@@ -131,18 +132,18 @@ PaddlePaddle provides a rich set of computing units, enabling users to adopt modular
| Model | Description | Dataset | Metric |
| ------------------------------------------------------------ | ------------------------------------------------------------ | -------------------------- | ----------- |
-| [TSN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Classic 2D-CNN based solution proposed at ECCV'16 | Kinetics-400 | Top-1 = 67% |
-| [Non-Local](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Non-local relation modeling for video | Kinetics-400 | Top-1 = 74% |
-| [StNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Joint spatio-temporal video modeling method proposed at AAAI'19 | Kinetics-400 | Top-1 = 69% |
-| [TSM](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Simple and efficient spatio-temporal video modeling based on temporal shift | Kinetics-400 | Top-1 = 70% |
-| [Attention LSTM](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | A commonly used model; fast and accurate | Youtube-8M | GAP = 86% |
-| [Attention Cluster](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Attention-cluster fusion of multimodal video features, proposed at CVPR'18 | Youtube-8M | GAP = 84% |
-| [NeXtVlad](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | The 3rd-place model of the 2nd YouTube-8M competition | Youtube-8M | GAP = 87% |
-| [C-TCN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Winning solution of ActivityNet 2018 | ActivityNet1.3 | MAP=31% |
-| [BSN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Provides an efficient proposal generation method for video action localization | ActivityNet1.3 | AUC=66.64% |
-| [BMN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Winning solution of ActivityNet 2019 | ActivityNet1.3 | AUC=67.19% |
-| [ETS](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo/models/ets) | Baseline model for video description generation | ActivityNet Captions | METEOR: 10.0 |
-| [TALL](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo/models/tall) | Baseline model for video grounding | TACoS | R1@IOU5=0.13 |
+| [TSN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Classic 2D-CNN based solution proposed at ECCV'16 | Kinetics-400 | Top-1 = 67% |
+| [Non-Local](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Non-local relation modeling for video | Kinetics-400 | Top-1 = 74% |
+| [StNet](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Joint spatio-temporal video modeling method proposed at AAAI'19 | Kinetics-400 | Top-1 = 69% |
+| [TSM](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Simple and efficient spatio-temporal video modeling based on temporal shift | Kinetics-400 | Top-1 = 70% |
+| [Attention LSTM](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | A commonly used model; fast and accurate | Youtube-8M | GAP = 86% |
+| [Attention Cluster](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Attention-cluster fusion of multimodal video features, proposed at CVPR'18 | Youtube-8M | GAP = 84% |
+| [NeXtVlad](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | The 3rd-place model of the 2nd YouTube-8M competition | Youtube-8M | GAP = 87% |
+| [C-TCN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Winning solution of ActivityNet 2018 | ActivityNet1.3 | MAP=31% |
+| [BSN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Provides an efficient proposal generation method for video action localization | ActivityNet1.3 | AUC=66.64% |
+| [BMN](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video) | Winning solution of ActivityNet 2019 | ActivityNet1.3 | AUC=67.19% |
+| [ETS](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video/models/ets) | Baseline model for video description generation | ActivityNet Captions | METEOR: 10.0 |
+| [TALL](https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleCV/video/models/tall) | Baseline model for video grounding | TACoS | R1@IOU5=0.13 |
## PaddleNLP