diff --git a/tools/test_hubserving.py b/deploy/hubserving/test_hubserving.py similarity index 100% rename from tools/test_hubserving.py rename to deploy/hubserving/test_hubserving.py diff --git a/tools/serving/image_http_client.py b/deploy/paddleserving/image_http_client.py similarity index 100% rename from tools/serving/image_http_client.py rename to deploy/paddleserving/image_http_client.py diff --git a/tools/serving/image_service_cpu.py b/deploy/paddleserving/image_service_cpu.py similarity index 100% rename from tools/serving/image_service_cpu.py rename to deploy/paddleserving/image_service_cpu.py diff --git a/tools/serving/image_service_gpu.py b/deploy/paddleserving/image_service_gpu.py similarity index 100% rename from tools/serving/image_service_gpu.py rename to deploy/paddleserving/image_service_gpu.py diff --git a/tools/serving/utils.py b/deploy/paddleserving/utils.py similarity index 100% rename from tools/serving/utils.py rename to deploy/paddleserving/utils.py diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml index 6688261e62f505a4619fa0d72ce6d498e7e450e9..a34068cd06f122e0f388ce169f3e699ef6c35a7d 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00002 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml index 81fb746831699e1a600acc71d315ffcd5efd6c7e..fb9bc47db0fd43a7a7ee0a086ce4ca6487853484 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00002 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml index 84ea9307facbad3c082dcd113db99be5bf8307fa..590fa35f197caf119769d11b8632bed367f1c315 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00002 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: 
+ to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml index c3f9be6926651db0cd7d2c3b42b62d97d885c4f0..15c945e96924fd72254b8d51efb450e912e2e186 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml @@ -35,7 +35,8 @@ Optimizer: momentum: 0.9 lr: name: Cosine - learning_rate: 1.3 + learning_rate: 0.65 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00002 @@ -65,7 +66,7 @@ DataLoader: sampler: name: DistributedBatchSampler - batch_size: 512 + batch_size: 256 drop_last: False shuffle: True loader: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml index cf20657969bacbb6713cd13c167c4847aa22b892..1def84365d7c8103c2dd0781b7a61df84e43b2f7 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00004 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml index a84356ac207d368f93613723adea260543e11066..0849c5bdea56f5ed108d0d0d6410d2efbbe3cda0 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00001 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml index 31adeb4f62a2f67b5df9b09595b0f93324b0d3bc..e4d71e35ec2b79cc4f80a105b2e4bb3b2263cd9e 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00001 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml index b732e60a0821ea43fa8a342a34fc4ae2b071998b..75c6ff1740560fab1cc54d6a44bd8c5298fd75c6 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00002 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml index 1cdcac100b91499237420aea74b72ea2f07a922d..68749f43c10fadc5bebcba7d49e88e75459eafd4 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00002 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml index fc1ab0b1447412526bf94c030fb4268f67a34f51..fb6109350f2065ca4783684d6a56e7adcc730716 100644 --- a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml @@ -36,6 +36,7 @@ Optimizer: lr: name: Cosine learning_rate: 1.3 + warmup_epoch: 5 regularizer: name: 'L2' coeff: 0.00002 @@ -49,6 +50,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - RandCropImage: size: 224 - RandFlipImage: @@ -75,6 +79,9 @@ DataLoader: image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False - ResizeImage: resize_short: 256 - CropImage: diff --git a/tools/ema.py b/ppcls/utils/ema.py similarity index 100% rename from tools/ema.py rename to ppcls/utils/ema.py diff --git a/tools/feature_maps_visualization/download_resnet50_pretrained.sh b/ppcls/utils/feature_maps_visualization/download_resnet50_pretrained.sh similarity index 100% rename from 
tools/feature_maps_visualization/download_resnet50_pretrained.sh rename to ppcls/utils/feature_maps_visualization/download_resnet50_pretrained.sh diff --git a/tools/feature_maps_visualization/fm_vis.py b/ppcls/utils/feature_maps_visualization/fm_vis.py similarity index 100% rename from tools/feature_maps_visualization/fm_vis.py rename to ppcls/utils/feature_maps_visualization/fm_vis.py diff --git a/tools/feature_maps_visualization/resnet.py b/ppcls/utils/feature_maps_visualization/resnet.py similarity index 100% rename from tools/feature_maps_visualization/resnet.py rename to ppcls/utils/feature_maps_visualization/resnet.py diff --git a/tools/feature_maps_visualization/utils.py b/ppcls/utils/feature_maps_visualization/utils.py similarity index 100% rename from tools/feature_maps_visualization/utils.py rename to ppcls/utils/feature_maps_visualization/utils.py diff --git a/tools/static/dali.py b/ppcls/utils/static/dali.py similarity index 100% rename from tools/static/dali.py rename to ppcls/utils/static/dali.py diff --git a/tools/static/program.py b/ppcls/utils/static/program.py similarity index 100% rename from tools/static/program.py rename to ppcls/utils/static/program.py diff --git a/tools/static/run_dali.sh b/ppcls/utils/static/run_dali.sh similarity index 100% rename from tools/static/run_dali.sh rename to ppcls/utils/static/run_dali.sh diff --git a/tools/static/save_load.py b/ppcls/utils/static/save_load.py similarity index 100% rename from tools/static/save_load.py rename to ppcls/utils/static/save_load.py diff --git a/tools/static/train.py b/ppcls/utils/static/train.py similarity index 100% rename from tools/static/train.py rename to ppcls/utils/static/train.py diff --git a/tools/benchmark/benchmark.sh b/tools/benchmark/benchmark.sh deleted file mode 100644 index fc50a6eda148656c7dd0a46ac12d014f79873a4e..0000000000000000000000000000000000000000 --- a/tools/benchmark/benchmark.sh +++ /dev/null @@ -1,3 +0,0 @@ -python3.7 -m paddle.distributed.launch \ - --selected_gpus="0" \ - tools/benchmark/benchmark_acc.py diff --git a/tools/benchmark/benchmark_acc.py b/tools/benchmark/benchmark_acc.py deleted file mode 100644 index aa471713a11a91b9e512ea6593d250e16ef2dcad..0000000000000000000000000000000000000000 --- a/tools/benchmark/benchmark_acc.py +++ /dev/null @@ -1,123 +0,0 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import os -import sys -__dir__ = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(__dir__) -sys.path.append(os.path.abspath(os.path.join(__dir__, '..'))) -sys.path.append(os.path.abspath(os.path.join(__dir__, '../..'))) - -import paddle - -from multiprocessing import Manager -import tools.eval as eval -from ppcls.utils.model_zoo import _download, _decompress -from ppcls.utils import logger - - -def parse_args(): - def str2bool(v): - return v.lower() in ("true", "t", "1") - - parser = argparse.ArgumentParser() - parser.add_argument( - "-b", - "--benchmark_file_list", - type=str, - default="./tools/benchmark/benchmark_list.txt") - parser.add_argument( - "-p", "--pretrained_dir", type=str, default="./pretrained/") - - return parser.parse_args() - - -def parse_model_infos(benchmark_file_list): - model_infos = [] - with open(benchmark_file_list, "r") as fin: - lines = fin.readlines() - for idx, line in enumerate(lines): - strs = line.strip("\n").strip("\r").split(" ") - if len(strs) != 4: - logger.info( - "line {0}(info: {1}) format wrong, it should be splited into 4 parts, but got {2}". - format(idx, line, len(strs))) - model_infos.append({ - "top1_acc": float(strs[0]), - "model_name": strs[1], - "config_path": strs[2], - "pretrain_path": strs[3], - }) - return model_infos - - -def main(args): - benchmark_file_list = args.benchmark_file_list - model_infos = parse_model_infos(benchmark_file_list) - right_models = [] - wrong_models = [] - - for model_info in model_infos: - try: - pretrained_url = model_info["pretrain_path"] - fname = _download(pretrained_url, args.pretrained_dir) - pretrained_path = os.path.splitext(fname)[0] - if pretrained_url.endswith("tar"): - path = _decompress(fname) - pretrained_path = os.path.join( - os.path.dirname(pretrained_path), path) - - args.config = model_info["config_path"] - args.override = [ - "pretrained_model={}".format(pretrained_path), - "VALID.batch_size=256", - "VALID.num_workers=16", - "load_static_weights=True", - "print_interval=100", - ] - - manager = Manager() - return_dict = manager.dict() - - # A hack method to avoid name conflict. - # Multi-process maybe a better method here. - # More details can be seen in branch 2.0-beta. - # TODO: fluid needs to be removed in the future. 
- with paddle.utils.unique_name.guard(): - eval.main(args, return_dict) - - top1_acc = return_dict.get("top1_acc", 0.0) - except Exception as e: - logger.error(e) - top1_acc = 0.0 - diff = abs(top1_acc - model_info["top1_acc"]) - if diff > 0.001: - err_info = "[{}]Top-1 acc diff should be <= 0.001 but got diff {}, gt acc: {}, eval acc: {}".format( - model_info["model_name"], diff, model_info["top1_acc"], - top1_acc) - logger.warning(err_info) - wrong_models.append(model_info["model_name"]) - else: - right_models.append(model_info["model_name"]) - - logger.info("[number of right models: {}, they are: {}".format( - len(right_models), right_models)) - logger.info("[number of wrong models: {}, they are: {}".format( - len(wrong_models), wrong_models)) - - -if __name__ == '__main__': - args = parse_args() - main(args) diff --git a/tools/benchmark/benchmark_list.txt b/tools/benchmark/benchmark_list.txt deleted file mode 100644 index 66ad980c75f86c3c2017b263466e69da52f1c328..0000000000000000000000000000000000000000 --- a/tools/benchmark/benchmark_list.txt +++ /dev/null @@ -1,29 +0,0 @@ -0.7098 ResNet18 configs/ResNet/ResNet18.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNet18_pretrained.tar -0.7650 ResNet50 configs/ResNet/ResNet50.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar -0.7226 ResNet18_vd configs/ResNet/ResNet18_vd.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNet18_vd_pretrained.tar -0.7912 ResNet50_vd configs/ResNet/ResNet50_vd.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar -0.7099 MobileNetV1 configs/MobileNetV1/MobileNetV1.yaml https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar -0.7215 MobileNetV2 configs/MobileNetV2/MobileNetV2.yaml https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_pretrained.tar -0.7532 MobileNetV3_large_x1_0 configs/MobileNetV3/MobileNetV3_large_x1_0.yaml https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_pretrained.tar -0.6880 ShuffleNetV2 configs/ShuffleNet/ShuffleNetV2.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ShuffleNetV2_pretrained.tar -0.7933 Res2Net50_26w_4s configs/Res2Net/Res2Net50_26w_4s.yaml https://paddle-imagenet-models-name.bj.bcebos.com/Res2Net50_26w_4s_pretrained.tar -0.7775 ResNeXt50_32x4d configs/ResNeXt/ResNeXt50_32x4d.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt50_32x4d_pretrained.tar -0.7333 SE_ResNet18_vd configs/SENet/SE_ResNet18_vd.yaml https://paddle-imagenet-models-name.bj.bcebos.com/SE_ResNet18_vd_pretrained.tar -0.7952 SE_ResNet50_vd configs/SENet/SE_ResNet50_vd.yaml https://paddle-imagenet-models-name.bj.bcebos.com/SE_ResNet50_vd_pretrained.tar -0.8024 SE_ResNeXt50_vd_32x4d configs/SENet/SE_ResNeXt50_vd_32x4d.yaml https://paddle-imagenet-models-name.bj.bcebos.com/SE_ResNeXt50_vd_32x4d_pretrained.tar -0.7566 DenseNet121 configs/DenseNet/DenseNet121.yaml https://paddle-imagenet-models-name.bj.bcebos.com/DenseNet121_pretrained.tar -0.7678 DPN68 configs/DPN/DPN68.yaml https://paddle-imagenet-models-name.bj.bcebos.com/DPN68_pretrained.tar -0.7692 HRNet_W18_C configs/HRNet/HRNet_W18_C.yaml https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W18_C_pretrained.tar -0.7070 GoogLeNet configs/Inception/GoogLeNet.yaml https://paddle-imagenet-models-name.bj.bcebos.com/GoogLeNet_pretrained.tar -0.7930 Xception41 configs/Xception/Xception41.yaml https://paddle-imagenet-models-name.bj.bcebos.com/Xception41_pretrained.tar -0.7955 Xception41_deeplab 
configs/Xception/Xception41_deeplab.yaml https://paddle-imagenet-models-name.bj.bcebos.com/Xception41_deeplab_pretrained.tar -0.8077 InceptionV4 configs/Inception/InceptionV4.yaml https://paddle-imagenet-models-name.bj.bcebos.com/InceptionV4_pretrained.tar -0.8255 ResNeXt101_32x8d_wsl configs/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_32x8d_wsl_pretrained.tar -0.8035 ResNeSt50_fast_1s1x64d configs/ResNeSt/ResNeSt50_fast_1s1x64d.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNeSt50_fast_1s1x64d_pretrained.pdparams -0.8083 ResNeSt50 configs/ResNeSt/ResNeSt50.yaml https://paddle-imagenet-models-name.bj.bcebos.com/ResNeSt50_pretrained.pdparams -0.785 RegNetX_4GF configs/RegNet/RegNetX_4GF.yaml https://paddle-imagenet-models-name.bj.bcebos.com/RegNetX_4GF_pretrained.pdparams -0.7402 GhostNet_x1_0 configs/GhostNet/GhostNet_x1_0.yaml https://paddle-imagenet-models-name.bj.bcebos.com/GhostNet_x1_0_pretrained.pdparams -0.567 AlexNet configs/AlexNet/AlexNet.yaml https://paddle-imagenet-models-name.bj.bcebos.com/AlexNet_pretrained.tar -0.596 SqueezeNet1_0 configs/SqueezeNet/SqueezeNet1_0.yaml https://paddle-imagenet-models-name.bj.bcebos.com/SqueezeNet1_0_pretrained.tar -0.693 VGG11 configs/VGG/VGG11.yaml https://paddle-imagenet-models-name.bj.bcebos.com/VGG11_pretrained.tar -0.780 DarkNet53 configs/DarkNet/DarkNet53.yaml https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_ImageNet1k_pretrained.tar diff --git a/tools/benchmark/run_multi_nodes.sh b/tools/benchmark/run_multi_nodes.sh deleted file mode 100755 index 4a111999843a3ac64e11ba88cef20e768ad8653a..0000000000000000000000000000000000000000 --- a/tools/benchmark/run_multi_nodes.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -# IP Addresses of all nodes, modify it corresponding to your own environment -ALL_NODE_IPS="10.10.10.1,10.10.10.2" -# IP Address of the current node, modify it corresponding to your own environment -CUR_NODE_IPS="10.10.10.1" - -python -m paddle.distributed.launch \ - --cluster_node_ips=$ALL_NODE_IPS \ - --node_ip=$CUR_NODE_IPS \ - --gpus="0,1,2,3" \ - tools/train.py \ - -c ./configs/ResNet/ResNet50.yaml \ - -o print_interval=10 diff --git a/tools/benchmark/run_single_node.sh b/tools/benchmark/run_single_node.sh deleted file mode 100755 index 5ec44455700d821951da9ff5e51d50fd8e621b1d..0000000000000000000000000000000000000000 --- a/tools/benchmark/run_single_node.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -python -m paddle.distributed.launch \ - --gpus="0,1,2,3" \ - tools/train.py \ - -c ./configs/ResNet/ResNet50.yaml \ - -o print_interval=10 diff --git a/tools/download.py b/tools/download.py deleted file mode 100644 index 7053634c54e875529fa7a1a01d325641405fa764..0000000000000000000000000000000000000000 --- a/tools/download.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import os -import sys -__dir__ = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(__dir__) -sys.path.append(os.path.abspath(os.path.join(__dir__, '..'))) - -from ppcls import model_zoo - - -def parse_args(): - def str2bool(v): - return v.lower() in ("true", "t", "1") - - parser = argparse.ArgumentParser() - parser.add_argument('-a', '--architecture', type=str, default='ResNet50') - parser.add_argument('-p', '--path', type=str, default='./pretrained/') - parser.add_argument('--postfix', type=str, default="pdparams") - parser.add_argument('-d', '--decompress', type=str2bool, default=False) - parser.add_argument('-l', '--list', type=str2bool, default=False) - - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - if args.list: - model_zoo.list_models() - else: - model_zoo.get(args.architecture, args.path, args.decompress, - args.postfix) - - -if __name__ == '__main__': - main() diff --git a/tools/eval.sh b/tools/eval.sh index f67ba9c95b6c5642fa70a17c2987ba20d7672051..c13ea6d032408afd858de45ccbec7cd45cd969f8 100644 --- a/tools/eval.sh +++ b/tools/eval.sh @@ -1,6 +1,7 @@ -python3.7 -m paddle.distributed.launch \ - --gpus="0,1,2,3" \ - tools/eval.py \ - -c ./configs/ResNet/ResNet50.yaml \ - -o pretrained_model="./ResNet50_pretrained" \ - -o use_gpu=True +#!/usr/bin/env bash + +# for single card eval +# python3.7 tools/eval.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml + +# for multi-cards eval +python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" tools/eval.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml diff --git a/tools/export_serving_model.py b/tools/export_serving_model.py deleted file mode 100644 index 6bf7cbe9b8bf2a0db343bf4e9fbaf59152601494..0000000000000000000000000000000000000000 --- a/tools/export_serving_model.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import os -from ppcls.arch import backbone - -import paddle.fluid as fluid -import paddle_serving_client.io as serving_io - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("-m", "--model", type=str) - parser.add_argument("-p", "--pretrained_model", type=str) - parser.add_argument("-o", "--output_path", type=str, default="") - parser.add_argument("--class_dim", type=int, default=1000) - parser.add_argument("--img_size", type=int, default=224) - - return parser.parse_args() - - -def create_input(img_size=224): - image = fluid.data( - name='image', shape=[None, 3, img_size, img_size], dtype='float32') - return image - - -def create_model(args, model, input, class_dim=1000): - if args.model == "GoogLeNet": - out, _, _ = model.net(input=input, class_dim=class_dim) - else: - out = model.net(input=input, class_dim=class_dim) - out = fluid.layers.softmax(out) - return out - - -def main(): - args = parse_args() - - model = backbone.__dict__[args.model]() - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - - startup_prog = fluid.Program() - infer_prog = fluid.Program() - - with fluid.program_guard(infer_prog, startup_prog): - with fluid.unique_name.guard(): - image = create_input(args.img_size) - out = create_model(args, model, image, class_dim=args.class_dim) - - infer_prog = infer_prog.clone(for_test=True) - fluid.load( - program=infer_prog, model_path=args.pretrained_model, executor=exe) - - model_path = os.path.join(args.output_path, "ppcls_model") - conf_path = os.path.join(args.output_path, "ppcls_client_conf") - serving_io.save_model(model_path, conf_path, {"image": image}, - {"prediction": out}, infer_prog) - - -if __name__ == "__main__": - main() diff --git a/tools/infer/infer.py b/tools/infer/infer.py deleted file mode 100644 index 241cb3c3a06356d1518752629ec765f8de531f3d..0000000000000000000000000000000000000000 --- a/tools/infer/infer.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numpy as np -import cv2 -import os -import sys - -import paddle -import paddle.nn.functional as F - -__dir__ = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(__dir__) -sys.path.append(os.path.abspath(os.path.join(__dir__, '../..'))) - -from ppcls.utils.save_load import load_dygraph_pretrain -from ppcls.utils import logger -from ppcls.arch import backbone -from utils import parse_args, get_image_list, preprocess, postprocess, save_prelabel_results - - -def main(): - args = parse_args() - # assign the place - place = paddle.set_device('gpu' if args.use_gpu else 'cpu') - multilabel = True if args.multilabel else False - - net = backbone.__dict__[args.model](class_dim=args.class_num) - load_dygraph_pretrain(net, args.pretrained_model, args.load_static_weights) - image_list = get_image_list(args.image_file) - batch_input_list = [] - img_path_list = [] - cnt = 0 - for idx, img_path in enumerate(image_list): - img = cv2.imread(img_path) - if img is None: - logger.warning( - "Image file failed to read and has been skipped. The path: {}". - format(img_path)) - continue - else: - img = img[:, :, ::-1] - data = preprocess(img, args) - batch_input_list.append(data) - img_path_list.append(img_path) - cnt += 1 - - if cnt % args.batch_size == 0 or (idx + 1) == len(image_list): - batch_tensor = paddle.to_tensor(batch_input_list) - net.eval() - batch_outputs = net(batch_tensor) - if args.model == "GoogLeNet": - batch_outputs = batch_outputs[0] - if multilabel: - batch_outputs = F.sigmoid(batch_outputs) - else: - batch_outputs = F.softmax(batch_outputs) - batch_outputs = batch_outputs.numpy() - batch_result_list = postprocess(batch_outputs, args.top_k, multilabel=multilabel) - - for number, result_dict in enumerate(batch_result_list): - filename = img_path_list[number].split("/")[-1] - clas_ids = result_dict["clas_ids"] - if multilabel: - print("File:{}, multilabel result: ".format(filename)) - for id, score in zip(clas_ids, result_dict["scores"]): - print("\tclass id: {}, probability: {:.2f}".format(id, score)) - else: - scores_str = "[{}]".format(", ".join("{:.2f}".format( - r) for r in result_dict["scores"])) - print("File:{}, Top-{} result: class id(s): {}, score(s): {}". - format(filename, args.top_k, clas_ids, scores_str)) - - if args.pre_label_image: - save_prelabel_results(clas_ids[0], img_path_list[number], - args.pre_label_out_idr) - - batch_input_list = [] - img_path_list = [] - - -if __name__ == "__main__": - main() diff --git a/tools/program.py b/tools/program.py deleted file mode 100644 index 731aa044479350314d5c1e3d4d6da02c6f10ffd1..0000000000000000000000000000000000000000 --- a/tools/program.py +++ /dev/null @@ -1,446 +0,0 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import time -import datetime -from collections import OrderedDict - -import paddle -from paddle import to_tensor -import paddle.nn.functional as F - -from ppcls.optimizer import LearningRateBuilder -from ppcls.optimizer import OptimizerBuilder -from ppcls.arch import backbone -from ppcls.arch.loss import MultiLabelLoss -from ppcls.arch.loss import CELoss -from ppcls.arch.loss import MixCELoss -from ppcls.arch.loss import JSDivLoss -from ppcls.arch.loss import GoogLeNetLoss -from ppcls.utils.misc import AverageMeter -from ppcls.utils import logger -from ppcls.utils import profiler -from ppcls.utils import multi_hot_encode -from ppcls.utils import hamming_distance -from ppcls.utils import accuracy_score - - -def create_model(architecture, classes_num): - """ - Create a model - - Args: - architecture(dict): architecture information, - name(such as ResNet50) is needed - image(variable): model input variable - classes_num(int): num of classes - - Returns: - out(variable): model output variable - """ - name = architecture["name"] - params = architecture.get("params", {}) - return backbone.__dict__[name](class_dim=classes_num, **params) - - -def create_loss(feeds, - out, - architecture, - classes_num=1000, - epsilon=None, - use_mix=False, - use_distillation=False, - multilabel=False): - """ - Create a loss for optimization, such as: - 1. CrossEnotry loss - 2. CrossEnotry loss with label smoothing - 3. CrossEnotry loss with mix(mixup, cutmix, fmix) - 4. CrossEnotry loss with label smoothing and (mixup, cutmix, fmix) - 5. GoogLeNet loss - - Args: - out(variable): model output variable - feeds(dict): dict of model input variables - architecture(dict): architecture information, - name(such as ResNet50) is needed - classes_num(int): num of classes - epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 - use_mix(bool): whether to use mix(include mixup, cutmix, fmix) - - Returns: - loss(variable): loss variable - """ - if architecture["name"] == "GoogLeNet": - assert len(out) == 3, "GoogLeNet should have 3 outputs" - loss = GoogLeNetLoss(class_dim=classes_num, epsilon=epsilon) - return loss(out[0], out[1], out[2], feeds["label"]) - - if use_distillation: - assert len(out) == 2, ("distillation output length must be 2, " - "but got {}".format(len(out))) - loss = JSDivLoss(class_dim=classes_num, epsilon=epsilon) - return loss(out[1], out[0]) - - if use_mix: - loss = MixCELoss(class_dim=classes_num, epsilon=epsilon) - feed_y_a = feeds['y_a'] - feed_y_b = feeds['y_b'] - feed_lam = feeds['lam'] - return loss(out, feed_y_a, feed_y_b, feed_lam) - else: - if not multilabel: - loss = CELoss(class_dim=classes_num, epsilon=epsilon) - else: - loss = MultiLabelLoss(class_dim=classes_num, epsilon=epsilon) - return loss(out, feeds["label"]) - - -def create_metric(out, - label, - architecture, - topk=5, - classes_num=1000, - use_distillation=False, - multilabel=False, - mode="train", - use_xpu=False): - """ - Create measures of model accuracy, such as top1 and top5 - - Args: - out(variable): model output variable - feeds(dict): dict of model input variables(included label) - topk(int): usually top5 - classes_num(int): num of classes - use_distillation(bool): whether to use distillation training - mode(str): mode, train/valid - - Returns: - fetchs(dict): dict of measures - """ - if architecture["name"] == "GoogLeNet": - assert len(out) == 3, "GoogLeNet should have 3 outputs" - out = out[0] 
- else: - # just need student label to get metrics - if use_distillation: - out = out[1] - softmax_out = F.softmax(out) - - fetch_list = [] - metric_names = [] - if not multilabel: - softmax_out = F.softmax(out) - - # set top1 to fetchs - top1 = paddle.metric.accuracy(softmax_out, label=label, k=1) - # set topk to fetchs - k = min(topk, classes_num) - topk = paddle.metric.accuracy(softmax_out, label=label, k=k) - - metric_names.append("top1") - metric_names.append("top{}".format(k)) - - fetch_list.append(top1) - fetch_list.append(topk) - else: - out = F.sigmoid(out) - preds = multi_hot_encode(out.numpy()) - targets = label.numpy() - ham_dist = to_tensor(hamming_distance(preds, targets)) - accuracy = to_tensor(accuracy_score(preds, targets, base="label")) - - ham_dist_name = "hamming_distance" - accuracy_name = "multilabel_accuracy" - metric_names.append(ham_dist_name) - metric_names.append(accuracy_name) - - fetch_list.append(accuracy) - fetch_list.append(ham_dist) - - # multi cards' eval - if not use_xpu: - if mode != "train" and paddle.distributed.get_world_size() > 1: - for idx, fetch in enumerate(fetch_list): - fetch_list[idx] = paddle.distributed.all_reduce( - fetch, op=paddle.distributed.ReduceOp. - SUM) / paddle.distributed.get_world_size() - - fetchs = OrderedDict() - for idx, name in enumerate(metric_names): - fetchs[name] = fetch_list[idx] - return fetchs - - -def create_fetchs(feeds, net, config, mode="train"): - """ - Create fetchs as model outputs(included loss and measures), - will call create_loss and create_metric(if use_mix). - - Args: - out(variable): model output variable - feeds(dict): dict of model input variables. - If use mix_up, it will not include label. - architecture(dict): architecture information, - name(such as ResNet50) is needed - topk(int): usually top5 - classes_num(int): num of classes - epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 - use_mix(bool): whether to use mix(include mixup, cutmix, fmix) - - Returns: - fetchs(dict): dict of model outputs(included loss and measures) - """ - architecture = config.ARCHITECTURE - topk = config.topk - classes_num = config.classes_num - epsilon = config.get('ls_epsilon') - use_mix = config.get('use_mix') and mode == 'train' - use_distillation = config.get('use_distillation') - multilabel = config.get('multilabel', False) - use_xpu = config.get("use_xpu", False) - - out = net(feeds["image"]) - - fetchs = OrderedDict() - fetchs['loss'] = create_loss(feeds, out, architecture, classes_num, - epsilon, use_mix, use_distillation, - multilabel) - if not use_mix: - metric = create_metric( - out, - feeds["label"], - architecture, - topk, - classes_num, - use_distillation, - multilabel=multilabel, - mode=mode, - use_xpu=use_xpu) - fetchs.update(metric) - - return fetchs - - -def create_optimizer(config, parameter_list=None): - """ - Create an optimizer using config, usually including - learning rate and regularization. 
- - Args: - config(dict): such as - { - 'LEARNING_RATE': - {'function': 'Cosine', - 'params': {'lr': 0.1} - }, - 'OPTIMIZER': - {'function': 'Momentum', - 'params':{'momentum': 0.9}, - 'regularizer': - {'function': 'L2', 'factor': 0.0001} - } - } - - Returns: - an optimizer instance - """ - # create learning_rate instance - lr_config = config['LEARNING_RATE'] - lr_config['params'].update({ - 'epochs': config['epochs'], - 'step_each_epoch': - config['total_images'] // config['TRAIN']['batch_size'], - }) - lr = LearningRateBuilder(**lr_config)() - - # create optimizer instance - opt_config = config['OPTIMIZER'] - opt = OptimizerBuilder(**opt_config) - return opt(lr, parameter_list), lr - - -def create_feeds(batch, use_mix, num_classes, multilabel=False): - image = batch[0] - if use_mix: - y_a = to_tensor(batch[1].numpy().astype("int64").reshape(-1, 1)) - y_b = to_tensor(batch[2].numpy().astype("int64").reshape(-1, 1)) - lam = to_tensor(batch[3].numpy().astype("float32").reshape(-1, 1)) - feeds = {"image": image, "y_a": y_a, "y_b": y_b, "lam": lam} - else: - if not multilabel: - label = to_tensor(batch[1].numpy().astype("int64").reshape(-1, 1)) - else: - label = to_tensor(batch[1].numpy().astype('float32').reshape( - -1, num_classes)) - feeds = {"image": image, "label": label} - return feeds - - -total_step = 0 - - -def run(dataloader, - config, - net, - optimizer=None, - lr_scheduler=None, - epoch=0, - mode='train', - vdl_writer=None, - profiler_options=None): - """ - Feed data to the model and fetch the measures and loss - - Args: - dataloader(paddle dataloader): - exe(): - program(): - fetchs(dict): dict of measures and the loss - epoch(int): epoch of training or validation - model(str): log only - - Returns: - """ - print_interval = config.get("print_interval", 10) - use_mix = config.get("use_mix", False) and mode == "train" - multilabel = config.get("multilabel", False) - classes_num = config.get("classes_num") - - metric_list = [ - ("loss", AverageMeter( - 'loss', '7.5f', postfix=",")), - ("lr", AverageMeter( - 'lr', 'f', postfix=",", need_avg=False)), - ("batch_time", AverageMeter( - 'batch_cost', '.5f', postfix=" s,")), - ("reader_time", AverageMeter( - 'reader_cost', '.5f', postfix=" s,")), - ] - if not use_mix: - if not multilabel: - topk_name = 'top{}'.format(config.topk) - metric_list.insert( - 0, (topk_name, AverageMeter( - topk_name, '.5f', postfix=","))) - metric_list.insert( - 0, ("top1", AverageMeter( - "top1", '.5f', postfix=","))) - else: - metric_list.insert( - 0, ("multilabel_accuracy", AverageMeter( - "multilabel_accuracy", '.5f', postfix=","))) - metric_list.insert( - 0, ("hamming_distance", AverageMeter( - "hamming_distance", '.5f', postfix=","))) - - metric_list = OrderedDict(metric_list) - - tic = time.time() - for idx, batch in enumerate(dataloader()): - # avoid statistics from warmup time - if idx == 10: - metric_list["batch_time"].reset() - metric_list["reader_time"].reset() - - profiler.add_profiler_step(profiler_options) - - metric_list['reader_time'].update(time.time() - tic) - batch_size = len(batch[0]) - feeds = create_feeds(batch, use_mix, classes_num, multilabel) - fetchs = create_fetchs(feeds, net, config, mode) - if mode == 'train': - avg_loss = fetchs['loss'] - avg_loss.backward() - - optimizer.step() - optimizer.clear_grad() - lr_value = optimizer._global_learning_rate().numpy()[0] - metric_list['lr'].update(lr_value, batch_size) - - if lr_scheduler is not None: - if lr_scheduler.update_specified: - curr_global_counter = lr_scheduler.step_each_epoch * 
epoch + idx - update = max( - 0, curr_global_counter - lr_scheduler.update_start_step - ) % lr_scheduler.update_step_interval == 0 - if update: - lr_scheduler.step() - else: - lr_scheduler.step() - - for name, fetch in fetchs.items(): - metric_list[name].update(fetch.numpy()[0], batch_size) - metric_list["batch_time"].update(time.time() - tic) - tic = time.time() - - if vdl_writer and mode == "train": - global total_step - logger.scaler( - name="lr", value=lr_value, step=total_step, writer=vdl_writer) - for name, fetch in fetchs.items(): - logger.scaler( - name="train_{}".format(name), - value=fetch.numpy()[0], - step=total_step, - writer=vdl_writer) - total_step += 1 - - fetchs_str = ' '.join([ - str(metric_list[key].mean) - if "time" in key else str(metric_list[key].value) - for key in metric_list - ]) - - if idx % print_interval == 0: - ips_info = "ips: {:.5f} images/sec".format( - batch_size / metric_list["batch_time"].avg) - - if mode == "train": - epoch_str = "epoch:{:<3d}".format(epoch) - step_str = "{:s} step:{:<4d}".format(mode, idx) - eta_sec = ((config["epochs"] - epoch) * len(dataloader) - idx - ) * metric_list["batch_time"].avg - eta_str = "eta: {:s}".format( - str(datetime.timedelta(seconds=int(eta_sec)))) - logger.info("{:s}, {:s}, {:s} {:s}, {:s}".format( - epoch_str, step_str, fetchs_str, ips_info, eta_str)) - else: - logger.info("{:s} step:{:<4d}, {:s} {:s}".format( - mode, idx, fetchs_str, ips_info)) - - end_str = ' '.join([str(m.mean) for m in metric_list.values()] + - [metric_list['batch_time'].total]) - ips_info = "ips: {:.5f} images/sec.".format( - batch_size * metric_list["batch_time"].count / - metric_list["batch_time"].sum) - - if mode == 'eval': - logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info)) - else: - end_epoch_str = "END epoch:{:<3d}".format(epoch) - logger.info("{:s} {:s} {:s} {:s}".format(end_epoch_str, mode, end_str, - ips_info)) - - # return top1_acc in order to save the best model - if mode == 'valid': - if multilabel: - return metric_list['multilabel_accuracy'].avg - else: - return metric_list['top1'].avg diff --git a/tools/run.sh b/tools/run.sh deleted file mode 100755 index 345b62758f447f9084442f4cdd681dd0bbdd8e74..0000000000000000000000000000000000000000 --- a/tools/run.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -python3.7 -m paddle.distributed.launch \ - --gpus="0,1,2,3" \ - tools/train.py \ - -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml \ - -o print_interval=10 diff --git a/tools/run_download.sh b/tools/run_download.sh deleted file mode 100755 index ffcbd88c742a023f214e1e91bb5445af63b6a603..0000000000000000000000000000000000000000 --- a/tools/run_download.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -python tools/download.py -a ResNet34 -p ./pretrained/ -d 1 diff --git a/tools/train.sh b/tools/train.sh new file mode 100755 index 0000000000000000000000000000000000000000..5fced8636235d533bdadcdbb40769733930a0763 --- /dev/null +++ b/tools/train.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# for single card train +# python3.7 tools/train.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml + +# for multi-cards train +python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" tools/train.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml \ No newline at end of file
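The recurring change in the MobileNetV3 config hunks above is twofold: a warmup_epoch field is added under the cosine learning-rate schedule, and (in the hunks shown, for every file except MobileNetV3_large_x1_0.yaml, which instead halves learning_rate to 0.65 and batch_size to 256) a DecodeImage op is prepended to transform_ops for both the Train and Eval dataloaders. A sketch of the resulting config fragments, assuming the indentation used elsewhere in the PaddleClas ImageNet configs:

Optimizer:
  lr:
    name: Cosine
    learning_rate: 1.3        # 0.65 in MobileNetV3_large_x1_0.yaml
    warmup_epoch: 5

DataLoader:
  Train:
    dataset:
      transform_ops:
        - DecodeImage:        # decode before any resize/crop op
            to_rgb: True
            channel_first: False
        - RandCropImage:
            size: 224

The new tools/train.sh and the rewritten tools/eval.sh follow the same layout: a commented single-card command and an active multi-card python3.7 -m paddle.distributed.launch command, both pointing at the relocated ppcls/configs/ImageNet configs.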