From 74e6c8aa33926c4f3550dc15976f6a890129e12b Mon Sep 17 00:00:00 2001 From: gaotingquan Date: Wed, 10 May 2023 15:32:39 +0000 Subject: [PATCH] add fp32 and ampo2 ultra configs --- ...> MobileNetV3_small_x1_0_ampo2_ultra.yaml} | 0 .../MobileNetV3_small_x1_0_fp32_ultra.yaml | 131 +++++++++++++++++ ...tra.yaml => PPLCNet_x1_0_ampo2_ultra.yaml} | 0 .../PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml | 131 +++++++++++++++++ ...0_ultra.yaml => ResNet50_ampo2_ultra.yaml} | 0 .../ImageNet/ResNet/ResNet50_fp32_ultra.yaml | 134 ++++++++++++++++++ ...l_x1_0_ampo2_ultra_train_infer_python.txt} | 0 ...all_x1_0_fp32_ultra_train_infer_python.txt | 61 ++++++++ ...t_small_ampo2_ultra_train_infer_python.txt | 61 ++++++++ ...et_small_fp32_ultra_train_infer_python.txt | 61 ++++++++ ...t_x1_0_ampo2_ultra_train_infer_python.txt} | 0 ...Net_x1_0_fp32_ultra_train_infer_python.txt | 61 ++++++++ ...sNet50_ampo2_ultra_train_infer_python.txt} | 0 ...ResNet50_fp32_ultra_train_infer_python.txt | 62 ++++++++ ...ow7_224_ampo2_ultra_train_infer_python.txt | 62 ++++++++ ...dow7_224_fp32_ultra_train_infer_python.txt | 62 ++++++++ 16 files changed, 826 insertions(+) rename ppcls/configs/ImageNet/MobileNetV3/{MobileNetV3_small_x1_0_ultra.yaml => MobileNetV3_small_x1_0_ampo2_ultra.yaml} (100%) create mode 100644 ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml rename ppcls/configs/ImageNet/PPLCNet/{PPLCNet_x1_0_ultra.yaml => PPLCNet_x1_0_ampo2_ultra.yaml} (100%) create mode 100644 ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml rename ppcls/configs/ImageNet/ResNet/{ResNet50_ultra.yaml => ResNet50_ampo2_ultra.yaml} (100%) create mode 100644 ppcls/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml rename test_tipc/configs/MobileNetV3/{MobileNetV3_small_x1_0_ultra_train_infer_python.txt => MobileNetV3_small_x1_0_ampo2_ultra_train_infer_python.txt} (100%) create mode 100644 test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra_train_infer_python.txt create mode 100644 test_tipc/configs/PPHGNet/PPHGNet_small_ampo2_ultra_train_infer_python.txt create mode 100644 test_tipc/configs/PPHGNet/PPHGNet_small_fp32_ultra_train_infer_python.txt rename test_tipc/configs/PPLCNet/{PPLCNet_x1_0_ultra_train_infer_python.txt => PPLCNet_x1_0_ampo2_ultra_train_infer_python.txt} (100%) create mode 100644 test_tipc/configs/PPLCNet/PPLCNet_x1_0_fp32_ultra_train_infer_python.txt rename test_tipc/configs/ResNet/{ResNet50_ultra_train_infer_python.txt => ResNet50_ampo2_ultra_train_infer_python.txt} (100%) create mode 100644 test_tipc/configs/ResNet/ResNet50_fp32_ultra_train_infer_python.txt create mode 100644 test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_ampo2_ultra_train_infer_python.txt create mode 100644 test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_fp32_ultra_train_infer_python.txt diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ultra.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra.yaml similarity index 100% rename from ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ultra.yaml rename to ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra.yaml diff --git a/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml new file mode 100644 index 00000000..41e9dbc2 --- /dev/null +++ b/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml @@ -0,0 +1,131 @@ +# global configs +Global: 
+  checkpoints: null
+  pretrained_model: null
+  output_dir: ./output/
+  device: gpu
+  save_interval: 1
+  eval_during_train: True
+  eval_interval: 1
+  epochs: 360
+  print_batch_step: 10
+  use_visualdl: False
+  use_dali: True
+  # used for static mode and model export
+  image_shape: [3, 224, 224]
+  save_inference_dir: ./inference
+
+# model architecture
+Arch:
+  name: MobileNetV3_small_x1_0
+  class_num: 1000
+
+# loss function config for training/eval process
+Loss:
+  Train:
+    - CELoss:
+        weight: 1.0
+        epsilon: 0.1
+  Eval:
+    - CELoss:
+        weight: 1.0
+
+
+Optimizer:
+  name: Momentum
+  momentum: 0.9
+  lr:
+    name: Cosine
+    learning_rate: 5.2
+    warmup_epoch: 5
+  regularizer:
+    name: 'L2'
+    coeff: 0.00002
+
+
+# data loader for train and eval
+DataLoader:
+  Train:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/ILSVRC2012/
+      cls_label_path: ./dataset/ILSVRC2012/train_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - RandCropImage:
+            size: 224
+        - RandFlipImage:
+            flip_code: 1
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 1024
+      drop_last: False
+      shuffle: True
+    loader:
+      num_workers: 16
+      use_shared_memory: True
+
+  Eval:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/ILSVRC2012/
+      cls_label_path: ./dataset/ILSVRC2012/val_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            resize_short: 256
+        - CropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 256
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 4
+      use_shared_memory: True
+
+Infer:
+  infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+  batch_size: 10
+  transforms:
+    - DecodeImage:
+        to_rgb: True
+        channel_first: False
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 1.0/255.0
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ''
+    - ToCHWImage:
+  PostProcess:
+    name: Topk
+    topk: 5
+    class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
+
+Metric:
+  Train:
+    - TopkAcc:
+        topk: [1, 5]
+  Eval:
+    - TopkAcc:
+        topk: [1, 5]
diff --git a/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ultra.yaml b/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ampo2_ultra.yaml
similarity index 100%
rename from ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ultra.yaml
rename to ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ampo2_ultra.yaml
diff --git a/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml b/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml
new file mode 100644
index 00000000..c53e3055
--- /dev/null
+++ b/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml
@@ -0,0 +1,131 @@
+# global configs
+Global:
+  checkpoints: null
+  pretrained_model: null
+  output_dir: ./output/
+  device: gpu
+  save_interval: 1
+  eval_during_train: True
+  eval_interval: 1
+  epochs: 360
+  print_batch_step: 10
+  use_visualdl: False
+  use_dali: True
+  # used for static mode and model export
+  image_shape: [3, 224, 224]
+  save_inference_dir: ./inference
+
+# model architecture
+Arch:
+  name: PPLCNet_x1_0
+  class_num: 1000
+
+# loss function config for training/eval process
+Loss:
+  Train:
+    - CELoss:
+        weight: 1.0
+        epsilon: 0.1
+  Eval:
+    - CELoss:
+        weight: 1.0
+
+
+Optimizer:
+  name: Momentum
+  momentum: 0.9
+  lr:
+    name: Cosine
+    learning_rate: 1.6
+    warmup_epoch: 5
+  regularizer:
+    name: 'L2'
+    coeff: 0.00003
+
+
+# data loader for train and eval
+DataLoader:
+  Train:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/ILSVRC2012/
+      cls_label_path: ./dataset/ILSVRC2012/train_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - RandCropImage:
+            size: 224
+        - RandFlipImage:
+            flip_code: 1
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 512
+      drop_last: False
+      shuffle: True
+    loader:
+      num_workers: 16
+      use_shared_memory: True
+
+  Eval:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/ILSVRC2012/
+      cls_label_path: ./dataset/ILSVRC2012/val_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            resize_short: 256
+        - CropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 256
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 4
+      use_shared_memory: True
+
+Infer:
+  infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+  batch_size: 10
+  transforms:
+    - DecodeImage:
+        to_rgb: True
+        channel_first: False
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 1.0/255.0
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ''
+    - ToCHWImage:
+  PostProcess:
+    name: Topk
+    topk: 5
+    class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
+
+Metric:
+  Train:
+    - TopkAcc:
+        topk: [1, 5]
+  Eval:
+    - TopkAcc:
+        topk: [1, 5]
diff --git a/ppcls/configs/ImageNet/ResNet/ResNet50_ultra.yaml b/ppcls/configs/ImageNet/ResNet/ResNet50_ampo2_ultra.yaml
similarity index 100%
rename from ppcls/configs/ImageNet/ResNet/ResNet50_ultra.yaml
rename to ppcls/configs/ImageNet/ResNet/ResNet50_ampo2_ultra.yaml
diff --git a/ppcls/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml b/ppcls/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml
new file mode 100644
index 00000000..fd166940
--- /dev/null
+++ b/ppcls/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml
@@ -0,0 +1,134 @@
+# global configs
+Global:
+  checkpoints: null
+  pretrained_model: null
+  output_dir: ./output/
+  device: gpu
+  save_interval: 1
+  eval_during_train: True
+  eval_interval: 1
+  epochs: 120
+  print_batch_step: 10
+  use_visualdl: False
+  use_dali: True
+  # used for static mode and model export
+  image_shape: [3, 224, 224]
+  save_inference_dir: ./inference
+  # training model under @to_static
+  to_static: False
+
+# model architecture
+Arch:
+  name: ResNet50
+  class_num: 1000
+
+# loss function config for training/eval process
+Loss:
+  Train:
+    - CELoss:
+        weight: 1.0
+  Eval:
+    - CELoss:
+        weight: 1.0
+
+
+Optimizer:
+  name: Momentum
+  momentum: 0.9
+  lr:
+    name: Piecewise
+    learning_rate: 0.8
+    decay_epochs: [30, 60, 90]
+    values: [0.8, 0.08, 0.008, 0.0008]
+    warmup_epoch: 5
+  regularizer:
+    name: 'L2'
+    coeff: 0.0001
+
+
+# data loader for train and eval
+DataLoader:
+  Train:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/ILSVRC2012/
+      cls_label_path: ./dataset/ILSVRC2012/train_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - RandCropImage:
+            size: 224
+        - RandFlipImage:
+            flip_code: 1
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 256
+      drop_last: False
+      shuffle: True
+    loader:
+      num_workers: 16
+      use_shared_memory: True
+
+  Eval:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/ILSVRC2012/
+      cls_label_path: ./dataset/ILSVRC2012/val_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            resize_short: 256
+        - CropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 64
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 4
+      use_shared_memory: True
+
+Infer:
+  infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+  batch_size: 10
+  transforms:
+    - DecodeImage:
+        to_rgb: True
+        channel_first: False
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 1.0/255.0
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ''
+    - ToCHWImage:
+  PostProcess:
+    name: Topk
+    topk: 5
+    class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
+
+Metric:
+  Train:
+    - TopkAcc:
+        topk: [1, 5]
+  Eval:
+    - TopkAcc:
+        topk: [1, 5]
diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_ultra_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra_train_infer_python.txt
similarity index 100%
rename from test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_ultra_train_infer_python.txt
rename to test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra_train_infer_python.txt
diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra_train_infer_python.txt
new file mode 100644
index 00000000..e1161294
--- /dev/null
+++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra_train_infer_python.txt
@@ -0,0 +1,61 @@
+===========================train_params===========================
+model_name:MobileNetV3_small_x1_0_ultra
+python:python3.10
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================train_benchmark_params==========================
+batch_size:1024
+fp_items:fp32
+epoch:1
+model_type:norm_train
+num_workers:16
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_ampo2_ultra_train_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_ampo2_ultra_train_infer_python.txt
new file mode 100644
index 00000000..6f6f946c
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_ampo2_ultra_train_infer_python.txt
@@ -0,0 +1,61 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.10
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp16
+epoch:1
+model_type:norm_train
+num_workers:16
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_fp32_ultra_train_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_fp32_ultra_train_infer_python.txt
new file mode 100644
index 00000000..7e97dcd2
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_fp32_ultra_train_infer_python.txt
@@ -0,0 +1,61 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.10
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32
+epoch:1
+model_type:norm_train
+num_workers:16
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_ultra_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_ampo2_ultra_train_infer_python.txt
similarity index 100%
rename from test_tipc/configs/PPLCNet/PPLCNet_x1_0_ultra_train_infer_python.txt
rename to test_tipc/configs/PPLCNet/PPLCNet_x1_0_ampo2_ultra_train_infer_python.txt
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_fp32_ultra_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_fp32_ultra_train_infer_python.txt
new file mode 100644
index 00000000..09a5f745
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_fp32_ultra_train_infer_python.txt
@@ -0,0 +1,61 @@
+===========================train_params===========================
+model_name:PPLCNet_x1_0_ultra
+python:python3.10
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================train_benchmark_params==========================
+batch_size:512
+fp_items:fp32
+epoch:1
+model_type:norm_train
+num_workers:16
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/ResNet/ResNet50_ultra_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_ampo2_ultra_train_infer_python.txt
similarity index 100%
rename from test_tipc/configs/ResNet/ResNet50_ultra_train_infer_python.txt
rename to test_tipc/configs/ResNet/ResNet50_ampo2_ultra_train_infer_python.txt
diff --git a/test_tipc/configs/ResNet/ResNet50_fp32_ultra_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_fp32_ultra_train_infer_python.txt
new file mode 100644
index 00000000..6720e5e1
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_fp32_ultra_train_infer_python.txt
@@ -0,0 +1,62 @@
+===========================train_params===========================
+model_name:ResNet50_ultra
+python:python3.10
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:256
+fp_items:fp32
+epoch:1
+model_type:norm_train
+num_workers:16
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_ampo2_ultra_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_ampo2_ultra_train_infer_python.txt
new file mode 100644
index 00000000..932db246
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_ampo2_ultra_train_infer_python.txt
@@ -0,0 +1,62 @@
+===========================train_params===========================
+model_name:SwinTransformer_base_patch4_window7_224
+python:python3.10
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp16
+epoch:1
+model_type:norm_train
+num_workers:16
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_fp32_ultra_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_fp32_ultra_train_infer_python.txt
new file mode 100644
index 00000000..af4beef9
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_fp32_ultra_train_infer_python.txt
@@ -0,0 +1,62 @@
+===========================train_params===========================
+model_name:SwinTransformer_base_patch4_window7_224
+python:python3.10
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:64
+fp_items:fp32
+epoch:1
+model_type:norm_train
+num_workers:16
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
--
GitLab
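Note on the naming: each *_ampo2_* and *_fp32_* pair describes the same training recipe at two precisions. The ampo2 variants are meant for AMP level-O2 runs (hence fp_items:fp16, and a larger benchmark batch size where memory allows, e.g. 128 vs 64 for SwinTransformer_base_patch4_window7_224), while the fp32 variants keep plain float32; the visible PPHGNet and SwinTransformer pairs differ only in their train_benchmark_params entries. As a rough illustration of what that switch means at the framework level, the dygraph sketch below uses Paddle's public AMP API. It is not the ppcls trainer, and the ResNet50/Momentum/0.8 values are placeholders borrowed from ResNet50_fp32_ultra.yaml.

```python
# Illustrative only: one training step in the two precision modes named by these
# configs. Not taken from ppcls; hyper-parameters are placeholders mirroring
# ResNet50_fp32_ultra.yaml. Intended to run on GPU.
import paddle
import paddle.nn.functional as F

USE_AMP_O2 = True  # "ampo2" configs; set to False for the "fp32" configs

model = paddle.vision.models.resnet50(num_classes=1000)
optimizer = paddle.optimizer.Momentum(
    learning_rate=0.8, momentum=0.9, parameters=model.parameters())

if USE_AMP_O2:
    # Level O2 keeps most weights/activations in float16 and needs loss scaling.
    model, optimizer = paddle.amp.decorate(
        models=model, optimizers=optimizer, level='O2')
    scaler = paddle.amp.GradScaler(init_loss_scaling=128.0)

images = paddle.randn([8, 3, 224, 224])
labels = paddle.randint(0, 1000, [8])

if USE_AMP_O2:
    with paddle.amp.auto_cast(level='O2'):
        loss = F.cross_entropy(model(images), labels)
    scaler.scale(loss).backward()  # scaled backward to avoid fp16 underflow
    scaler.step(optimizer)         # unscales gradients, then updates parameters
    scaler.update()
else:
    loss = F.cross_entropy(model(images), labels)
    loss.backward()
    optimizer.step()

optimizer.clear_grad()
print(float(loss))
```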
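For the step-decay recipe in ResNet50_fp32_ultra.yaml, the schedule implied by its numbers (warmup_epoch 5, decay_epochs [30, 60, 90], values [0.8, 0.08, 0.008, 0.0008]) can be sanity-checked with a few lines of standalone Python. This only mirrors the YAML values and assumes the usual linear warmup from 0; it is not the ppcls Piecewise/warmup implementation, which may step per iteration rather than per epoch.

```python
# Standalone sketch of the LR schedule described by ResNet50_fp32_ultra.yaml.
# Assumption: linear warmup from 0 to the base LR over the first 5 epochs.
def lr_at_epoch(epoch,
                warmup_epochs=5,
                decay_epochs=(30, 60, 90),
                values=(0.8, 0.08, 0.008, 0.0008)):
    if epoch < warmup_epochs:
        return values[0] * (epoch + 1) / warmup_epochs
    stage = sum(epoch >= e for e in decay_epochs)  # how many boundaries passed
    return values[stage]

if __name__ == "__main__":
    for e in (0, 4, 5, 29, 30, 60, 90, 119):
        print(f"epoch {e:3d}: lr {lr_at_epoch(e):.4f}")
```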