From 5bb2e8c339cad16136e310d5c5a0285f20383470 Mon Sep 17 00:00:00 2001
From: gaotingquan
Date: Fri, 26 May 2023 04:02:17 +0000
Subject: [PATCH] add clip tipc config

---
 ...atch16_224_finetune_train_infer_python.txt | 62 +++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 test_tipc/configs/CLIP/CLIP_vit_base_patch16_224_finetune_train_infer_python.txt

diff --git a/test_tipc/configs/CLIP/CLIP_vit_base_patch16_224_finetune_train_infer_python.txt b/test_tipc/configs/CLIP/CLIP_vit_base_patch16_224_finetune_train_infer_python.txt
new file mode 100644
index 00000000..c83ba82c
--- /dev/null
+++ b/test_tipc/configs/CLIP/CLIP_vit_base_patch16_224_finetune_train_infer_python.txt
@@ -0,0 +1,62 @@
+===========================train_params===========================
+model_name:CLIP_vit_base_patch16_224
+python:python
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/CLIP/CLIP_vit_base_patch16_224_finetune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/CLIP/CLIP_vit_base_patch16_224_finetune.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/CLIP/CLIP_vit_base_patch16_224_finetune.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/CLIP_vit_base_patch16_224.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32|amp
+epoch:1
+model_type:norm_train
+num_workers:12
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
--
GitLab
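
Usage note (not part of the patch itself): the config above is consumed by the TIPC driver scripts rather than run directly. A minimal sketch, assuming the standard PaddleClas TIPC entry points test_tipc/prepare.sh and test_tipc/test_train_inference_python.sh and the lite_train_lite_infer mode referenced in the "-o Global.epochs" line:

    # Run from the PaddleClas repo root (assumed layout).
    # 1) Download/prepare the data and pretrained weights needed for the chosen TIPC mode.
    bash test_tipc/prepare.sh test_tipc/configs/CLIP/CLIP_vit_base_patch16_224_finetune_train_infer_python.txt lite_train_lite_infer
    # 2) Run the train -> export -> Python inference chain described by this config.
    bash test_tipc/test_train_inference_python.sh test_tipc/configs/CLIP/CLIP_vit_base_patch16_224_finetune_train_infer_python.txt lite_train_lite_infer

Logs and exit status for each stage are written under test_tipc/output/ by the driver script in the standard TIPC workflow.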