# global configs
Global:
  checkpoints: null
  pretrained_model: null
  output_dir: "./output/mo"
  device: "gpu"
  save_interval: 5
  eval_during_train: True
  eval_interval: 1
  epochs: 30
  print_batch_step: 20
  use_visualdl: False
  # used for static mode and model export
  image_shape: [3, 192, 256]
  save_inference_dir: "./inference"
  use_multilabel: True

# mixed precision training
AMP:
  scale_loss: 128.0
  use_dynamic_loss_scaling: True
  # O1: mixed fp16
  level: O1

# model architecture
Arch:
  name: "Res2Net200_vd_26w_4s"
  pretrained: True
  class_num: 19
  infer_add_softmax: False

# loss function config for training/eval process
Loss:
  Train:
    - MultiLabelLoss:
        weight: 1.0
        weight_ratio: True
        size_sum: True
  Eval:
    - MultiLabelLoss:
        weight: 1.0
        weight_ratio: True
        size_sum: True

Optimizer:
  name: Momentum
  momentum: 0.9
  lr:
    name: Cosine
    learning_rate: 0.01
    warmup_epoch: 5
  regularizer:
    name: 'L2'
    coeff: 0.0005

# data loader for train and eval
DataLoader:
  Train:
    dataset:
      name: MultiLabelDataset
      image_root: "dataset/VeRi/"
      cls_label_path: "dataset/VeRi/train_list.txt"
      label_ratio: True
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - ResizeImage:
            size: [256, 192]
        - Padv2:
            size: [276, 212]
            pad_mode: 1
            fill_value: 0
        - RandomCropImage:
            size: [256, 192]
        - RandFlipImage:
            flip_code: 1
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: True
      shuffle: True
    loader:
      num_workers: 8
      use_shared_memory: True

  Eval:
    dataset:
      name: MultiLabelDataset
      image_root: "dataset/VeRi/"
      cls_label_path: "dataset/VeRi/test_list.txt"
      label_ratio: True
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - ResizeImage:
            size: [256, 192]
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: False
      shuffle: False
    loader:
      num_workers: 8
      use_shared_memory: True

Metric:
  Eval:
    - ATTRMetric:
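
# Usage sketch (an assumption, not part of the original config): a file like
# this is normally consumed by the PaddleClas training entry point, with the
# config path passed via -c; "path/to/this_config.yaml" below is a placeholder
# for wherever this file is saved inside a PaddleClas checkout.
#
#   # single-GPU training
#   python3 tools/train.py -c path/to/this_config.yaml
#
#   # multi-GPU training with paddle.distributed.launch
#   python3 -m paddle.distributed.launch --gpus="0,1,2,3" \
#       tools/train.py -c path/to/this_config.yaml
#
# Individual fields can typically be overridden on the command line with -o,
# e.g. -o Global.epochs=30 -o DataLoader.Train.sampler.batch_size=64.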