# global configs
Global:
  checkpoints: null
  pretrained_model: null
  output_dir: "./output/"
  device: "gpu"
  save_interval: 5
  eval_during_train: True
  eval_interval: 1
  epochs: 20
  print_batch_step: 20
  use_visualdl: False
  # used for static mode and model export
  image_shape: [3, 256, 192]
  save_inference_dir: "./inference"
  use_multilabel: True

# model architecture
Arch:
  name: "MobileNetV3_large_x1_0"
  pretrained: True
  class_num: 26

# loss function config for training/eval process
Loss:
  Train:
    - MultiLabelLoss:
        weight: 1.0
        weight_ratio: True
        size_sum: True
  Eval:
    - MultiLabelLoss:
        weight: 1.0
        weight_ratio: True
        size_sum: True

Optimizer:
  name: Momentum
  momentum: 0.9
  lr:
    name: Cosine
    learning_rate: 0.01
    warmup_epoch: 5
  regularizer:
    name: 'L2'
    coeff: 0.0005
  #clip_norm: 10

# data loader for train and eval
DataLoader:
  Train:
    dataset:
      name: MultiLabelDataset
      image_root: "dataset/attribute/data/"
      cls_label_path: "dataset/attribute/pa100k_train_list.txt"
      label_ratio: True
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - ResizeImage:
            size: [192, 256]
        - Padv2:
            size: [212, 276]
            pad_mode: 1
            fill_value: 0
        - RandomCropImage:
            size: [192, 256]
        - RandFlipImage:
            flip_code: 1
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: True
      shuffle: True
    loader:
      num_workers: 4
      use_shared_memory: True

  Eval:
    dataset:
      name: MultiLabelDataset
      image_root: "dataset/attribute/data/"
      cls_label_path: "dataset/attribute/pa100k_val_list.txt"
      label_ratio: True
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - ResizeImage:
            size: [192, 256]
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: False
      shuffle: False
    loader:
      num_workers: 4
      use_shared_memory: True

Metric:
  Eval:
    - ATTRMetric:
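
# Usage sketch (assumption, not part of the original config): this file follows the
# PaddleClas config layout, so with a PaddleClas checkout training and evaluation
# would typically be launched as shown below. The config path is a placeholder and
# the best_model path assumes the default output_dir/Arch.name checkpoint layout.
#   python3 tools/train.py -c path/to/this_config.yaml
#   python3 tools/eval.py  -c path/to/this_config.yaml \
#       -o Global.pretrained_model=./output/MobileNetV3_large_x1_0/best_model
# The -o flag overrides any key in this file from the command line, e.g.
# -o Global.epochs=40 or -o DataLoader.Train.sampler.batch_size=32.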