# global configs
Global:
  checkpoints: null
  pretrained_model: null
  output_dir: "./output/"
  device: "gpu"
  class_num: 1000
  save_interval: 1
  eval_during_train: True
  eval_interval: 1
  epochs: 90
  print_batch_step: 10
  use_visualdl: False
  # used for static mode and model export
  image_shape: [3, 224, 224]
  save_inference_dir: "./inference"

# model architecture
Arch:
  name: "VGG11"

# loss function config for training/eval process
Loss:
  Train:
    - CELoss:
        weight: 1.0
  Eval:
    - CELoss:
        weight: 1.0

Optimizer:
  name: Momentum
  momentum: 0.9
  lr:
    name: Cosine
    learning_rate: 0.1
  regularizer:
    name: 'L2'
    coeff: 0.0002

# data loader for train and eval
DataLoader:
  Train:
    dataset:
      name: ImageNetDataset
      image_root: "./dataset/ILSVRC2012/"
      cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
      transform_ops:
        # decode raw image bytes first so the spatial ops below receive a
        # decoded image (assumed; mirrors the Infer pipeline in this file)
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - RandCropImage:
            size: 224
        - RandFlipImage:
            flip_code: 1
        - NormalizeImage:
            scale: 0.00392157  # 1/255: rescale pixels to [0, 1] before mean/std
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: False
      shuffle: True
    loader:
      num_workers: 6
      use_shared_memory: False

  Eval:
    # TODO: modify to the latest trainer
    dataset:
      name: ImageNetDataset
      image_root: "./dataset/ILSVRC2012/"
      cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - ResizeImage:
            resize_short: 256
        - CropImage:
            size: 224
        - NormalizeImage:
            scale: 0.00392157
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 64
      drop_last: False
      shuffle: False
    loader:
      num_workers: 6
      use_shared_memory: False

Infer:
  infer_imgs: "docs/images/whl/demo.jpg"
  batch_size: 10
  transforms:
    - DecodeImage:
        to_rgb: True
        channel_first: False
    - ResizeImage:
        resize_short: 256
    - CropImage:
        size: 224
    - NormalizeImage:
        scale: 1.0/255.0
        mean: [0.485, 0.456, 0.406]
        std: [0.229, 0.224, 0.225]
        order: ''
    - ToCHWImage:
  PostProcess:
    name: Topk
    topk: 5
    class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"

Metric:
  Train:
    - TopkAcc:
        topk: [1, 5]
  Eval:
    - TopkAcc:
        topk: [1, 5]
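
# ------------------------------------------------------------------
# Usage sketch (assumed PaddleClas repo layout; the config path below
# is hypothetical -- point -c at wherever this file actually lives):
#
#   # single-GPU training
#   python tools/train.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml
#
#   # multi-GPU training (DistributedBatchSampler above handles sharding)
#   python -m paddle.distributed.launch --gpus "0,1,2,3" \
#       tools/train.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml
#
#   # evaluation, overriding the weights to load via -o
#   python tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml \
#       -o Global.pretrained_model=./output/VGG11/best_model
#
#   # export an inference model (uses Global.image_shape and
#   # Global.save_inference_dir above)
#   python tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml \
#       -o Global.pretrained_model=./output/VGG11/best_model
# ------------------------------------------------------------------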