# MobileViTv3_S.yaml
# global configs
Global:
  checkpoints: null
  pretrained_model: null
  output_dir: ./output/
  device: gpu
  save_interval: 1
  eval_during_train: True
  eval_interval: 1
  epochs: 300
  print_batch_step: 10
  use_visualdl: False
  # used for static mode and model export
  image_shape: [3, 256, 256]
  save_inference_dir: ./inference
  use_dali: False
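  # Export sketch: once trained, the model can be converted to an inference
  # model with PaddleClas' tools/export_model.py using the image_shape and
  # save_inference_dir above. The config and weight paths below are
  # illustrative assumptions, not fixed by this file:
  #   python3 tools/export_model.py \
  #       -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml \
  #       -o Global.pretrained_model=./output/MobileViTv3_S/best_model \
  #       -o Global.save_inference_dir=./inference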

# mixed precision training
AMP:
  scale_loss: 65536
  use_dynamic_loss_scaling: True
  # O1: mixed fp16
  level: O1
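  # Loss scaling guards against fp16 gradient underflow: the loss is scaled up
  # by scale_loss (65536) before backprop, and with use_dynamic_loss_scaling
  # the factor is adjusted automatically when overflows occur.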

# model ema
EMA:
  decay: 0.9995
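  # Standard EMA update sketch: shadow = decay * shadow + (1 - decay) * param;
  # with decay = 0.9995 the effective averaging window is roughly
  # 1 / (1 - decay) = 2000 training steps.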

# model architecture
Arch:
  name: MobileViTv3_S
  class_num: 1000
  dropout: 0.1

# loss function config for the train/eval process
Loss:
  Train:
    - CELoss:
        weight: 1.0
        epsilon: 0.1
  Eval:
    - CELoss:
        weight: 1.0
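  # epsilon: 0.1 in the training CELoss enables label smoothing; each target
  # becomes roughly (1 - 0.1) * one_hot + 0.1 / class_num, while the eval
  # CELoss is plain cross entropy.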

Optimizer:
  name: AdamW
  beta1: 0.9
  beta2: 0.999
  epsilon: 1e-8
  weight_decay: 0.01
  lr:
    # for 8 cards
    name: Cosine
    learning_rate: 0.002
    eta_min: 0.0002
    warmup_epoch: 1  # 3000 iterations
    warmup_start_lr: 0.0002
    # by_epoch: True
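    # Schedule sketch from the values above: linear warmup from 2e-4 to 2e-3
    # over the first epoch (~3000 iterations), then cosine decay from 2e-3
    # down to eta_min = 2e-4 across the remaining epochs.
    # The learning rate assumes 8 GPUs; a typical launch command (the config
    # path is an assumption) would be:
    #   python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" \
    #       tools/train.py -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml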

# data loader for train and eval
DataLoader:
  Train:
    dataset:
      name: MultiScaleDataset
      image_root: ./dataset/ILSVRC2012/
      cls_label_path: ./dataset/ILSVRC2012/train_list.txt
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - RandCropImage:
            size: 256
            interpolation: bilinear
            use_log_aspect: True
        - RandFlipImage:
            flip_code: 1
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.0, 0.0, 0.0]
            std: [1.0, 1.0, 1.0]
            order: ''
    # width and height can also be specified separately for each scale, e.g.:
    # scales: [(256,256), (160,160), (192,192), (224,224), (288,288), (320,320)]
    sampler:
      name: MultiScaleSampler
      scales: [256, 160, 192, 224, 288, 320]
      # first_bs: batch size for the first image resolution in the scales list
      #           (see the note after this block)
      # divided_factor: ensures the width and height of every scale are
      #                 divisible by the network's downsampling factor
      first_bs: 48
      divided_factor: 32
      is_training: True
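      # Note (assumption about MultiScaleSampler): first_bs applies to the
      # first scale (256); batch sizes for the other scales are typically
      # rescaled so that batch_size * H * W stays roughly constant, e.g.
      # about 48 * (256 * 256) / (160 * 160) ≈ 122 images per batch at 160.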
    loader:
      num_workers: 4
      use_shared_memory: True
  Eval:
    dataset: 
      name: ImageNetDataset
      image_root: ./dataset/ILSVRC2012/
      cls_label_path: ./dataset/ILSVRC2012/val_list.txt
      transform_ops:
        - DecodeImage:
            to_rgb: True  # keep decoding consistent with the Train/Infer pipelines
            channel_first: False
        - ResizeImage:
            resize_short: 288
            interpolation: bilinear
        - CropImage:
            size: 256
        - NormalizeImage:
            scale: 1.0/255.0
            mean: [0.0, 0.0, 0.0]
            std: [1.0, 1.0, 1.0]
            order: ''
    sampler:
      name: DistributedBatchSampler
      batch_size: 48
      drop_last: False
      shuffle: False
    loader:
      num_workers: 4
      use_shared_memory: True
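  # Evaluation sketch (config and weight paths are assumptions):
  #   python3 tools/eval.py \
  #       -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml \
  #       -o Global.pretrained_model=./output/MobileViTv3_S/best_model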

Infer:
  infer_imgs: docs/images/inference_deployment/whl_demo.jpg
  batch_size: 10
  transforms:
    - DecodeImage:
        to_rgb: True
        channel_first: False
    - ResizeImage:
        resize_short: 288
        interpolation: bilinear
    - CropImage:
        size: 256
    - NormalizeImage:
        scale: 1.0/255.0
        mean: [0.0, 0.0, 0.0]
        std: [1.0, 1.0, 1.0]
        order: ''
    - ToCHWImage:
  PostProcess:
    name: Topk
    topk: 5
    class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
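  # Prediction sketch using the Infer settings above (paths are assumptions):
  #   python3 tools/infer.py \
  #       -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml \
  #       -o Global.pretrained_model=./output/MobileViTv3_S/best_model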

Metric:
  Train:
    - TopkAcc:
        topk: [1, 5]
  Eval:
    - TopkAcc:
        topk: [1, 5]