# EfficientNetV2_S.yaml
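# a typical multi-GPU launch for this config (the config path below is an assumption;
# adjust it to where this file lives in your PaddleClas checkout):
#   python -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" tools/train.py \
#       -c ppcls/configs/ImageNet/EfficientNetV2/EfficientNetV2_S.yaml
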
# global configs
Global:
  checkpoints: null
  pretrained_model: null
  output_dir: ./output/
  device: gpu
  save_interval: 1
  eval_during_train: True
  eval_interval: 1
  epochs: 350
  print_batch_step: 20
  use_visualdl: False
  train_mode: progressive  # progressive training
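  # progressive training gradually increases the input size and the RandAugment
  # magnitude over four stages; see progress_size and progress_magnitude in
  # DataLoader.Train.dataset.transform_ops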
  # used for static mode and model export
  image_shape: [3, 384, 384]
  save_inference_dir: ./inference

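# automatic mixed precision (AMP) training settings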
AMP:
  scale_loss: 65536
  use_dynamic_loss_scaling: True
  # O1: mixed fp16
  level: O1

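# keep an exponential moving average (EMA) of the model weights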
EMA:
  decay: 0.9999

# model architecture
Arch:
  name: EfficientNetV2_S
  class_num: 1000
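  # synchronize BatchNorm statistics across GPUs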
  use_sync_bn: True

# loss function config for training/eval process
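# CELoss with epsilon 0.1 applies label smoothing to the training loss only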
Loss:
  Train:
    - CELoss:
        weight: 1.0
        epsilon: 0.1
  Eval:
    - CELoss:
        weight: 1.0

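# SGD with momentum, cosine learning rate decay with a 5-epoch warmup, and L2 weight decay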
Optimizer:
  name: Momentum
  momentum: 0.9
  lr:
    name: Cosine
    learning_rate: 0.65  # tuned for 8 GPUs x 128 batch size per GPU
    warmup_epoch: 5
  regularizer:
    name: L2
    coeff: 0.00001

# data loader for train and eval
DataLoader:
  Train:
    dataset:
      name: ImageNetDataset
      image_root: ./dataset/ILSVRC2012/
      cls_label_path: ./dataset/ILSVRC2012/train_list.txt
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - RandCropImage:
            size: 171
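            # per-stage crop sizes used by progressive training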
            progress_size: [171, 214, 257, 300]
            scale: [0.05, 1.0]
        - RandFlipImage:
            flip_code: 1
        - RandAugmentV2:
            num_layers: 2
            magnitude: 5.0
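            # per-stage RandAugment magnitudes used by progressive training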
            progress_magnitude: [5.0, 8.3333333333, 11.66666666667, 15.0]
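        # map raw 0-255 pixel values to roughly [-1, 1]: (x - 128) / 128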
        - NormalizeImage:
            scale: 1.0
            mean: [128.0, 128.0, 128.0]
            std: [128.0, 128.0, 128.0]
            order: ""

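    # batch_size is per GPU; with 8 GPUs the global batch size is 1024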
    sampler:
      name: DistributedBatchSampler
      batch_size: 128
      drop_last: True
      shuffle: True
    loader:
      num_workers: 8
      use_shared_memory: True

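  # evaluation runs at the full 384x384 resolution given in Global.image_shape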
  Eval:
    dataset:
      name: ImageNetDataset
      image_root: ./dataset/ILSVRC2012/
      cls_label_path: ./dataset/ILSVRC2012/val_list.txt
      transform_ops:
        - DecodeImage:
            to_rgb: True
            channel_first: False
        - CropImageAtRatio:
            size: 384
            pad: 32
            interpolation: bilinear
        - NormalizeImage:
            scale: 1.0
            mean: [128.0, 128.0, 128.0]
            std: [128.0, 128.0, 128.0]
            order: ""
    sampler:
      name: DistributedBatchSampler
      batch_size: 128
      drop_last: False
      shuffle: False
    loader:
      num_workers: 8
      use_shared_memory: True

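# inference settings; Topk post-processing maps the top-5 predictions to ImageNet-1k labels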
Infer:
  infer_imgs: docs/images/inference_deployment/whl_demo.jpg
  batch_size: 10
  transforms:
    - DecodeImage:
        to_rgb: True
        channel_first: False
    - CropImageAtRatio:
        size: 384
        pad: 32
        interpolation: bilinear
    - NormalizeImage:
        scale: 1.0
        mean: [128.0, 128.0, 128.0]
        std: [128.0, 128.0, 128.0]
        order: ""
  PostProcess:
    name: Topk
    topk: 5
    class_id_map_file: ppcls/utils/imagenet1k_label_list.txt

Metric:
  Train:
    - TopkAcc:
        topk: [1, 5]
  Eval:
    - TopkAcc:
        topk: [1, 5]