# ppyolo_r18vd_coco.yml
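# PP-YOLO with a ResNet18-vd backbone trained on COCO. The _BASE_ files below supply
# the dataset, runtime, model, optimizer, and reader defaults; this file overrides the
# train/eval/test pipelines and the training schedule.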
_BASE_: [
  '../datasets/coco_detection.yml',
  '../runtime.yml',
  './_base_/ppyolo_r18vd.yml',
  './_base_/optimizer_1x.yml',
  './_base_/ppyolo_reader.yml',
]

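# Save a checkpoint every 10 epochs; `weights` is the default model path used for eval/infer.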
snapshot_epoch: 10
weights: output/ppyolo_r18vd_coco/model_final

TrainReader:
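  # Per-sample augmentation: Mixup, color distortion, random expand/crop, and horizontal flip.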
  sample_transforms:
    - Decode: {}
    - Mixup: {alpha: 1.5, beta: 1.5}
    - RandomDistort: {}
    - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}
    - RandomCrop: {}
    - RandomFlip: {}
  batch_transforms:
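    # Multi-scale training: each batch is resized to one size randomly drawn from target_size.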
    - BatchRandomResize:
        target_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]
        random_size: True
        random_interp: True
        keep_ratio: False
    - NormalizeBox: {}
    - PadBox: {num_max_boxes: 50}
    - BboxXYXY2XYWH: {}
    - NormalizeImage:
        mean: [0.485, 0.456, 0.406]
        std: [0.229, 0.224, 0.225]
        is_scale: True
    - Permute: {}
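    # Encode the padded, normalized GT boxes into YOLO targets for the two heads (stride 32 and 16).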
    - Gt2YoloTarget:
        anchor_masks: [[3, 4, 5], [0, 1, 2]]
        anchors: [[10, 14], [23, 27], [37, 58], [81, 82], [135, 169], [344, 319]]
        downsample_ratios: [32, 16]

  batch_size: 32
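  # Mixup is applied for the first `mixup_epoch` epochs; 500 > 270 total epochs, so it stays on for the whole run.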
  mixup_epoch: 500
  shuffle: true

EvalReader:
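  # Evaluation uses a fixed 512x512 resize with the same ImageNet mean/std normalization.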
  sample_transforms:
    - Decode: {}
    - Resize: {target_size: [512, 512], keep_ratio: False, interp: 2}
    - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
    - Permute: {}
  batch_size: 8

TestReader:
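  # image_shape fixes the network input at 3x512x512 (typically used when exporting the model for deployment).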
  inputs_def:
    image_shape: [3, 512, 512]
  sample_transforms:
    - Decode: {}
    - Resize: {target_size: [512, 512], keep_ratio: False, interp: 2}
    - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
    - Permute: {}
  batch_size: 1

epoch: 270

LearningRate:
  base_lr: 0.004
  schedulers:
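  # Decay the LR by 10x at epochs 162 and 216, after a 4000-step linear warmup from 0.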
  - !PiecewiseDecay
    gamma: 0.1
    milestones:
    - 162
    - 216
  - !LinearWarmup
    start_factor: 0.
    steps: 4000

OptimizerBuilder:
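  # SGD with momentum 0.9 and L2 weight decay 5e-4.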
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.0005
    type: L2