# ppyolo_mbv3_large_coco.yml
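# PP-YOLO with a MobileNetV3-large backbone, trained on COCO detection.
# Keys in this file override the settings inherited from the _BASE_ configs below.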
_BASE_: [
  '../datasets/coco_detection.yml',
  '../runtime.yml',
  './_base_/ppyolo_mbv3_large.yml',
  './_base_/optimizer_1x.yml',
  './_base_/ppyolo_reader.yml',
]

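# Save a checkpoint every 10 epochs; `weights` is the default path used for eval/export.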
snapshot_epoch: 10
weights: output/ppyolo_mbv3_large_coco/model_final

# AMP training
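# (master_grad keeps FP32 master gradients when mixed-precision training is enabled)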
master_grad: true

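# Training pipeline: Mixup is applied for the first 200 epochs (mixup_epoch),
# each batch is randomly resized to one of the sizes in 224-512, and
# Gt2YoloTarget builds targets for the two YOLO heads at strides 32 and 16.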
TrainReader:
  inputs_def:
    num_max_boxes: 90
  sample_transforms:
    - Decode: {}
    - Mixup: {alpha: 1.5, beta: 1.5}
    - RandomDistort: {}
    - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}
    - RandomCrop: {}
    - RandomFlip: {}
  batch_transforms:
    - BatchRandomResize:
        target_size: [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]
        random_size: True
        random_interp: True
        keep_ratio: False
    - NormalizeBox: {}
    - PadBox: {num_max_boxes: 90}
    - BboxXYXY2XYWH: {}
    - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
    - Permute: {}
    - Gt2YoloTarget:
        anchor_masks: [[3, 4, 5], [0, 1, 2]]
        anchors: [[11, 18], [34, 47], [51, 126], [115, 71], [120, 195], [254, 235]]
        downsample_ratios: [32, 16]
        iou_thresh: 0.25
        num_classes: 80
  batch_size: 32
  mixup_epoch: 200
  shuffle: true

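# Evaluation uses a fixed 320x320 input; interp 2 corresponds to cubic interpolation.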
EvalReader:
  sample_transforms:
    - Decode: {}
    - Resize: {target_size: [320, 320], keep_ratio: False, interp: 2}
    - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
    - Permute: {}
  batch_size: 8

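# Inference uses the same 320x320 preprocessing as evaluation; image_shape fixes
# the input spec when the model is exported.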
TestReader:
  inputs_def:
    image_shape: [3, 320, 320]
  sample_transforms:
    - Decode: {}
    - Resize: {target_size: [320, 320], keep_ratio: False, interp: 2}
    - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
    - Permute: {}
  batch_size: 1

epoch: 270

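# Base LR 0.005, decayed by 10x at epochs 162 and 216 over the 270-epoch schedule,
# after a 4000-step linear warmup starting from 0.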
LearningRate:
  base_lr: 0.005
  schedulers:
  - !PiecewiseDecay
    gamma: 0.1
    milestones:
    - 162
    - 216
  - !LinearWarmup
    start_factor: 0.
    steps: 4000

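# SGD with momentum 0.9 and L2 weight decay 0.0005.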
OptimizerBuilder:
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.0005
    type: L2