architecture: YOLOv4
use_gpu: true
max_iters: 70000
log_smooth_window: 20
save_dir: output
snapshot_iter: 2000
metric: VOC
pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/CSPDarkNet53_pretrained.pdparams
weights: output/yolov4_cspdarknet_voc/model_final
num_classes: 20
use_fine_grained_loss: true

YOLOv4:
  backbone: CSPDarkNet
  yolo_head: YOLOv4Head

CSPDarkNet:
  norm_type: sync_bn
  norm_decay: 0.
  depth: 53

YOLOv4Head:
  anchors: [[12, 16], [19, 36], [40, 28],
            [36, 75], [76, 55], [72, 146],
            [142, 110], [192, 243], [459, 401]]
  anchor_masks: [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
  nms:
    background_label: -1
    keep_top_k: -1
    nms_threshold: 0.45
    nms_top_k: -1
    normalized: true
    score_threshold: 0.001
  downsample: [8, 16, 32]
  scale_x_y: [1.2, 1.1, 1.05]

YOLOv3Loss:
  # batch_size here is only used by the fine-grained loss; it does not set the
  # training batch size. The training batch size is TrainReader.batch_size in
  # configs/yolov3_reader.yml, and this value should be kept equal to it.
  batch_size: 8
  ignore_thresh: 0.7
  label_smooth: false
  downsample: [8, 16, 32]
  scale_x_y: [1.2, 1.1, 1.05]
  iou_loss: IouLoss
  ignore_class_score_thresh: 0.25

IouLoss:
  loss_weight: 0.07
  max_height: 608
  max_width: 608
  ciou_term: true
  loss_square: false

LearningRate:
  base_lr: 0.0013
  schedulers:
  - !PiecewiseDecay
    gamma: 0.1
    milestones:
    - 56000
    - 62000
  - !LinearWarmup
    start_factor: 0.
    steps: 1000

OptimizerBuilder:
  clip_grad_by_norm: 10.
  optimizer:
    momentum: 0.949
    type: Momentum
  regularizer:
    factor: 0.0005
    type: L2

_READER_: '../yolov3_reader.yml'

TrainReader:
  inputs_def:
    fields: ['image', 'gt_bbox', 'gt_class', 'gt_score']
    num_max_boxes: 90
    use_fine_grained_loss: true
  dataset:
    !VOCDataSet
    anno_path: trainval.txt
    dataset_dir: dataset/voc
    with_background: false
  sample_transforms:
    - !DecodeImage
      to_rgb: True
      with_mosaic: True
      with_mixup: True
    - !MosaicImage
      offset: 0.3
      mosaic_scale: [0.8, 1.0]
      sample_scale: [0.3, 1.0]
      sample_flip: 0.5
      use_cv2: true
      interp: 2
    - !MixupImage
      alpha: 1.5
      beta: 1.5
    - !ColorDistort {}
    - !RandomExpand
      fill_value: [123.675, 116.28, 103.53]
    - !RandomCrop {}
    - !RandomFlipImage
      is_normalized: false
    - !NormalizeBox {}
    - !PadBox
      num_max_boxes: 90
    - !BboxXYXY2XYWH {}
  batch_transforms:
    - !RandomShape
      sizes: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]
      random_inter: True
    - !NormalizeImage
      mean: [0.485, 0.456, 0.406]
      std: [0.229, 0.224, 0.225]
      is_scale: True
      is_channel_first: false
    - !Permute
      to_bgr: false
      channel_first: True
    # Gt2YoloTarget is only used when use_fine_grained_loss is true; this
    # operator is removed automatically if use_fine_grained_loss is false.
    - !Gt2YoloTarget
      anchor_masks: [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
      anchors: [[12, 16], [19, 36], [40, 28],
                [36, 75], [76, 55], [72, 146],
                [142, 110], [192, 243], [459, 401]]
      downsample_ratios: [8, 16, 32]
      num_classes: 20
      iou_thresh: 0.213
  batch_size: 8
  mixup_epoch: 250
  mosaic_prob: 0.3
  mosaic_epoch: 300
  shuffle: true
  drop_last: true
  worker_num: 8
  bufsize: 16
  use_process: true
  drop_empty: false

EvalReader:
  inputs_def:
    fields: ['image', 'im_size', 'im_id', 'gt_bbox', 'gt_class', 'is_difficult']
    num_max_boxes: 90
  dataset:
    !VOCDataSet
    anno_path: test.txt
    dataset_dir: dataset/voc
    use_default_label: true
    with_background: false
  sample_transforms:
    - !DecodeImage
      to_rgb: True
    - !ResizeImage
      target_size: 608
      interp: 2
    - !NormalizeImage
      mean: [0.485, 0.456, 0.406]
      std: [0.229, 0.224, 0.225]
      is_scale: True
      is_channel_first: false
    - !PadBox
      num_max_boxes: 90
    - !Permute
      to_bgr: false
      channel_first: True
  batch_size: 8
  drop_empty: false
  worker_num: 8
  bufsize: 16

TestReader:
  inputs_def:
    image_shape: [3, 608, 608]
    fields: ['image', 'im_size', 'im_id']
  dataset:
    !ImageFolder
    use_default_label: true
    with_background: false
  sample_transforms:
    - !DecodeImage
      to_rgb: True
    - !ResizeImage
      target_size: 608
      interp: 1
    - !NormalizeImage
      mean: [0.485, 0.456, 0.406]
      std: [0.229, 0.224, 0.225]
      is_scale: True
      is_channel_first: false
    - !Permute
      to_bgr: false
      channel_first: True
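
# Usage sketch (assumptions: this file sits under configs/yolov4/ in a
# PaddleDetection checkout whose static-graph tools expose tools/train.py,
# tools/eval.py and tools/infer.py, and dataset/voc has been prepared;
# the config filename and image path below are placeholders, adjust to
# your layout):
#
#   # train
#   python tools/train.py -c configs/yolov4/yolov4_cspdarknet_voc.yml
#
#   # evaluate the saved weights on the VOC test split
#   python tools/eval.py -c configs/yolov4/yolov4_cspdarknet_voc.yml \
#       -o weights=output/yolov4_cspdarknet_voc/model_final
#
#   # single-image inference
#   python tools/infer.py -c configs/yolov4/yolov4_cspdarknet_voc.yml \
#       --infer_img=path/to/image.jpg \
#       -o weights=output/yolov4_cspdarknet_voc/model_final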