architecture: CascadeRCNN
max_iters: 500000
snapshot_iter: 50000
use_gpu: true
log_smooth_window: 20
save_dir: output
pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_ssld_pretrained.tar
weights: output/cascade_rcnn_mobilenetv3_fpn_640/model_final
metric: COCO
num_classes: 81

CascadeRCNN:
  backbone: MobileNetV3RCNN
  fpn: FPN
  rpn_head: FPNRPNHead
  roi_extractor: FPNRoIAlign
  bbox_head: CascadeBBoxHead
  bbox_assigner: CascadeBBoxAssigner

MobileNetV3RCNN:
  norm_type: bn
  freeze_norm: true
  norm_decay: 0.0
  feature_maps: [2, 3, 4]
  conv_decay: 0.00001
  lr_mult_list: [1.0, 1.0, 1.0, 1.0, 1.0]
  scale: 1.0
  model_name: large

FPN:
  min_level: 2
  max_level: 6
  num_chan: 48
  has_extra_convs: true
  spatial_scale: [0.0625, 0.125, 0.25]

FPNRPNHead:
  anchor_generator:
    anchor_sizes: [32, 64, 128, 256, 512]
    aspect_ratios: [0.5, 1.0, 2.0]
    stride: [16.0, 16.0]
    variance: [1.0, 1.0, 1.0, 1.0]
  anchor_start_size: 24
  min_level: 2
  max_level: 6
  num_chan: 48
  rpn_target_assign:
    rpn_batch_size_per_im: 256
    rpn_fg_fraction: 0.5
    rpn_positive_overlap: 0.7
    rpn_negative_overlap: 0.3
    rpn_straddle_thresh: 0.0
  train_proposal:
    min_size: 0.0
    nms_thresh: 0.7
    pre_nms_top_n: 2000
    post_nms_top_n: 2000
  test_proposal:
    min_size: 0.0
    nms_thresh: 0.7
    pre_nms_top_n: 300
    post_nms_top_n: 100

FPNRoIAlign:
  canconical_level: 4
  canonical_size: 224
  min_level: 2
  max_level: 5
  box_resolution: 7
  sampling_ratio: 2

CascadeBBoxAssigner:
  batch_size_per_im: 512
  bbox_reg_weights: [10, 20, 30]
  bg_thresh_lo: [0.0, 0.0, 0.0]
  bg_thresh_hi: [0.5, 0.6, 0.7]
  fg_thresh: [0.5, 0.6, 0.7]
  fg_fraction: 0.25

CascadeBBoxHead:
  head: CascadeTwoFCHead
  bbox_loss: BalancedL1Loss
  nms:
    keep_top_k: 100
    nms_threshold: 0.5
    score_threshold: 0.05

BalancedL1Loss:
  alpha: 0.5
  gamma: 1.5
  beta: 1.0
  loss_weight: 1.0

CascadeTwoFCHead:
  mlp_dim: 128

LearningRate:
  base_lr: 0.02
  schedulers:
  - !CosineDecay
    max_iters: 500000
  - !LinearWarmup
    start_factor: 0.1
    steps: 500

OptimizerBuilder:
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.00004
    type: L2

TrainReader:
  inputs_def:
    fields: ['image', 'im_info', 'im_id', 'gt_bbox', 'gt_class', 'is_crowd']
  dataset:
    !COCODataSet
    image_dir: train2017
    anno_path: annotations/instances_train2017.json
    dataset_dir: dataset/coco
  sample_transforms:
  - !DecodeImage
    to_rgb: true
  - !RandomFlipImage
    prob: 0.5
  - !AutoAugmentImage
    autoaug_type: v1
  - !NormalizeImage
    is_channel_first: false
    is_scale: true
    mean: [0.485, 0.456, 0.406]
    std: [0.229, 0.224, 0.225]
  - !ResizeImage
    target_size: [416, 448, 480, 512, 544, 576, 608, 640, 672]
    max_size: 1000
    interp: 1
    use_cv2: true
  - !Permute
    to_bgr: false
    channel_first: true
  batch_transforms:
  - !PadBatch
    pad_to_stride: 32
    use_padded_im_info: false
  batch_size: 2
  shuffle: true
  worker_num: 2
  use_process: false

TestReader:
  inputs_def:
    # set image_shape if needed
    fields: ['image', 'im_info', 'im_id', 'im_shape']
  dataset:
    !ImageFolder
    anno_path: annotations/instances_val2017.json
  sample_transforms:
  - !DecodeImage
    to_rgb: true
    with_mixup: false
  - !NormalizeImage
    is_channel_first: false
    is_scale: true
    mean: [0.485, 0.456, 0.406]
    std: [0.229, 0.224, 0.225]
  - !ResizeImage
    interp: 1
    max_size: 640
    target_size: 640
    use_cv2: true
  - !Permute
    channel_first: true
    to_bgr: false
  batch_transforms:
  - !PadBatch
    pad_to_stride: 32
    use_padded_im_info: true
  batch_size: 1
  shuffle: false

EvalReader:
  inputs_def:
    fields: ['image', 'im_info', 'im_id', 'im_shape']
    # for voc
    #fields: ['image', 'im_info', 'im_id', 'gt_bbox', 'gt_class', 'is_difficult']
  dataset:
    !COCODataSet
    image_dir: val2017
    anno_path: annotations/instances_val2017.json
    dataset_dir: dataset/coco
  sample_transforms:
  - !DecodeImage
    to_rgb: true
    with_mixup: false
  - !NormalizeImage
    is_channel_first: false
    is_scale: true
    mean: [0.485, 0.456, 0.406]
    std: [0.229, 0.224, 0.225]
  - !ResizeImage
    interp: 1
    max_size: 640
    target_size: 640
    use_cv2: true
  - !Permute
    channel_first: true
    to_bgr: false
  batch_transforms:
  - !PadBatch
    pad_to_stride: 32
    use_padded_im_info: true
  batch_size: 1
  shuffle: false
  drop_empty: false
  worker_num: 2
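
# Usage sketch: assuming this config is saved as
# configs/mobile/cascade_rcnn_mobilenetv3_fpn_640.yml inside a static-graph
# PaddleDetection checkout (the path and filename are assumptions), training and
# evaluation would typically be launched with the repo's standard entry points:
#
#   python tools/train.py -c configs/mobile/cascade_rcnn_mobilenetv3_fpn_640.yml --eval
#   python tools/eval.py  -c configs/mobile/cascade_rcnn_mobilenetv3_fpn_640.yml
#
# Individual keys can be overridden on the command line with -o, e.g.
# `-o use_gpu=false` for a CPU run.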