After converting my own dataset to COCO format, why does training keep re-downloading the COCO dataset instead of using my own data?
Created by: gaooolianggg
The config file is as follows:
architecture: YOLOv3
use_gpu: true
max_iters: 10000
log_smooth_window: 20
save_dir: output
snapshot_iter: 200
metric: COCO
pretrain_weights: http://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar
finetune_exclude_pretrained_params: 'yolo_output'
weights: output/yolov3_mobilenet_v1/model_final
num_classes: 1
use_fine_grained_loss: false

YOLOv3:
  backbone: MobileNet
  yolo_head: YOLOv3Head

MobileNet:
  norm_type: sync_bn
  norm_decay: 0.
  conv_group_scale: 1
  with_extra_blocks: false

YOLOv3Head:
  anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
  anchors: [[10, 13], [16, 30], [33, 23],
            [30, 61], [62, 45], [59, 119],
            [116, 90], [156, 198], [373, 326]]
  norm_decay: 0.
  yolo_loss: YOLOv3Loss
  nms:
    background_label: -1
    keep_top_k: 100
    nms_threshold: 0.45
    nms_top_k: 1000
    normalized: false
    score_threshold: 0.01

YOLOv3Loss:
  # batch_size here is only used for the fine-grained loss, not for the
  # training batch size; that is set in configs/yolov3_reader.yml under
  # TrainReader.batch_size. batch_size here should be set to the same
  # value as TrainReader.batch_size.
  batch_size: 8
  ignore_thresh: 0.7
  label_smooth: true
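  # Note that TrainReader.batch_size below is 1, so the value 8 above only
  # matters if use_fine_grained_loss is switched to true.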

LearningRate:
  base_lr: 0.00001
  schedulers:
  - !PiecewiseDecay
    gamma: 0.1
    milestones:
    - 15000
    - 18000
  - !LinearWarmup
    start_factor: 0.
    steps: 100

OptimizerBuilder:
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.0005
    type: L2

_READER_: 'yolov3_reader.yml'
TrainReader:
  inputs_def:
    image_shape: [3, 608, 608]
    fields: ['image', 'gt_bbox', 'gt_class', 'gt_score']
    num_max_boxes: 50
  dataset:
    !COCODataSet
    dataset_dir: D:/Paddle/PaddleDetection/ppdet//data/tools/cocomeSausage
    anno_path: ./annotations/instances_train.json
    image_dir: ./train
    with_background: false
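    # Note: dataset_dir contains a double slash ("ppdet//data") and both
    # anno_path and image_dir are prefixed with "./"; worth verifying that
    # the joined paths actually exist on disk (see the check after the config).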
  sample_transforms:
  - !DecodeImage
    to_rgb: true
    with_mixup: true
  - !MixupImage
    alpha: 1.5
    beta: 1.5
  - !ColorDistort {}
  batch_transforms:
  - !RandomShape
    sizes: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]
    random_inter: True
  batch_size: 1
  shuffle: true
  mixup_epoch: 250
  worker_num: 8
  use_process: true
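
Since the question is why COCO keeps being downloaded, a minimal sanity check on the paths configured above may help. It assumes that PaddleDetection joins anno_path and image_dir onto dataset_dir and falls back to auto-downloading COCO when the resulting paths do not exist locally; the snippet is only a sketch of that assumption, not PaddleDetection's own resolution code:

import os

# Values copied verbatim from the TrainReader section above.
dataset_dir = "D:/Paddle/PaddleDetection/ppdet//data/tools/cocomeSausage"
anno_path = "./annotations/instances_train.json"
image_dir = "./train"

# Join the relative entries onto dataset_dir the way a COCO reader would
# (assumption; PaddleDetection's actual logic lives in ppdet).
checks = {
    "dataset_dir": dataset_dir,
    "anno_path": os.path.join(dataset_dir, anno_path),
    "image_dir": os.path.join(dataset_dir, image_dir),
}

for name, path in checks.items():
    # normpath collapses the "//" and "./" segments for readable output.
    print(f"{name}: {os.path.normpath(path)} -> exists: {os.path.exists(path)}")

If any line prints False (the double slash in dataset_dir is one candidate), that would be consistent with the trainer treating the local dataset as missing and downloading COCO instead.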