Unverified commit 8a2367c9, authored by W wangguanzhong, committed by GitHub

enhance arg overwrite for picodet and ppyoloe (#7720)

Parent: e31935c8
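The gist of the change: the dataset and LR-scheduler entries drop the YAML tag constructors (`!COCODataSet`, `!ImageFolder`, `!CosineDecay`, `!LinearWarmup`) in favor of a plain `name:` key, so they parse as ordinary nested dicts and can be overridden from the command line like any other config field. A minimal sketch of the kind of override this enables, assuming a standard PaddleDetection checkout (the config file and dataset paths below are illustrative placeholders, not part of this commit):

```shell
# Hypothetical example: override nested TrainDataset fields via -o;
# with the dict-style (name:) entries these keys merge like any other option.
python tools/train.py \
    -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml \
    -o TrainDataset.dataset_dir=dataset/my_coco \
       TrainDataset.image_dir=train2017 \
       TrainDataset.anno_path=annotations/instances_train2017.json
```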
@@ -2,20 +2,20 @@ metric: COCO
 num_classes: 80

 TrainDataset:
-  !COCODataSet
+  name: COCODataSet
   image_dir: train2017
   anno_path: annotations/instances_train2017.json
   dataset_dir: dataset/coco
   data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']

 EvalDataset:
-  !COCODataSet
+  name: COCODataSet
   image_dir: val2017
   anno_path: annotations/instances_val2017.json
   dataset_dir: dataset/coco
   allow_empty: true

 TestDataset:
-  !ImageFolder
+  name: ImageFolder
   anno_path: annotations/instances_val2017.json # also support txt (like VOC's label_list.txt)
   dataset_dir: dataset/coco # if set, anno_path will be 'dataset_dir/anno_path'
@@ -2,19 +2,19 @@ metric: COCO
 num_classes: 80

 TrainDataset:
-  !COCODataSet
+  name: COCODataSet
   image_dir: train2017
   anno_path: annotations/instances_train2017.json
   dataset_dir: dataset/coco
   data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_poly', 'is_crowd']

 EvalDataset:
-  !COCODataSet
+  name: COCODataSet
   image_dir: val2017
   anno_path: annotations/instances_val2017.json
   dataset_dir: dataset/coco

 TestDataset:
-  !ImageFolder
+  name: ImageFolder
   anno_path: annotations/instances_val2017.json # also support txt (like VOC's label_list.txt)
   dataset_dir: dataset/coco # if set, anno_path will be 'dataset_dir/anno_path'
@@ -3,9 +3,9 @@ epoch: 300
 LearningRate:
   base_lr: 0.32
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 300
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.1
     steps: 300
@@ -3,9 +3,9 @@ epoch: 100
 LearningRate:
   base_lr: 0.4
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 100
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.1
     steps: 300
@@ -3,9 +3,9 @@ epoch: 300
 LearningRate:
   base_lr: 0.4
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 300
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.1
     steps: 300
@@ -35,9 +35,9 @@ PicoHeadV2:
 LearningRate:
   base_lr: 0.12
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 300
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.1
     steps: 300
@@ -18,8 +18,8 @@ TrainReader:
 LearningRate:
   base_lr: 0.24
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 300
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.1
     steps: 300
@@ -18,8 +18,8 @@ TrainReader:
 LearningRate:
   base_lr: 0.24
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 300
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.1
     steps: 300
@@ -38,8 +38,8 @@ TrainReader:
 LearningRate:
   base_lr: 0.28
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 300
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.1
     steps: 300
@@ -3,9 +3,9 @@ epoch: 300
 LearningRate:
   base_lr: 0.01
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 360
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.
     epochs: 5
@@ -3,9 +3,9 @@ epoch: 36
 LearningRate:
   base_lr: 0.00125
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 43
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.001
     steps: 2000
@@ -3,9 +3,9 @@ epoch: 400
 LearningRate:
   base_lr: 0.01
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 480
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.
     epochs: 5
@@ -3,9 +3,9 @@ epoch: 60
 LearningRate:
   base_lr: 0.001
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 72
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.
     epochs: 1
@@ -3,9 +3,9 @@ epoch: 80
 LearningRate:
   base_lr: 0.001
   schedulers:
-  - !CosineDecay
+  - name: CosineDecay
     max_epochs: 96
-  - !LinearWarmup
+  - name: LinearWarmup
     start_factor: 0.
     epochs: 5