Unverified commit 59f8b0fc, authored by: Kaipeng Deng, committed by: GitHub

Split config (#1679)

* split config into models, optimizers, dataset, readers,runtime
Parent commit: f2955696
metric: COCO
num_classes: 80
TrainDataset:
!COCODataSet
image_dir: train2017
anno_path: annotations/instances_train2017.json
dataset_dir: dataset/coco
EvalDataset:
!COCODataSet
image_dir: val2017
anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco
TestDataset:
!ImageFolder
anno_path: annotations/instances_val2017.json
architecture: MaskRCNN architecture: MaskRCNN
use_gpu: true
max_iters: 180000
log_iter: 20
save_dir: output
snapshot_iter: 10000
pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_cos_pretrained.tar pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_cos_pretrained.tar
metric: COCO
weights: output/mask_rcnn_r50_fpn_1x/model_final weights: output/mask_rcnn_r50_fpn_1x/model_final
num_classes: 81
load_static_weights: True load_static_weights: True
# Model Architecture
...@@ -126,25 +119,3 @@ MaskHead: ...@@ -126,25 +119,3 @@ MaskHead:
MaskPostProcess: MaskPostProcess:
mask_resolution: 28 mask_resolution: 28
# Train
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [120000, 160000]
- !LinearWarmup
start_factor: 0.3333333
steps: 500
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0001
type: L2
_READER_: 'mask_reader.yml'
architecture: YOLOv3 architecture: YOLOv3
use_gpu: true
max_iters: 500000
log_iter: 20
save_dir: output
snapshot_iter: 50000
metric: COCO
pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_pretrained.tar pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_pretrained.tar
weights: output/yolov3_darknet/model_final weights: output/yolov3_darknet/model_final
num_classes: 80
use_fine_grained_loss: false use_fine_grained_loss: false
load_static_weights: True load_static_weights: True
...@@ -48,25 +41,3 @@ BBoxPostProcess: ...@@ -48,25 +41,3 @@ BBoxPostProcess:
nms_top_k: 1000 nms_top_k: 1000
normalized: false normalized: false
background_label: -1 background_label: -1
LearningRate:
base_lr: 0.001
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones:
- 400000
- 450000
- !LinearWarmup
start_factor: 0.
steps: 4000
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0005
type: L2
_READER_: 'yolov3_reader.yml'
max_iters: 180000
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [120000, 160000]
- !LinearWarmup
start_factor: 0.3333333333333333
steps: 500
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0001
type: L2
max_iters: 500000
LearningRate:
base_lr: 0.001
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones:
- 400000
- 450000
- !LinearWarmup
start_factor: 0.
steps: 4000
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0005
type: L2
TrainReader: TrainReader:
inputs_def: inputs_def:
fields: ['image', 'im_info', 'im_id', 'gt_bbox', 'gt_class', 'is_crowd'] fields: ['image', 'im_info', 'im_id', 'gt_bbox', 'gt_class', 'is_crowd']
dataset:
!COCODataSet
image_dir: train2017
anno_path: annotations/instances_train2017.json
dataset_dir: dataset/coco
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: True to_rgb: True
...@@ -37,11 +32,6 @@ TrainReader: ...@@ -37,11 +32,6 @@ TrainReader:
EvalReader: EvalReader:
inputs_def: inputs_def:
fields: ['image', 'im_info', 'im_id', 'im_shape'] fields: ['image', 'im_info', 'im_id', 'im_shape']
dataset:
!COCODataSet
image_dir: val2017
anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: true to_rgb: true
...@@ -71,9 +61,6 @@ EvalReader: ...@@ -71,9 +61,6 @@ EvalReader:
TestReader: TestReader:
inputs_def: inputs_def:
fields: ['image', 'im_info', 'im_id', 'im_shape'] fields: ['image', 'im_info', 'im_id', 'im_shape']
dataset:
!ImageFolder
anno_path: annotations/instances_val2017.json
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: true to_rgb: true
......
TrainReader: TrainReader:
inputs_def: inputs_def:
fields: ['image', 'im_info', 'im_id', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_mask'] fields: ['image', 'im_info', 'im_id', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_mask']
dataset:
!COCODataSet
image_dir: train2017
anno_path: annotations/instances_train2017.json
dataset_dir: dataset/coco
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: true to_rgb: true
...@@ -39,11 +34,6 @@ TrainReader: ...@@ -39,11 +34,6 @@ TrainReader:
EvalReader: EvalReader:
inputs_def: inputs_def:
fields: ['image', 'im_info', 'im_id', 'im_shape'] fields: ['image', 'im_info', 'im_id', 'im_shape']
dataset:
!COCODataSet
image_dir: val2017
anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: true to_rgb: true
...@@ -74,9 +64,6 @@ EvalReader: ...@@ -74,9 +64,6 @@ EvalReader:
TestReader: TestReader:
inputs_def: inputs_def:
fields: ['image', 'im_info', 'im_id', 'im_shape'] fields: ['image', 'im_info', 'im_id', 'im_shape']
dataset:
!ImageFolder
anno_path: annotations/instances_val2017.json
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: true to_rgb: true
......
...@@ -2,12 +2,6 @@ TrainReader: ...@@ -2,12 +2,6 @@ TrainReader:
inputs_def: inputs_def:
fields: ['image', 'gt_bbox', 'gt_class', 'gt_score'] fields: ['image', 'gt_bbox', 'gt_class', 'gt_score']
num_max_boxes: 50 num_max_boxes: 50
dataset:
!COCODataSet
image_dir: train2017
anno_path: annotations/instances_train2017.json
dataset_dir: dataset/coco
with_background: false
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: True to_rgb: True
...@@ -59,12 +53,6 @@ EvalReader: ...@@ -59,12 +53,6 @@ EvalReader:
inputs_def: inputs_def:
fields: ['image', 'im_size', 'im_id'] fields: ['image', 'im_size', 'im_id']
num_max_boxes: 50 num_max_boxes: 50
dataset:
!COCODataSet
image_dir: val2017
anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco
with_background: false
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: True to_rgb: True
...@@ -90,10 +78,6 @@ TestReader: ...@@ -90,10 +78,6 @@ TestReader:
inputs_def: inputs_def:
image_shape: [3, 608, 608] image_shape: [3, 608, 608]
fields: ['image', 'im_size', 'im_id'] fields: ['image', 'im_size', 'im_id']
dataset:
!ImageFolder
anno_path: annotations/instances_val2017.json
with_background: false
sample_transforms: sample_transforms:
- !DecodeImage - !DecodeImage
to_rgb: True to_rgb: True
......
use_gpu: true
log_iter: 50
save_dir: output
snapshot_iter: 10000
architecture: CascadeRCNN
use_gpu: true
max_iters: 180000
log_iter: 50
save_dir: output
snapshot_iter: 10000
pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/resnet50.pdparams
metric: COCO
weights: output/cascade_rcnn_r50_1x/model_final
num_classes: 81
num_stages: 3
open_debug: False
# Model Architecture
CascadeRCNN:
# model anchor info flow
anchor: AnchorRPN
proposal: Proposal
mask: Mask
# model feat info flow
backbone: ResNet
rpn_head: RPNHead
bbox_head: BBoxHead
mask_head: MaskHead
ResNet:
norm_type: 'affine'
depth: 50
freeze_at: 'res2'
RPNHead:
rpn_feat:
name: RPNFeat
feat_in: 1024
feat_out: 1024
anchor_per_position: 15
BBoxHead:
bbox_feat:
name: BBoxFeat
feat_in: 1024
feat_out: 512
roi_extractor:
resolution: 14
sampling_ratio: 0
spatial_scale: 0.0625
extractor_type: 'RoIAlign'
MaskHead:
mask_feat:
name: MaskFeat
feat_in: 2048
feat_out: 256
feat_in: 256
resolution: 14
AnchorRPN:
anchor_generator:
name: AnchorGeneratorRPN
anchor_sizes: [32, 64, 128, 256, 512]
aspect_ratios: [0.5, 1.0, 2.0]
stride: [16.0, 16.0]
variance: [1.0, 1.0, 1.0, 1.0]
anchor_target_generator:
name: AnchorTargetGeneratorRPN
batch_size_per_im: 256
fg_fraction: 0.5
negative_overlap: 0.3
positive_overlap: 0.7
straddle_thresh: 0.0
Proposal:
proposal_generator:
name: ProposalGenerator
min_size: 0.0
nms_thresh: 0.7
train_pre_nms_top_n: 2000
train_post_nms_top_n: 2000
infer_pre_nms_top_n: 2000
infer_post_nms_top_n: 2000
return_rois_num: True
proposal_target_generator:
name: ProposalTargetGenerator
batch_size_per_im: 512
bbox_reg_weights: [[0.1, 0.1, 0.2, 0.2],[0.05, 0.05, 0.1, 0.1],[0.333333, 0.333333, 0.666666, 0.666666]]
bg_thresh_hi: [0.5, 0.6, 0.7]
bg_thresh_lo: [0.0, 0.0, 0.0]
fg_thresh: [0.5, 0.6, 0.7]
fg_fraction: 0.25
bbox_post_process: # used in infer
name: BBoxPostProcess
# decode -> clip -> nms
decode_clip_nms:
name: DecodeClipNms
keep_top_k: 100
score_threshold: 0.05
nms_threshold: 0.5
Mask:
mask_target_generator:
name: MaskTargetGenerator
resolution: 14
mask_post_process:
name: MaskPostProcess
# Train
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [120000, 160000]
- !LinearWarmup
start_factor: 0.3333333333333333
steps: 500
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0001
type: L2
_READER_: 'mask_reader.yml'
architecture: FasterRCNN
use_gpu: true
max_iters: 180000
log_iter: 50
save_dir: output
snapshot_iter: 10000
pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/resnet50.pdparams
metric: COCO
weights: output/faster_rcnn_r50_1x/model_final
num_classes: 81
open_debug: False
# Model Architecture
FasterRCNN:
# model anchor info flow
anchor: AnchorRPN
proposal: Proposal
# model feat info flow
backbone: ResNet
rpn_head: RPNHead
bbox_head: BBoxHead
ResNet:
depth: 50
norm_type: 'affine'
freeze_at: 'res2'
RPNHead:
rpn_feat:
name: RPNFeat
feat_in: 1024
feat_out: 1024
anchor_per_position: 15
BBoxHead:
bbox_feat:
name: BBoxFeat
roi_extractor:
name: RoIExtractor
resolution: 14
sampling_ratio: 0
spatial_scale: 0.0625
extractor_type: 'RoIAlign'
feat_out: 512
AnchorRPN:
anchor_generator:
name: AnchorGeneratorRPN
anchor_sizes: [32, 64, 128, 256, 512]
aspect_ratios: [0.5, 1.0, 2.0]
stride: [16.0, 16.0]
variance: [1.0, 1.0, 1.0, 1.0]
anchor_target_generator:
name: AnchorTargetGeneratorRPN
batch_size_per_im: 256
fg_fraction: 0.5
negative_overlap: 0.3
positive_overlap: 0.7
straddle_thresh: 0.0
Proposal:
proposal_generator:
name: ProposalGenerator
min_size: 0.0
nms_thresh: 0.7
train_pre_nms_top_n: 12000
train_post_nms_top_n: 2000
infer_pre_nms_top_n: 12000 # used in infer
infer_post_nms_top_n: 2000 # used in infer
return_rois_num: True
proposal_target_generator:
name: ProposalTargetGenerator
batch_size_per_im: 512
bbox_reg_weights: [[0.1, 0.1, 0.2, 0.2],]
bg_thresh_hi: [0.5,]
bg_thresh_lo: [0.0,]
fg_thresh: [0.5,]
fg_fraction: 0.25
bbox_post_process: # used in infer
name: BBoxPostProcess
# decode -> clip -> nms
decode_clip_nms:
name: DecodeClipNms
keep_top_k: 100
score_threshold: 0.05
nms_threshold: 0.5
# Train
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [120000, 160000]
- !LinearWarmup
start_factor: 0.3333333333333333
steps: 500
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0001
type: L2
_READER_: 'faster_reader.yml'
architecture: MaskRCNN
use_gpu: true
max_iters: 180000
log_iter: 50
save_dir: output
snapshot_iter: 10000
pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/resnet50.pdparams
metric: COCO
weights: output/mask_rcnn_r50_1x/model_final
num_classes: 81
open_debug: False
# Model Architecture
MaskRCNN:
# model anchor info flow
anchor: AnchorRPN
proposal: Proposal
mask: Mask
# model feat info flow
backbone: ResNet
rpn_head: RPNHead
bbox_head: BBoxHead
mask_head: MaskHead
ResNet:
norm_type: 'affine'
depth: 50
freeze_at: 'res2'
RPNHead:
rpn_feat:
name: RPNFeat
feat_in: 1024
feat_out: 1024
anchor_per_position: 15
BBoxHead:
bbox_feat:
name: BBoxFeat
roi_extractor:
name: RoIExtractor
resolution: 14
sampling_ratio: 0
spatial_scale: 0.0625
extractor_type: 'RoIAlign'
feat_in: 1024
feat_out: 512
MaskHead:
mask_feat:
name: MaskFeat
feat_in: 2048
feat_out: 256
mask_stages: 1
feat_in: 256
resolution: 14
mask_stages: 1
AnchorRPN:
anchor_generator:
name: AnchorGeneratorRPN
anchor_sizes: [32, 64, 128, 256, 512]
aspect_ratios: [0.5, 1.0, 2.0]
stride: [16.0, 16.0]
variance: [1.0, 1.0, 1.0, 1.0]
anchor_target_generator:
name: AnchorTargetGeneratorRPN
batch_size_per_im: 256
fg_fraction: 0.5
negative_overlap: 0.3
positive_overlap: 0.7
straddle_thresh: 0.0
Proposal:
proposal_generator:
name: ProposalGenerator
min_size: 0.0
nms_thresh: 0.7
train_pre_nms_top_n: 12000
train_post_nms_top_n: 2000
infer_pre_nms_top_n: 12000
infer_post_nms_top_n: 2000
return_rois_num: True
proposal_target_generator:
name: ProposalTargetGenerator
batch_size_per_im: 512
bbox_reg_weights: [[0.1, 0.1, 0.2, 0.2],]
bg_thresh_hi: [0.5,]
bg_thresh_lo: [0.0,]
fg_thresh: [0.5,]
fg_fraction: 0.25
bbox_post_process: # used in infer
name: BBoxPostProcess
# decode -> clip -> nms
decode_clip_nms:
name: DecodeClipNms
keep_top_k: 100
score_threshold: 0.05
nms_threshold: 0.5
Mask:
mask_target_generator:
name: MaskTargetGenerator
resolution: 14
mask_post_process:
name: MaskPostProcess
# Train
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [120000, 160000]
- !LinearWarmup
start_factor: 0.3333333333333333
steps: 500
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0001
type: L2
_READER_: 'mask_reader.yml'
_BASE_: [
'./_base_/models/mask_rcnn_r50_fpn.yml',
'./_base_/optimizers/rcnn_1x.yml',
'./_base_/datasets/coco.yml',
'./_base_/readers/mask_reader.yml',
'./_base_/runtime.yml',
]
_BASE_: [
'./_base_/models/yolov3_darknet53.yml',
'./_base_/optimizers/yolov3_270e.yml',
'./_base_/datasets/coco.yml',
'./_base_/readers/yolov3_reader.yml',
'./_base_/runtime.yml',
]
...@@ -66,7 +66,44 @@ class AttrDict(dict): ...@@ -66,7 +66,44 @@ class AttrDict(dict):
global_config = AttrDict() global_config = AttrDict()
READER_KEY = '_READER_' BASE_KEY = '_BASE_'
# parse and load _BASE_ recursively
def _load_config_with_base(file_path):
    """Load a yaml config file, resolving its ``_BASE_`` entries recursively.

    Args:
        file_path (str): path of the yaml config file to load.

    Returns:
        dict: merged config. Keys defined in ``file_path`` take priority
            over keys with the same name coming from any ``_BASE_`` file.
    """
    with open(file_path) as f:
        file_cfg = yaml.load(f, Loader=yaml.Loader)

    # NOTE: cfgs outside have higher priority than cfgs in _BASE_
    if BASE_KEY in file_cfg:
        all_base_cfg = AttrDict()
        base_ymls = list(file_cfg[BASE_KEY])
        for base_yml in base_ymls:
            if base_yml.startswith("~"):
                base_yml = os.path.expanduser(base_yml)
            # portable absolute-path test (the original `startswith('/')`
            # check fails on Windows drive paths)
            if not os.path.isabs(base_yml):
                # relative paths are resolved against the including file
                base_yml = os.path.join(os.path.dirname(file_path), base_yml)

            # fix: the original wrapped this call in `with open(base_yml)`
            # but never used the handle -- the recursive call opens the
            # file itself, so the extra open was a pointless second open
            base_cfg = _load_config_with_base(base_yml)
            all_base_cfg = merge_config(base_cfg, all_base_cfg)

        del file_cfg[BASE_KEY]
        return merge_config(file_cfg, all_base_cfg)

    return file_cfg
# architectures that do NOT reserve class id 0 for background
WITHOUT_BACKGROUND_ARCHS = ['YOLOv3']


def _parse_with_background():
    """Derive ``with_background`` from the configured architecture and
    propagate it to the global config and each reader config.

    Architectures listed in ``WITHOUT_BACKGROUND_ARCHS`` (e.g. YOLOv3) do
    not treat class id 0 as background; all other architectures do.
    """
    arch = global_config.architecture
    with_background = arch not in WITHOUT_BACKGROUND_ARCHS
    global_config['with_background'] = with_background
    # robustness: a config may define only a subset of the readers; the
    # original indexed all three unconditionally and raised KeyError
    for reader_key in ('TrainReader', 'EvalReader', 'TestReader'):
        if reader_key in global_config:
            global_config[reader_key]['with_background'] = with_background
def load_config(file_path): def load_config(file_path):
...@@ -81,22 +118,13 @@ def load_config(file_path): ...@@ -81,22 +118,13 @@ def load_config(file_path):
_, ext = os.path.splitext(file_path) _, ext = os.path.splitext(file_path)
assert ext in ['.yml', '.yaml'], "only support yaml files for now" assert ext in ['.yml', '.yaml'], "only support yaml files for now"
cfg = AttrDict() # load config from file and merge into global config
with open(file_path) as f: cfg = _load_config_with_base(file_path)
cfg = merge_config(yaml.load(f, Loader=yaml.Loader), cfg) merge_config(cfg)
if READER_KEY in cfg:
reader_cfg = cfg[READER_KEY]
if reader_cfg.startswith("~"):
reader_cfg = os.path.expanduser(reader_cfg)
if not reader_cfg.startswith('/'):
reader_cfg = os.path.join(os.path.dirname(file_path), reader_cfg)
with open(reader_cfg) as f: # parse config from merged config
merge_config(yaml.load(f, Loader=yaml.Loader)) _parse_with_background()
del cfg[READER_KEY]
merge_config(cfg)
return global_config return global_config
......
...@@ -158,6 +158,8 @@ class Reader(object): ...@@ -158,6 +158,8 @@ class Reader(object):
""" """
Args: Args:
dataset (DataSet): DataSet object dataset (DataSet): DataSet object
with_background (bool): whether load background as a class. if True,
total class number will be class number of dataset + 1. default True.
sample_transforms (list of BaseOperator): a list of sample transforms sample_transforms (list of BaseOperator): a list of sample transforms
operators. operators.
batch_transforms (list of BaseOperator): a list of batch transforms batch_transforms (list of BaseOperator): a list of batch transforms
...@@ -188,6 +190,7 @@ class Reader(object): ...@@ -188,6 +190,7 @@ class Reader(object):
def __init__(self, def __init__(self,
dataset=None, dataset=None,
with_background=True,
sample_transforms=None, sample_transforms=None,
batch_transforms=None, batch_transforms=None,
batch_size=None, batch_size=None,
...@@ -206,7 +209,7 @@ class Reader(object): ...@@ -206,7 +209,7 @@ class Reader(object):
inputs_def=None, inputs_def=None,
devices_num=1): devices_num=1):
self._dataset = dataset self._dataset = dataset
self._roidbs = self._dataset.get_roidb() self._roidbs = self._dataset.get_roidb(with_background)
self._fields = copy.deepcopy(inputs_def[ self._fields = copy.deepcopy(inputs_def[
'fields']) if inputs_def else None 'fields']) if inputs_def else None
...@@ -416,7 +419,7 @@ class Reader(object): ...@@ -416,7 +419,7 @@ class Reader(object):
self._parallel.stop() self._parallel.stop()
def create_reader(cfg, max_iter=0, global_cfg=None, devices_num=1): def create_reader(dataset, cfg, max_iter=0, global_cfg=None, devices_num=1):
""" """
Return iterable data reader. Return iterable data reader.
...@@ -432,7 +435,8 @@ def create_reader(cfg, max_iter=0, global_cfg=None, devices_num=1): ...@@ -432,7 +435,8 @@ def create_reader(cfg, max_iter=0, global_cfg=None, devices_num=1):
'use_fine_grained_loss', False) 'use_fine_grained_loss', False)
cfg['num_classes'] = getattr(global_cfg, 'num_classes', 80) cfg['num_classes'] = getattr(global_cfg, 'num_classes', 80)
cfg['devices_num'] = devices_num cfg['devices_num'] = devices_num
reader = Reader(**cfg)()
reader = Reader(dataset=dataset, **cfg)()
def _reader(): def _reader():
n = 0 n = 0
......
...@@ -33,25 +33,20 @@ class COCODataSet(DataSet): ...@@ -33,25 +33,20 @@ class COCODataSet(DataSet):
image_dir (str): directory for images. image_dir (str): directory for images.
anno_path (str): json file path. anno_path (str): json file path.
sample_num (int): number of samples to load, -1 means all. sample_num (int): number of samples to load, -1 means all.
with_background (bool): whether load background as a class.
if True, total class number will be 81. default True.
""" """
def __init__(self, def __init__(self,
image_dir=None, image_dir=None,
anno_path=None, anno_path=None,
dataset_dir=None, dataset_dir=None,
sample_num=-1, sample_num=-1):
with_background=True):
super(COCODataSet, self).__init__( super(COCODataSet, self).__init__(
image_dir=image_dir, image_dir=image_dir,
anno_path=anno_path, anno_path=anno_path,
dataset_dir=dataset_dir, dataset_dir=dataset_dir,
sample_num=sample_num, sample_num=sample_num)
with_background=with_background)
self.anno_path = anno_path self.anno_path = anno_path
self.sample_num = sample_num self.sample_num = sample_num
self.with_background = with_background
# `roidbs` is list of dict whose structure is: # `roidbs` is list of dict whose structure is:
# { # {
# 'im_file': im_fname, # image file name # 'im_file': im_fname, # image file name
...@@ -69,7 +64,7 @@ class COCODataSet(DataSet): ...@@ -69,7 +64,7 @@ class COCODataSet(DataSet):
self.cname2cid = None self.cname2cid = None
self.load_image_only = False self.load_image_only = False
def load_roidb_and_cname2cid(self): def load_roidb_and_cname2cid(self, with_background=True):
anno_path = os.path.join(self.dataset_dir, self.anno_path) anno_path = os.path.join(self.dataset_dir, self.anno_path)
image_dir = os.path.join(self.dataset_dir, self.image_dir) image_dir = os.path.join(self.dataset_dir, self.image_dir)
...@@ -85,7 +80,7 @@ class COCODataSet(DataSet): ...@@ -85,7 +80,7 @@ class COCODataSet(DataSet):
# when with_background = True, mapping category to classid, like: # when with_background = True, mapping category to classid, like:
# background:0, first_class:1, second_class:2, ... # background:0, first_class:1, second_class:2, ...
catid2clsid = dict({ catid2clsid = dict({
catid: i + int(self.with_background) catid: i + int(with_background)
for i, catid in enumerate(cat_ids) for i, catid in enumerate(cat_ids)
}) })
cname2cid = dict({ cname2cid = dict({
......
...@@ -40,7 +40,6 @@ class DataSet(object): ...@@ -40,7 +40,6 @@ class DataSet(object):
image_dir=None, image_dir=None,
anno_path=None, anno_path=None,
sample_num=-1, sample_num=-1,
with_background=True,
use_default_label=None, use_default_label=None,
**kwargs): **kwargs):
super(DataSet, self).__init__() super(DataSet, self).__init__()
...@@ -48,7 +47,6 @@ class DataSet(object): ...@@ -48,7 +47,6 @@ class DataSet(object):
self.image_dir = image_dir if image_dir is not None else '' self.image_dir = image_dir if image_dir is not None else ''
self.dataset_dir = dataset_dir if dataset_dir is not None else '' self.dataset_dir = dataset_dir if dataset_dir is not None else ''
self.sample_num = sample_num self.sample_num = sample_num
self.with_background = with_background
self.use_default_label = use_default_label self.use_default_label = use_default_label
self.cname2cid = None self.cname2cid = None
...@@ -59,13 +57,13 @@ class DataSet(object): ...@@ -59,13 +57,13 @@ class DataSet(object):
raise NotImplementedError('%s.load_roidb_and_cname2cid not available' % raise NotImplementedError('%s.load_roidb_and_cname2cid not available' %
(self.__class__.__name__)) (self.__class__.__name__))
def get_roidb(self): def get_roidb(self, with_background=True):
if not self.roidbs: if not self.roidbs:
data_dir = get_dataset_path(self.dataset_dir, self.anno_path, data_dir = get_dataset_path(self.dataset_dir, self.anno_path,
self.image_dir) self.image_dir)
if data_dir: if data_dir:
self.dataset_dir = data_dir self.dataset_dir = data_dir
self.load_roidb_and_cname2cid() self.load_roidb_and_cname2cid(with_background)
return self.roidbs return self.roidbs
...@@ -116,12 +114,10 @@ class ImageFolder(DataSet): ...@@ -116,12 +114,10 @@ class ImageFolder(DataSet):
image_dir=None, image_dir=None,
anno_path=None, anno_path=None,
sample_num=-1, sample_num=-1,
with_background=True,
use_default_label=None, use_default_label=None,
**kwargs): **kwargs):
super(ImageFolder, self).__init__(dataset_dir, image_dir, anno_path, super(ImageFolder, self).__init__(dataset_dir, image_dir, anno_path,
sample_num, with_background, sample_num, use_default_label)
use_default_label)
self.roidbs = None self.roidbs = None
self._imid2path = {} self._imid2path = {}
......
...@@ -40,8 +40,6 @@ class VOCDataSet(DataSet): ...@@ -40,8 +40,6 @@ class VOCDataSet(DataSet):
sample_num (int): number of samples to load, -1 means all. sample_num (int): number of samples to load, -1 means all.
use_default_label (bool): whether use the default mapping of use_default_label (bool): whether use the default mapping of
label to integer index. Default True. label to integer index. Default True.
with_background (bool): whether load background as a class,
default True.
label_list (str): if use_default_label is False, will load label_list (str): if use_default_label is False, will load
mapping between category and class index. mapping between category and class index.
""" """
...@@ -52,14 +50,12 @@ class VOCDataSet(DataSet): ...@@ -52,14 +50,12 @@ class VOCDataSet(DataSet):
anno_path=None, anno_path=None,
sample_num=-1, sample_num=-1,
use_default_label=True, use_default_label=True,
with_background=True,
label_list='label_list.txt'): label_list='label_list.txt'):
super(VOCDataSet, self).__init__( super(VOCDataSet, self).__init__(
image_dir=image_dir, image_dir=image_dir,
anno_path=anno_path, anno_path=anno_path,
sample_num=sample_num, sample_num=sample_num,
dataset_dir=dataset_dir, dataset_dir=dataset_dir)
with_background=with_background)
# roidbs is list of dict whose structure is: # roidbs is list of dict whose structure is:
# { # {
# 'im_file': im_fname, # image file name # 'im_file': im_fname, # image file name
...@@ -78,7 +74,7 @@ class VOCDataSet(DataSet): ...@@ -78,7 +74,7 @@ class VOCDataSet(DataSet):
self.use_default_label = use_default_label self.use_default_label = use_default_label
self.label_list = label_list self.label_list = label_list
def load_roidb_and_cname2cid(self): def load_roidb_and_cname2cid(self, with_background=True):
anno_path = os.path.join(self.dataset_dir, self.anno_path) anno_path = os.path.join(self.dataset_dir, self.anno_path)
image_dir = os.path.join(self.dataset_dir, self.image_dir) image_dir = os.path.join(self.dataset_dir, self.image_dir)
...@@ -96,12 +92,12 @@ class VOCDataSet(DataSet): ...@@ -96,12 +92,12 @@ class VOCDataSet(DataSet):
raise ValueError("label_list {} does not exists".format( raise ValueError("label_list {} does not exists".format(
label_path)) label_path))
with open(label_path, 'r') as fr: with open(label_path, 'r') as fr:
label_id = int(self.with_background) label_id = int(with_background)
for line in fr.readlines(): for line in fr.readlines():
cname2cid[line.strip()] = label_id cname2cid[line.strip()] = label_id
label_id += 1 label_id += 1
else: else:
cname2cid = pascalvoc_label(self.with_background) cname2cid = pascalvoc_label(with_background)
with open(anno_path, 'r') as fr: with open(anno_path, 'r') as fr:
while True: while True:
......
...@@ -32,8 +32,6 @@ class WIDERFaceDataSet(DataSet): ...@@ -32,8 +32,6 @@ class WIDERFaceDataSet(DataSet):
image_dir (str): directory for images. image_dir (str): directory for images.
anno_path (str): root directory for voc annotation data anno_path (str): root directory for voc annotation data
sample_num (int): number of samples to load, -1 means all sample_num (int): number of samples to load, -1 means all
with_background (bool): whether load background as a class.
if True, total class number will be 2. default True.
""" """
def __init__(self, def __init__(self,
...@@ -41,22 +39,19 @@ class WIDERFaceDataSet(DataSet): ...@@ -41,22 +39,19 @@ class WIDERFaceDataSet(DataSet):
image_dir=None, image_dir=None,
anno_path=None, anno_path=None,
sample_num=-1, sample_num=-1,
with_background=True,
with_lmk=False): with_lmk=False):
super(WIDERFaceDataSet, self).__init__( super(WIDERFaceDataSet, self).__init__(
image_dir=image_dir, image_dir=image_dir,
anno_path=anno_path, anno_path=anno_path,
sample_num=sample_num, sample_num=sample_num,
dataset_dir=dataset_dir, dataset_dir=dataset_dir)
with_background=with_background)
self.anno_path = anno_path self.anno_path = anno_path
self.sample_num = sample_num self.sample_num = sample_num
self.with_background = with_background
self.roidbs = None self.roidbs = None
self.cname2cid = None self.cname2cid = None
self.with_lmk = with_lmk self.with_lmk = with_lmk
def load_roidb_and_cname2cid(self): def load_roidb_and_cname2cid(self, with_background=True):
anno_path = os.path.join(self.dataset_dir, self.anno_path) anno_path = os.path.join(self.dataset_dir, self.anno_path)
image_dir = os.path.join(self.dataset_dir, self.image_dir) image_dir = os.path.join(self.dataset_dir, self.image_dir)
...@@ -65,7 +60,7 @@ class WIDERFaceDataSet(DataSet): ...@@ -65,7 +60,7 @@ class WIDERFaceDataSet(DataSet):
records = [] records = []
ct = 0 ct = 0
file_lists = self._load_file_list(txt_file) file_lists = self._load_file_list(txt_file)
cname2cid = widerface_label(self.with_background) cname2cid = widerface_label(with_background)
for item in file_lists: for item in file_lists:
im_fname = item[0] im_fname = item[0]
......
...@@ -4,6 +4,7 @@ from . import mask ...@@ -4,6 +4,7 @@ from . import mask
from . import backbone from . import backbone
from . import neck from . import neck
from . import head from . import head
from . import loss
from . import architecture from . import architecture
from . import post_process from . import post_process
...@@ -13,5 +14,6 @@ from .mask import * ...@@ -13,5 +14,6 @@ from .mask import *
from .backbone import * from .backbone import *
from .neck import * from .neck import *
from .head import * from .head import *
from .loss import *
from .architecture import * from .architecture import *
from .post_process import * from .post_process import *
...@@ -87,11 +87,11 @@ class CascadeRCNN(BaseArch): ...@@ -87,11 +87,11 @@ class CascadeRCNN(BaseArch):
mask_out = self.mask.post_process(self.gbd) mask_out = self.mask.post_process(self.gbd)
self.gbd.update(mask_out) self.gbd.update(mask_out)
def loss(self, ): def get_loss(self, ):
outs = {} outs = {}
losses = [] losses = []
rpn_cls_loss, rpn_reg_loss = self.rpn_head.loss(self.gbd) rpn_cls_loss, rpn_reg_loss = self.rpn_head.get_loss(self.gbd)
outs['loss_rpn_cls'] = rpn_cls_loss outs['loss_rpn_cls'] = rpn_cls_loss
outs['loss_rpn_reg'] = rpn_reg_loss outs['loss_rpn_reg'] = rpn_reg_loss
losses.extend([rpn_cls_loss, rpn_reg_loss]) losses.extend([rpn_cls_loss, rpn_reg_loss])
...@@ -100,7 +100,7 @@ class CascadeRCNN(BaseArch): ...@@ -100,7 +100,7 @@ class CascadeRCNN(BaseArch):
bbox_reg_loss_list = [] bbox_reg_loss_list = []
for i in range(self.num_stages): for i in range(self.num_stages):
self.gbd.update_v('stage', i) self.gbd.update_v('stage', i)
bbox_cls_loss, bbox_reg_loss = self.bbox_head.loss(self.gbd) bbox_cls_loss, bbox_reg_loss = self.bbox_head.get_loss(self.gbd)
bbox_cls_loss_list.append(bbox_cls_loss) bbox_cls_loss_list.append(bbox_cls_loss)
bbox_reg_loss_list.append(bbox_reg_loss) bbox_reg_loss_list.append(bbox_reg_loss)
outs['loss_bbox_cls_' + str(i)] = bbox_cls_loss outs['loss_bbox_cls_' + str(i)] = bbox_cls_loss
...@@ -108,7 +108,7 @@ class CascadeRCNN(BaseArch): ...@@ -108,7 +108,7 @@ class CascadeRCNN(BaseArch):
losses.extend(bbox_cls_loss_list) losses.extend(bbox_cls_loss_list)
losses.extend(bbox_reg_loss_list) losses.extend(bbox_reg_loss_list)
mask_loss = self.mask_head.loss(self.gbd) mask_loss = self.mask_head.get_loss(self.gbd)
outs['mask_loss'] = mask_loss outs['mask_loss'] = mask_loss
losses.append(mask_loss) losses.append(mask_loss)
...@@ -116,7 +116,7 @@ class CascadeRCNN(BaseArch): ...@@ -116,7 +116,7 @@ class CascadeRCNN(BaseArch):
outs['loss'] = loss outs['loss'] = loss
return outs return outs
def infer(self, ): def get_pred(self, ):
outs = { outs = {
'bbox': self.gbd['predicted_bbox'].numpy(), 'bbox': self.gbd['predicted_bbox'].numpy(),
'bbox_nums': self.gbd['predicted_bbox_nums'].numpy(), 'bbox_nums': self.gbd['predicted_bbox_nums'].numpy(),
......
...@@ -55,9 +55,9 @@ class FasterRCNN(BaseArch): ...@@ -55,9 +55,9 @@ class FasterRCNN(BaseArch):
bbox_out = self.proposal.post_process(self.gbd) bbox_out = self.proposal.post_process(self.gbd)
self.gbd.update(bbox_out) self.gbd.update(bbox_out)
def loss(self, ): def get_loss(self, ):
rpn_cls_loss, rpn_reg_loss = self.rpn_head.loss(self.gbd) rpn_cls_loss, rpn_reg_loss = self.rpn_head.get_loss(self.gbd)
bbox_cls_loss, bbox_reg_loss = self.bbox_head.loss(self.gbd) bbox_cls_loss, bbox_reg_loss = self.bbox_head.get_loss(self.gbd)
losses = [rpn_cls_loss, rpn_reg_loss, bbox_cls_loss, bbox_reg_loss] losses = [rpn_cls_loss, rpn_reg_loss, bbox_cls_loss, bbox_reg_loss]
loss = fluid.layers.sum(losses) loss = fluid.layers.sum(losses)
out = { out = {
...@@ -69,7 +69,7 @@ class FasterRCNN(BaseArch): ...@@ -69,7 +69,7 @@ class FasterRCNN(BaseArch):
} }
return out return out
def infer(self, ): def get_pred(self, ):
outs = { outs = {
"bbox": self.gbd['predicted_bbox'].numpy(), "bbox": self.gbd['predicted_bbox'].numpy(),
"bbox_nums": self.gbd['predicted_bbox_nums'].numpy(), "bbox_nums": self.gbd['predicted_bbox_nums'].numpy(),
......
...@@ -95,30 +95,30 @@ class MaskRCNN(BaseArch): ...@@ -95,30 +95,30 @@ class MaskRCNN(BaseArch):
self.bboxes, bbox_feat, self.bboxes, bbox_feat,
rois_has_mask_int32, spatial_scale) rois_has_mask_int32, spatial_scale)
def loss(self, ): def get_loss(self, ):
loss = {} loss = {}
# RPN loss # RPN loss
rpn_loss_inputs = self.anchor.generate_loss_inputs( rpn_loss_inputs = self.anchor.generate_loss_inputs(
self.inputs, self.rpn_head_out, self.anchor_out) self.inputs, self.rpn_head_out, self.anchor_out)
loss_rpn = self.rpn_head.loss(rpn_loss_inputs) loss_rpn = self.rpn_head.get_loss(rpn_loss_inputs)
loss.update(loss_rpn) loss.update(loss_rpn)
# BBox loss # BBox loss
bbox_targets = self.proposal.get_targets() bbox_targets = self.proposal.get_targets()
loss_bbox = self.bbox_head.loss(self.bbox_head_out, bbox_targets) loss_bbox = self.bbox_head.get_loss(self.bbox_head_out, bbox_targets)
loss.update(loss_bbox) loss.update(loss_bbox)
# Mask loss # Mask loss
mask_targets = self.mask.get_targets() mask_targets = self.mask.get_targets()
loss_mask = self.mask_head.loss(self.mask_head_out, mask_targets) loss_mask = self.mask_head.get_loss(self.mask_head_out, mask_targets)
loss.update(loss_mask) loss.update(loss_mask)
total_loss = fluid.layers.sums(list(loss.values())) total_loss = fluid.layers.sums(list(loss.values()))
loss.update({'loss': total_loss}) loss.update({'loss': total_loss})
return loss return loss
def infer(self, ): def get_pred(self, ):
mask = self.mask_post_process(self.bboxes, self.mask_head_out, mask = self.mask_post_process(self.bboxes, self.mask_head_out,
self.inputs['im_info']) self.inputs['im_info'])
bbox, bbox_num = self.bboxes bbox, bbox_num = self.bboxes
......
...@@ -22,9 +22,9 @@ class BaseArch(nn.Layer): ...@@ -22,9 +22,9 @@ class BaseArch(nn.Layer):
self.model_arch() self.model_arch()
if mode == 'train': if mode == 'train':
out = self.loss() out = self.get_loss()
elif mode == 'infer': elif mode == 'infer':
out = self.infer() out = self.get_pred()
else: else:
raise "Now, only support train or infer mode!" raise "Now, only support train or infer mode!"
return out return out
...@@ -45,8 +45,8 @@ class BaseArch(nn.Layer): ...@@ -45,8 +45,8 @@ class BaseArch(nn.Layer):
def model_arch(self): def model_arch(self):
raise NotImplementedError("Should implement model_arch method!") raise NotImplementedError("Should implement model_arch method!")
def loss(self, ): def get_loss(self, ):
raise NotImplementedError("Should implement loss method!") raise NotImplementedError("Should implement get_loss method!")
def infer(self, ): def get_pred(self, ):
raise NotImplementedError("Should implement infer method!") raise NotImplementedError("Should implement get_pred method!")
...@@ -39,11 +39,11 @@ class YOLOv3(BaseArch): ...@@ -39,11 +39,11 @@ class YOLOv3(BaseArch):
# YOLO Head # YOLO Head
self.yolo_head_outs = self.yolo_head(body_feats) self.yolo_head_outs = self.yolo_head(body_feats)
def loss(self, ): def get_loss(self, ):
yolo_loss = self.yolo_head.loss(self.inputs, self.yolo_head_outs) loss = self.yolo_head.get_loss(self.inputs, self.yolo_head_outs)
return yolo_loss return loss
def infer(self, ): def get_pred(self, ):
bbox, bbox_num = self.post_process(self.yolo_head_outs, bbox, bbox_num = self.post_process(self.yolo_head_outs,
self.yolo_head.mask_anchors, self.yolo_head.mask_anchors,
self.inputs['im_size']) self.inputs['im_size'])
......
...@@ -164,7 +164,7 @@ class BBoxHead(Layer): ...@@ -164,7 +164,7 @@ class BBoxHead(Layer):
loss_bbox_reg = fluid.layers.reduce_mean(loss_bbox_reg) loss_bbox_reg = fluid.layers.reduce_mean(loss_bbox_reg)
return loss_bbox_cls, loss_bbox_reg return loss_bbox_cls, loss_bbox_reg
def loss(self, bbox_head_out, targets): def get_loss(self, bbox_head_out, targets):
loss_bbox = {} loss_bbox = {}
for lvl, (bboxhead, target) in enumerate(zip(bbox_head_out, targets)): for lvl, (bboxhead, target) in enumerate(zip(bbox_head_out, targets)):
score, delta = bboxhead score, delta = bboxhead
......
...@@ -178,7 +178,7 @@ class MaskHead(Layer): ...@@ -178,7 +178,7 @@ class MaskHead(Layer):
spatial_scale, stage) spatial_scale, stage)
return mask_head_out return mask_head_out
def loss(self, mask_head_out, mask_target): def get_loss(self, mask_head_out, mask_target):
mask_logits = fluid.layers.flatten(mask_head_out) mask_logits = fluid.layers.flatten(mask_head_out)
mask_label = fluid.layers.cast(x=mask_target, dtype='float32') mask_label = fluid.layers.cast(x=mask_target, dtype='float32')
mask_label.stop_gradient = True mask_label.stop_gradient = True
......
...@@ -85,7 +85,7 @@ class RPNHead(Layer): ...@@ -85,7 +85,7 @@ class RPNHead(Layer):
rpn_head_out.append((rrs, rrd)) rpn_head_out.append((rrs, rrd))
return rpn_feats, rpn_head_out return rpn_feats, rpn_head_out
def loss(self, loss_inputs): def get_loss(self, loss_inputs):
# cls loss # cls loss
score_tgt = fluid.layers.cast( score_tgt = fluid.layers.cast(
x=loss_inputs['rpn_score_target'], dtype='float32') x=loss_inputs['rpn_score_target'], dtype='float32')
......
...@@ -67,5 +67,5 @@ class YOLOv3Head(nn.Layer): ...@@ -67,5 +67,5 @@ class YOLOv3Head(nn.Layer):
yolo_outputs.append(yolo_output) yolo_outputs.append(yolo_output)
return yolo_outputs return yolo_outputs
def loss(self, inputs, head_outputs): def get_loss(self, inputs, head_outputs):
return self.loss(inputs, head_outputs, anchors, anchor_masks) return self.loss(inputs, head_outputs, self.anchors, self.anchor_masks)
...@@ -21,6 +21,8 @@ from paddle.fluid.regularizer import L2Decay ...@@ -21,6 +21,8 @@ from paddle.fluid.regularizer import L2Decay
from ppdet.core.workspace import register from ppdet.core.workspace import register
from ..backbone.darknet import ConvBNLayer from ..backbone.darknet import ConvBNLayer
__all__ = ['YOLOv3Loss']
@register @register
class YOLOv3Loss(nn.Layer): class YOLOv3Loss(nn.Layer):
...@@ -33,6 +35,7 @@ class YOLOv3Loss(nn.Layer): ...@@ -33,6 +35,7 @@ class YOLOv3Loss(nn.Layer):
downsample=32, downsample=32,
use_fine_grained_loss=False): use_fine_grained_loss=False):
super(YOLOv3Loss, self).__init__() super(YOLOv3Loss, self).__init__()
self.num_classes = num_classes
self.ignore_thresh = ignore_thresh self.ignore_thresh = ignore_thresh
self.label_smooth = label_smooth self.label_smooth = label_smooth
self.downsample = downsample self.downsample = downsample
......
...@@ -92,20 +92,11 @@ def check_config(cfg): ...@@ -92,20 +92,11 @@ def check_config(cfg):
if 'log_iter' not in cfg: if 'log_iter' not in cfg:
cfg.log_iter = 20 cfg.log_iter = 20
train_dataset = cfg['TrainReader']['dataset']
eval_dataset = cfg['EvalReader']['dataset']
test_dataset = cfg['TestReader']['dataset']
assert train_dataset.with_background == eval_dataset.with_background, \
"'with_background' of TrainReader is not equal to EvalReader."
assert train_dataset.with_background == test_dataset.with_background, \
"'with_background' of TrainReader is not equal to TestReader."
actual_num_classes = int(cfg.num_classes) - int(
train_dataset.with_background)
logger.debug("The 'num_classes'(number of classes) you set is {}, " \ logger.debug("The 'num_classes'(number of classes) you set is {}, " \
"and 'with_background' in 'dataset' sets {}.\n" \ "and 'with_background' in 'dataset' sets {}.\n" \
"So please note the actual number of categories is {}." "So please note the actual number of categories is {}."
.format(cfg.num_classes, train_dataset.with_background, .format(cfg.num_classes, cfg.with_background,
actual_num_classes)) cfg.num_classes + 1))
cfg.num_classes = cfg.num_classes + int(cfg.with_background)
return cfg return cfg
...@@ -59,7 +59,8 @@ def run(FLAGS, cfg): ...@@ -59,7 +59,8 @@ def run(FLAGS, cfg):
devices_num = 1 devices_num = 1
else: else:
devices_num = int(os.environ.get('CPU_NUM', 1)) devices_num = int(os.environ.get('CPU_NUM', 1))
eval_reader = create_reader(cfg.EvalReader, devices_num=devices_num) eval_reader = create_reader(
cfg.EvalDataset, cfg.EvalReader, devices_num=devices_num)
# Run Eval # Run Eval
outs_res = [] outs_res = []
......
...@@ -144,7 +144,7 @@ def run(FLAGS, cfg): ...@@ -144,7 +144,7 @@ def run(FLAGS, cfg):
model = load_dygraph_ckpt(model, ckpt=cfg.weights) model = load_dygraph_ckpt(model, ckpt=cfg.weights)
# Data Reader # Data Reader
test_reader = create_reader(cfg.TestReader) test_reader = create_reader(cfg.TestDataset, cfg.TestReader)
# Run Infer # Run Infer
for iter_id, data in enumerate(test_reader()): for iter_id, data in enumerate(test_reader()):
......
...@@ -132,6 +132,7 @@ def run(FLAGS, cfg): ...@@ -132,6 +132,7 @@ def run(FLAGS, cfg):
devices_num = int(os.environ.get('CPU_NUM', 1)) devices_num = int(os.environ.get('CPU_NUM', 1))
train_reader = create_reader( train_reader = create_reader(
cfg.TrainDataset,
cfg.TrainReader, (cfg.max_iters - start_iter), cfg.TrainReader, (cfg.max_iters - start_iter),
cfg, cfg,
devices_num=devices_num) devices_num=devices_num)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册