未验证 提交 2b9eda14 编写于 作者: G Guanghua Yu 提交者: GitHub

add tools for voc dataset to coco (#1347)

上级 40ff9a63
...@@ -34,8 +34,6 @@ PaddleDetection的数据处理模块是一个Python模块,所有代码逻辑 ...@@ -34,8 +34,6 @@ PaddleDetection的数据处理模块是一个Python模块,所有代码逻辑
├── tests # 单元测试模块 ├── tests # 单元测试模块
│ ├── test_dataset.py # 对数据集解析、加载等进行单元测试 │ ├── test_dataset.py # 对数据集解析、加载等进行单元测试
│ │ ... │ │ ...
├── tools # 一些有用的工具
│ ├── x2coco.py # 将其他数据集转换为COCO数据集格式
├── transform # 数据预处理模块 ├── transform # 数据预处理模块
│ ├── batch_operators.py # 定义各类基于批量数据的预处理算子 │ ├── batch_operators.py # 定义各类基于批量数据的预处理算子
│ ├── op_helper.py # 预处理算子的辅助函数 │ ├── op_helper.py # 预处理算子的辅助函数
......
...@@ -8,7 +8,7 @@ In transfer learning, if different dataset and the number of classes is used, th ...@@ -8,7 +8,7 @@ In transfer learning, if different dataset and the number of classes is used, th
### Use custom dataset ### Use custom dataset
Transfer learning needs custom dataset and annotation in COCO-format and VOC-format is supported now. The script converts the annotation from labelme or cityscape to COCO is provided in ```ppdet/data/tools/x2coco.py```. More details please refer to [READER](READER.md). After data preparation, update the data parameters in configuration file. Transfer learning needs custom dataset and annotation in COCO-format and VOC-format is supported now. The script converts the annotation from voc, labelme or cityscape to COCO is provided in ```tools/x2coco.py```. More details please refer to [READER](READER.md). After data preparation, update the data parameters in configuration file.
1. COCO-format dataset, take [yolov3\_darknet.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/master/configs/yolov3_darknet.yml#L66) for example, modify the COCODataSet in yolov3\_reader: 1. COCO-format dataset, take [yolov3\_darknet.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/master/configs/yolov3_darknet.yml#L66) for example, modify the COCODataSet in yolov3\_reader:
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
### 选择数据 ### 选择数据
迁移学习需要使用自己的数据集,目前已支持COCO和VOC的数据标注格式,在```ppdet/data/tools/x2coco.py```中给出了labelme和cityscape标注格式转换为COCO格式的脚本,具体使用方式可以参考[自定义数据源](READER.md)。数据准备完成后,在配置文件中配置数据路径,对应修改reader中的路径参数即可。 迁移学习需要使用自己的数据集,目前已支持COCO和VOC的数据标注格式,在```tools/x2coco.py```中给出了voc、labelme和cityscape标注格式转换为COCO格式的脚本,具体使用方式可以参考[自定义数据源](READER.md)。数据准备完成后,在配置文件中配置数据路径,对应修改reader中的路径参数即可。
1. COCO数据集需要修改COCODataSet中的参数,以[yolov3\_darknet.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/master/configs/yolov3_darknet.yml#L66)为例,修改yolov3\_reader中的配置: 1. COCO数据集需要修改COCODataSet中的参数,以[yolov3\_darknet.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/master/configs/yolov3_darknet.yml#L66)为例,修改yolov3\_reader中的配置:
......
...@@ -15,26 +15,42 @@ ...@@ -15,26 +15,42 @@
### 方式一:将数据集转换为COCO格式 ### 方式一:将数据集转换为COCO格式
`./tools/`中提供了`x2coco.py`用于将labelme标注的数据集或cityscape数据集转换为COCO数据集: `./tools/`中提供了`x2coco.py`用于将voc格式数据集、labelme标注的数据集或cityscape数据集转换为COCO数据集,例如:
（1）labelme数据转换为COCO格式：
```bash ```bash
python ./ppdet/data/tools/x2coco.py \ python tools/x2coco.py \
--dataset_type labelme \ --dataset_type labelme \
--json_input_dir ./labelme_annos/ \ --json_input_dir ./labelme_annos/ \
--image_input_dir ./labelme_imgs/ \ --image_input_dir ./labelme_imgs/ \
--output_dir ./cocome/ \ --output_dir ./cocome/ \
--train_proportion 0.8 \ --train_proportion 0.8 \
--val_proportion 0.2 \ --val_proportion 0.2 \
--test_proportion 0.0 \ --test_proportion 0.0
```
(2)voc数据转换为COCO格式:
```bash
python tools/x2coco.py \
--dataset_type voc \
--voc_anno_dir path/to/VOCdevkit/VOC2007/Annotations/ \
--voc_anno_list path/to/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt \
--voc_label_list dataset/voc/label_list.txt \
--voc_out_name voc_train.json
``` ```
**参数说明:** **参数说明:**
- `--dataset_type`:需要转换的数据格式,目前支持:’labelme‘和’cityscape‘ - `--dataset_type`:需要转换的数据格式,目前支持:’voc‘、’labelme‘和’cityscape‘
- `--json_input_dir`:使用labelme标注的json文件所在文件夹 - `--json_input_dir`:使用labelme标注的json文件所在文件夹
- `--image_input_dir`:图像文件所在文件夹 - `--image_input_dir`:图像文件所在文件夹
- `--output_dir`:转换后的COCO格式数据集存放位置 - `--output_dir`:转换后的COCO格式数据集存放位置
- `--train_proportion`:标注数据中用于train的比例 - `--train_proportion`:标注数据中用于train的比例
- `--val_proportion`:标注数据中用于validation的比例 - `--val_proportion`:标注数据中用于validation的比例
- `--test_proportion`:标注数据中用于infer的比例 - `--test_proportion`:标注数据中用于infer的比例
- `--voc_anno_dir`:VOC数据转换为COCO数据集时的voc数据集标注文件路径
- `--voc_anno_list`:VOC数据转换为COCO数据集时的标注列表文件,一般是`ImageSets/Main`下trainval.txt和test.txt文件
- `--voc_label_list`:VOC数据转换为COCO数据集时的类别列表文件,文件中每一行表示一种物体类别
- `--voc_out_name`:VOC数据转换为COCO数据集时的输出的COCO数据集格式json文件名
### 方式二:将数据集转换为VOC格式 ### 方式二:将数据集转换为VOC格式
......
...@@ -137,14 +137,14 @@ class COCODataSet(DataSet): ...@@ -137,14 +137,14 @@ class COCODataSet(DataSet):
y1 = max(0, y) y1 = max(0, y)
x2 = min(im_w - 1, x1 + max(0, box_w - 1)) x2 = min(im_w - 1, x1 + max(0, box_w - 1))
y2 = min(im_h - 1, y1 + max(0, box_h - 1)) y2 = min(im_h - 1, y1 + max(0, box_h - 1))
if inst['area'] > 0 and x2 >= x1 and y2 >= y1: if x2 >= x1 and y2 >= y1:
inst['clean_bbox'] = [x1, y1, x2, y2] inst['clean_bbox'] = [x1, y1, x2, y2]
bboxes.append(inst) bboxes.append(inst)
else: else:
logger.warn( logger.warn(
'Found an invalid bbox in annotations: im_id: {}, ' 'Found an invalid bbox in annotations: im_id: {}, '
'area: {} x1: {}, y1: {}, x2: {}, y2: {}.'.format( 'x1: {}, y1: {}, x2: {}, y2: {}.'.format(
img_id, float(inst['area']), x1, y1, x2, y2)) img_id, x1, y1, x2, y2))
num_bbox = len(bboxes) num_bbox = len(bboxes)
gt_bbox = np.zeros((num_bbox, 4), dtype=np.float32) gt_bbox = np.zeros((num_bbox, 4), dtype=np.float32)
......
此差异已折叠。
...@@ -21,6 +21,9 @@ import os ...@@ -21,6 +21,9 @@ import os
import os.path as osp import os.path as osp
import sys import sys
import shutil import shutil
import xml.etree.ElementTree as ET
from tqdm import tqdm
import re
import numpy as np import numpy as np
import PIL.ImageDraw import PIL.ImageDraw
...@@ -189,6 +192,100 @@ def deal_json(ds_type, img_path, json_path): ...@@ -189,6 +192,100 @@ def deal_json(ds_type, img_path, json_path):
return data_coco return data_coco
def voc_get_label_anno(ann_dir_path, ann_ids_path, labels_path):
    """Read the VOC label list and resolve annotation file paths.

    Args:
        ann_dir_path (str): directory containing the VOC XML annotation files.
        ann_ids_path (str): text file listing annotation ids (whitespace
            separated), with or without the ``.xml`` extension.
        labels_path (str): text file of category names (whitespace separated);
            the i-th label receives COCO category id ``i + 1``.

    Returns:
        tuple: ``(label2id, ann_paths)`` where ``label2id`` maps category
        name -> 1-based category id and ``ann_paths`` lists the XML paths
        in the order they appear in ``ann_ids_path``.
    """
    with open(labels_path, 'r') as f:
        labels_str = f.read().split()
    # COCO category ids are 1-based.
    label2id = dict(zip(labels_str, range(1, len(labels_str) + 1)))

    with open(ann_ids_path, 'r') as f:
        ann_ids = f.read().split()
    ann_paths = []
    for aid in ann_ids:
        # Accept entries listed either as bare ids ("000005") or as full
        # file names ("000005.xml").  Check for '.xml' WITH the dot so an
        # id that merely ends in the letters "xml" still gets the extension.
        if not aid.endswith('.xml'):
            aid = aid + '.xml'
        ann_paths.append(os.path.join(ann_dir_path, aid))
    return label2id, ann_paths
def voc_get_image_info(annotation_root, im_id):
    """Build the COCO ``images`` entry for one parsed VOC annotation.

    Args:
        annotation_root: root ``Element`` of a parsed VOC XML annotation.
        im_id (int): id to assign to this image.

    Returns:
        dict: COCO image record with ``file_name``, ``height``, ``width``
        and ``id`` fields.

    Raises:
        AssertionError: if the annotation has no ``<filename>`` tag.
    """
    filename = annotation_root.findtext('filename')
    assert filename is not None
    # NOTE: the original computed os.path.basename(filename) into an unused
    # local; the record keeps the filename exactly as stored in the XML.
    size = annotation_root.find('size')
    width = int(size.findtext('width'))
    height = int(size.findtext('height'))
    image_info = {
        'file_name': filename,
        'height': height,
        'width': width,
        'id': im_id
    }
    return image_info
def voc_get_coco_annotation(obj, label2id):
    """Convert one VOC ``<object>`` element into a COCO annotation dict.

    Args:
        obj: an ``<object>`` ``Element`` from a VOC annotation.
        label2id (dict): mapping from category name to COCO category id.

    Returns:
        dict: COCO annotation fields (``image_id`` and ``id`` are filled
        in later by the caller).

    Raises:
        AssertionError: if the label is unknown or the box is degenerate.
    """
    label = obj.findtext('name')
    assert label in label2id, "label is not in label2id."
    bndbox = obj.find('bndbox')
    # VOC coordinates are 1-based; shift the top-left corner so the
    # resulting [x, y, w, h] box is 0-based as COCO expects.
    xmin = int(bndbox.findtext('xmin')) - 1
    ymin = int(bndbox.findtext('ymin')) - 1
    xmax = int(bndbox.findtext('xmax'))
    ymax = int(bndbox.findtext('ymax'))
    assert xmax > xmin and ymax > ymin, "Box size error."
    box_w, box_h = xmax - xmin, ymax - ymin
    return {
        'area': box_w * box_h,
        'iscrowd': 0,
        'bbox': [xmin, ymin, box_w, box_h],
        'category_id': label2id[label],
        'ignore': 0,
        'segmentation': []  # This script is not for segmentation
    }
def voc_xmls_to_cocojson(annotation_paths, label2id, output_dir, output_file):
    """Convert a set of VOC XML annotations into one COCO-format json file.

    Args:
        annotation_paths (list): paths to the VOC XML annotation files.
        label2id (dict): mapping from category name to COCO category id.
        output_dir (str): directory where the json file is written.
        output_file (str): file name of the output json.
    """
    coco = {
        "images": [],
        "type": "instances",
        "annotations": [],
        "categories": []
    }
    bnd_id = 1  # COCO annotation (bounding box) ids start at 1
    print('Start converting !')
    # Image ids are assigned sequentially in list order, starting at 0.
    for im_id, xml_path in enumerate(tqdm(annotation_paths)):
        ann_root = ET.parse(xml_path).getroot()
        img_info = voc_get_image_info(ann_root, im_id)
        coco['images'].append(img_info)
        for obj in ann_root.findall('object'):
            ann = voc_get_coco_annotation(obj=obj, label2id=label2id)
            ann.update({'image_id': img_info['id'], 'id': bnd_id})
            coco['annotations'].append(ann)
            bnd_id += 1
    coco['categories'] = [
        {'supercategory': 'none', 'id': label_id, 'name': label}
        for label, label_id in label2id.items()
    ]
    out_path = os.path.join(output_dir, output_file)
    with open(out_path, 'w') as f:
        f.write(json.dumps(coco))
def main(): def main():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter) formatter_class=argparse.ArgumentDefaultsHelpFormatter)
...@@ -196,7 +293,7 @@ def main(): ...@@ -196,7 +293,7 @@ def main():
parser.add_argument('--json_input_dir', help='input annotated directory') parser.add_argument('--json_input_dir', help='input annotated directory')
parser.add_argument('--image_input_dir', help='image directory') parser.add_argument('--image_input_dir', help='image directory')
parser.add_argument( parser.add_argument(
'--output_dir', help='output dataset directory', default='../../../') '--output_dir', help='output dataset directory', default='./')
parser.add_argument( parser.add_argument(
'--train_proportion', '--train_proportion',
help='the proportion of train dataset', help='the proportion of train dataset',
...@@ -212,96 +309,137 @@ def main(): ...@@ -212,96 +309,137 @@ def main():
help='the proportion of test dataset', help='the proportion of test dataset',
type=float, type=float,
default=0.0) default=0.0)
parser.add_argument(
'--voc_anno_dir',
help='In Voc format dataset, path to annotation files directory.',
type=str,
default=None)
parser.add_argument(
'--voc_anno_list',
help='In Voc format dataset, path to annotation files ids list.',
type=str,
default=None)
parser.add_argument(
'--voc_label_list',
help='In Voc format dataset, path to label list. The content of each line is a category.',
type=str,
default=None)
parser.add_argument(
'--voc_out_name',
type=str,
default='voc.json',
help='In Voc format dataset, path to output json file')
args = parser.parse_args() args = parser.parse_args()
try: try:
assert args.dataset_type in ['labelme', 'cityscape'] assert args.dataset_type in ['voc', 'labelme', 'cityscape']
except AssertionError as e:
print('Now only support the cityscape dataset and labelme dataset!!')
os._exit(0)
try:
assert os.path.exists(args.json_input_dir)
except AssertionError as e:
print('The json folder does not exist!')
os._exit(0)
try:
assert os.path.exists(args.image_input_dir)
except AssertionError as e:
print('The image folder does not exist!')
os._exit(0)
try:
assert abs(args.train_proportion + args.val_proportion \
+ args.test_proportion - 1.0) < 1e-5
except AssertionError as e: except AssertionError as e:
print( print(
'The sum of pqoportion of training, validation and test datase must be 1!' 'Now only support the voc, cityscape dataset and labelme dataset!!')
)
os._exit(0) os._exit(0)
# Allocate the dataset. if args.dataset_type == 'voc':
total_num = len(glob.glob(osp.join(args.json_input_dir, '*.json'))) assert args.voc_anno_dir and args.voc_anno_list and args.voc_label_list
if args.train_proportion != 0: label2id, ann_paths = voc_get_label_anno(
train_num = int(total_num * args.train_proportion) args.voc_anno_dir, args.voc_anno_list, args.voc_label_list)
os.makedirs(args.output_dir + '/train') voc_xmls_to_cocojson(
annotation_paths=ann_paths,
label2id=label2id,
output_dir=args.output_dir,
output_file=args.voc_out_name)
else: else:
train_num = 0 try:
if args.val_proportion == 0.0: assert os.path.exists(args.json_input_dir)
val_num = 0 except AssertionError as e:
test_num = total_num - train_num print('The json folder does not exist!')
if args.test_proportion != 0.0: os._exit(0)
os.makedirs(args.output_dir + '/test') try:
else: assert os.path.exists(args.image_input_dir)
val_num = int(total_num * args.val_proportion) except AssertionError as e:
test_num = total_num - train_num - val_num print('The image folder does not exist!')
os.makedirs(args.output_dir + '/val') os._exit(0)
if args.test_proportion != 0.0: try:
os.makedirs(args.output_dir + '/test') assert abs(args.train_proportion + args.val_proportion \
count = 1 + args.test_proportion - 1.0) < 1e-5
for img_name in os.listdir(args.image_input_dir): except AssertionError as e:
if count <= train_num: print(
if osp.exists(args.output_dir + '/train/'): 'The sum of pqoportion of training, validation and test datase must be 1!'
shutil.copyfile( )
osp.join(args.image_input_dir, img_name), os._exit(0)
osp.join(args.output_dir + '/train/', img_name))
# Allocate the dataset.
total_num = len(glob.glob(osp.join(args.json_input_dir, '*.json')))
if args.train_proportion != 0:
train_num = int(total_num * args.train_proportion)
os.makedirs(args.output_dir + '/train')
else: else:
if count <= train_num + val_num: train_num = 0
if osp.exists(args.output_dir + '/val/'): if args.val_proportion == 0.0:
val_num = 0
test_num = total_num - train_num
if args.test_proportion != 0.0:
os.makedirs(args.output_dir + '/test')
else:
val_num = int(total_num * args.val_proportion)
test_num = total_num - train_num - val_num
os.makedirs(args.output_dir + '/val')
if args.test_proportion != 0.0:
os.makedirs(args.output_dir + '/test')
count = 1
for img_name in os.listdir(args.image_input_dir):
if count <= train_num:
if osp.exists(args.output_dir + '/train/'):
shutil.copyfile( shutil.copyfile(
osp.join(args.image_input_dir, img_name), osp.join(args.image_input_dir, img_name),
osp.join(args.output_dir + '/val/', img_name)) osp.join(args.output_dir + '/train/', img_name))
else: else:
if osp.exists(args.output_dir + '/test/'): if count <= train_num + val_num:
shutil.copyfile( if osp.exists(args.output_dir + '/val/'):
osp.join(args.image_input_dir, img_name), shutil.copyfile(
osp.join(args.output_dir + '/test/', img_name)) osp.join(args.image_input_dir, img_name),
count = count + 1 osp.join(args.output_dir + '/val/', img_name))
else:
# Deal with the json files. if osp.exists(args.output_dir + '/test/'):
if not os.path.exists(args.output_dir + '/annotations'): shutil.copyfile(
os.makedirs(args.output_dir + '/annotations') osp.join(args.image_input_dir, img_name),
if args.train_proportion != 0: osp.join(args.output_dir + '/test/', img_name))
train_data_coco = deal_json( count = count + 1
args.dataset_type, args.output_dir + '/train', args.json_input_dir)
train_json_path = osp.join(args.output_dir + '/annotations', # Deal with the json files.
'instance_train.json') if not os.path.exists(args.output_dir + '/annotations'):
json.dump( os.makedirs(args.output_dir + '/annotations')
train_data_coco, if args.train_proportion != 0:
open(train_json_path, 'w'), train_data_coco = deal_json(args.dataset_type,
indent=4, args.output_dir + '/train',
cls=MyEncoder) args.json_input_dir)
if args.val_proportion != 0: train_json_path = osp.join(args.output_dir + '/annotations',
val_data_coco = deal_json(args.dataset_type, args.output_dir + '/val', 'instance_train.json')
args.json_input_dir) json.dump(
val_json_path = osp.join(args.output_dir + '/annotations', train_data_coco,
'instance_val.json') open(train_json_path, 'w'),
json.dump( indent=4,
val_data_coco, open(val_json_path, 'w'), indent=4, cls=MyEncoder) cls=MyEncoder)
if args.test_proportion != 0: if args.val_proportion != 0:
test_data_coco = deal_json(args.dataset_type, args.output_dir + '/test', val_data_coco = deal_json(args.dataset_type,
args.json_input_dir) args.output_dir + '/val',
test_json_path = osp.join(args.output_dir + '/annotations', args.json_input_dir)
'instance_test.json') val_json_path = osp.join(args.output_dir + '/annotations',
json.dump( 'instance_val.json')
test_data_coco, open(test_json_path, 'w'), indent=4, cls=MyEncoder) json.dump(
val_data_coco,
open(val_json_path, 'w'),
indent=4,
cls=MyEncoder)
if args.test_proportion != 0:
test_data_coco = deal_json(args.dataset_type,
args.output_dir + '/test',
args.json_input_dir)
test_json_path = osp.join(args.output_dir + '/annotations',
'instance_test.json')
json.dump(
test_data_coco,
open(test_json_path, 'w'),
indent=4,
cls=MyEncoder)
if __name__ == '__main__': if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册