提交 e4784c9c 编写于 作者: L LaraStuStu

dataannotation

上级 af74bc3a
# Instance Segmentation Example
## Annotation
```bash
labelme data_annotated --labels labels.txt --nodata
labelme data_annotated --labels labels.txt --nodata --labelflags '{.*: [occluded, truncated], person-\d+: [male]}'
```
![](.readme/annotation.jpg)
## Convert to VOC-format Dataset
```bash
# It generates:
# - data_dataset_voc/JPEGImages
# - data_dataset_voc/SegmentationClass
# - data_dataset_voc/SegmentationClassVisualization
# - data_dataset_voc/SegmentationObject
# - data_dataset_voc/SegmentationObjectVisualization
./labelme2voc.py data_annotated data_dataset_voc --labels labels.txt
```
<img src="data_dataset_voc/JPEGImages/2011_000003.jpg" width="33%" /> <img src="data_dataset_voc/SegmentationClassVisualization/2011_000003.jpg" width="33%" /> <img src="data_dataset_voc/SegmentationObjectVisualization/2011_000003.jpg" width="33%" />
Fig 1. JPEG image (left), JPEG class label visualization (center), JPEG instance label visualization (right)
Note that the label file contains only very low label values (ex. `0, 4, 14`), and
`255` indicates the `__ignore__` label value (`-1` in the npy file).
You can view the label PNG files with the following commands.
```bash
labelme_draw_label_png data_dataset_voc/SegmentationClassPNG/2011_000003.png # left
labelme_draw_label_png data_dataset_voc/SegmentationObjectPNG/2011_000003.png # right
```
<img src=".readme/draw_label_png_class.jpg" width="33%" /> <img src=".readme/draw_label_png_object.jpg" width="33%" />
## Convert to COCO-format Dataset
```bash
# It generates:
# - data_dataset_coco/JPEGImages
# - data_dataset_coco/annotations.json
./labelme2coco.py data_annotated data_dataset_coco --labels labels.txt
```
#!/usr/bin/env python
import argparse
import collections
import datetime
import glob
import json
import os
import os.path as osp
import sys
import numpy as np
import PIL.Image
import labelme
try:
import pycocotools.mask
except ImportError:
print('Please install pycocotools:\n\n pip install pycocotools\n')
sys.exit(1)
def main():
    """Convert a labelme-annotated directory to a COCO-format dataset.

    Reads ``*.json`` label files from ``input_dir``, copies the referenced
    images into ``output_dir/JPEGImages`` and writes a single COCO
    ``annotations.json`` describing all object instances.

    Exits with status 1 when ``output_dir`` already exists.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('input_dir', help='input annotated directory')
    parser.add_argument('output_dir', help='output dataset directory')
    parser.add_argument('--labels', help='labels file', required=True)
    args = parser.parse_args()

    if osp.exists(args.output_dir):
        print('Output directory already exists:', args.output_dir)
        sys.exit(1)
    os.makedirs(args.output_dir)
    os.makedirs(osp.join(args.output_dir, 'JPEGImages'))
    print('Creating dataset:', args.output_dir)

    now = datetime.datetime.now()

    # Skeleton of the COCO annotation file; filled in below.
    data = dict(
        info=dict(
            description=None,
            url=None,
            version=None,
            year=now.year,
            contributor=None,
            date_created=now.strftime('%Y-%m-%d %H:%M:%S.%f'),
        ),
        licenses=[dict(
            url=None,
            id=0,
            name=None,
        )],
        images=[
            # license, url, file_name, height, width, date_captured, id
        ],
        type='instances',
        annotations=[
            # segmentation, area, iscrowd, image_id, bbox, category_id, id
        ],
        categories=[
            # supercategory, id, name
        ],
    )

    class_name_to_id = {}
    # Context manager closes the labels file deterministically
    # (the original open(...).readlines() leaked the handle).
    with open(args.labels) as f:
        for i, line in enumerate(f):
            class_id = i - 1  # starts with -1
            class_name = line.strip()
            if class_id == -1:
                # First line must be the ignore label; it gets no category.
                assert class_name == '__ignore__'
                continue
            class_name_to_id[class_name] = class_id
            data['categories'].append(dict(
                supercategory=None,
                id=class_id,
                name=class_name,
            ))

    out_ann_file = osp.join(args.output_dir, 'annotations.json')
    label_files = glob.glob(osp.join(args.input_dir, '*.json'))
    for image_id, label_file in enumerate(label_files):
        print('Generating dataset from:', label_file)
        with open(label_file) as f:
            label_data = json.load(f)

        base = osp.splitext(osp.basename(label_file))[0]
        out_img_file = osp.join(
            args.output_dir, 'JPEGImages', base + '.jpg'
        )

        # imagePath is relative to the label file's directory.
        img_file = osp.join(
            osp.dirname(label_file), label_data['imagePath']
        )
        img = np.asarray(PIL.Image.open(img_file))
        PIL.Image.fromarray(img).save(out_img_file)
        data['images'].append(dict(
            license=0,
            url=None,
            file_name=osp.relpath(out_img_file, osp.dirname(out_ann_file)),
            height=img.shape[0],
            width=img.shape[1],
            date_captured=None,
            id=image_id,
        ))

        masks = {}  # label -> boolean mask (for area/bbox)
        segmentations = collections.defaultdict(list)  # label -> polygons
        for shape in label_data['shapes']:
            points = shape['points']
            label = shape['label']
            shape_type = shape.get('shape_type', None)
            mask = labelme.utils.shape_to_mask(
                img.shape[:2], points, shape_type
            )

            # Shapes sharing a label (e.g. 'person-1') belong to the same
            # instance: OR their masks together.
            if label in masks:
                masks[label] = masks[label] | mask
            else:
                masks[label] = mask

            points = np.asarray(points).flatten().tolist()
            segmentations[label].append(points)

        for label, mask in masks.items():
            # 'person-1' -> class name 'person'; skip unknown classes.
            cls_name = label.split('-')[0]
            if cls_name not in class_name_to_id:
                continue
            cls_id = class_name_to_id[cls_name]

            # pycocotools expects a Fortran-ordered uint8 mask.
            mask = np.asfortranarray(mask.astype(np.uint8))
            mask = pycocotools.mask.encode(mask)
            area = float(pycocotools.mask.area(mask))
            bbox = pycocotools.mask.toBbox(mask).flatten().tolist()

            data['annotations'].append(dict(
                id=len(data['annotations']),
                image_id=image_id,
                category_id=cls_id,
                segmentation=segmentations[label],
                area=area,
                bbox=bbox,
                iscrowd=0,
            ))

    with open(out_ann_file, 'w') as f:
        json.dump(data, f)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import json
import os
import os.path as osp
import sys
import numpy as np
import PIL.Image
import labelme
def main():
    """Convert a labelme-annotated directory to a VOC-style dataset with
    both class and instance (object) segmentation outputs.

    For each ``*.json`` label file in ``input_dir`` this writes, under
    ``output_dir``: the JPEG image, class/instance labels as ``.npy`` and
    ``.png``, and JPEG visualizations of each.

    Exits with status 1 when ``output_dir`` already exists.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('input_dir', help='input annotated directory')
    parser.add_argument('output_dir', help='output dataset directory')
    parser.add_argument('--labels', help='labels file', required=True)
    args = parser.parse_args()

    if osp.exists(args.output_dir):
        print('Output directory already exists:', args.output_dir)
        sys.exit(1)
    os.makedirs(args.output_dir)
    os.makedirs(osp.join(args.output_dir, 'JPEGImages'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationClass'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationClassPNG'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationClassVisualization'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationObject'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationObjectPNG'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationObjectVisualization'))
    print('Creating dataset:', args.output_dir)

    class_names = []
    class_name_to_id = {}
    # Context manager closes the labels file deterministically
    # (the original open(...).readlines() leaked the handle).
    with open(args.labels) as f:
        for i, line in enumerate(f):
            class_id = i - 1  # starts with -1
            class_name = line.strip()
            # __ignore__ (id -1) is mapped but excluded from class_names.
            class_name_to_id[class_name] = class_id
            if class_id == -1:
                assert class_name == '__ignore__'
                continue
            elif class_id == 0:
                assert class_name == '_background_'
            class_names.append(class_name)
    class_names = tuple(class_names)
    print('class_names:', class_names)
    out_class_names_file = osp.join(args.output_dir, 'class_names.txt')
    with open(out_class_names_file, 'w') as f:
        f.writelines('\n'.join(class_names))
    print('Saved class_names:', out_class_names_file)

    colormap = labelme.utils.label_colormap(255)

    for label_file in glob.glob(osp.join(args.input_dir, '*.json')):
        print('Generating dataset from:', label_file)
        with open(label_file) as f:
            base = osp.splitext(osp.basename(label_file))[0]
            out_img_file = osp.join(
                args.output_dir, 'JPEGImages', base + '.jpg')
            out_cls_file = osp.join(
                args.output_dir, 'SegmentationClass', base + '.npy')
            out_clsp_file = osp.join(
                args.output_dir, 'SegmentationClassPNG', base + '.png')
            out_clsv_file = osp.join(
                args.output_dir,
                'SegmentationClassVisualization',
                base + '.jpg',
            )
            out_ins_file = osp.join(
                args.output_dir, 'SegmentationObject', base + '.npy')
            out_insp_file = osp.join(
                args.output_dir, 'SegmentationObjectPNG', base + '.png')
            out_insv_file = osp.join(
                args.output_dir,
                'SegmentationObjectVisualization',
                base + '.jpg',
            )

            data = json.load(f)

            # imagePath is relative to the label file's directory.
            img_file = osp.join(osp.dirname(label_file), data['imagePath'])
            img = np.asarray(PIL.Image.open(img_file))
            PIL.Image.fromarray(img).save(out_img_file)

            cls, ins = labelme.utils.shapes_to_label(
                img_shape=img.shape,
                shapes=data['shapes'],
                label_name_to_value=class_name_to_id,
                type='instance',
            )
            # Pixels of the __ignore__ class carry no instance id.
            ins[cls == -1] = 0  # ignore it.

            # class label
            labelme.utils.lblsave(out_clsp_file, cls)
            np.save(out_cls_file, cls)
            clsv = labelme.utils.draw_label(
                cls, img, class_names, colormap=colormap)
            PIL.Image.fromarray(clsv).save(out_clsv_file)

            # instance label
            labelme.utils.lblsave(out_insp_file, ins)
            np.save(out_ins_file, ins)
            instance_ids = np.unique(ins)
            # Instances have no semantic names; label them by their id.
            instance_names = [str(i) for i in range(max(instance_ids) + 1)]
            insv = labelme.utils.draw_label(ins, img, instance_names)
            PIL.Image.fromarray(insv).save(out_insv_file)


if __name__ == '__main__':
    main()
__ignore__
_background_
aeroplane
bicycle
bird
boat
bottle
bus
car
cat
chair
cow
diningtable
dog
horse
motorbike
person
potted plant
sheep
sofa
train
tv/monitor
\ No newline at end of file
{
"version": "3.5.0",
"flags": {},
"shapes": [
{
"label": "rectangle",
"line_color": null,
"fill_color": null,
"points": [
[
32,
35
],
[
132,
135
]
],
"shape_type": "rectangle"
},
{
"label": "circle",
"line_color": null,
"fill_color": null,
"points": [
[
195,
84
],
[
225,
125
]
],
"shape_type": "circle"
},
{
"label": "rectangle",
"line_color": null,
"fill_color": null,
"points": [
[
391,
33
],
[
542,
135
]
],
"shape_type": "rectangle"
},
{
"label": "polygon",
"line_color": null,
"fill_color": null,
"points": [
[
69,
318
],
[
45,
403
],
[
173,
406
],
[
198,
321
]
],
"shape_type": "polygon"
},
{
"label": "line",
"line_color": null,
"fill_color": null,
"points": [
[
188,
178
],
[
160,
224
]
],
"shape_type": "line"
},
{
"label": "point",
"line_color": null,
"fill_color": null,
"points": [
[
345,
174
]
],
"shape_type": "point"
},
{
"label": "line_strip",
"line_color": null,
"fill_color": null,
"points": [
[
441,
181
],
[
403,
274
],
[
545,
275
]
],
"shape_type": "linestrip"
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "primitives.jpg",
"imageData": null
}
\ No newline at end of file
# Semantic Segmentation Example
## Annotation
```bash
labelme data_annotated --labels labels.txt --nodata
```
![](.readme/annotation.jpg)
## Convert to VOC-format Dataset
```bash
# It generates:
# - data_dataset_voc/JPEGImages
# - data_dataset_voc/SegmentationClass
# - data_dataset_voc/SegmentationClassVisualization
./labelme2voc.py data_annotated data_dataset_voc --labels labels.txt
```
<img src="data_dataset_voc/JPEGImages/2011_000003.jpg" width="33%" /> <img src="data_dataset_voc/SegmentationClassPNG/2011_000003.png" width="33%" /> <img src="data_dataset_voc/SegmentationClassVisualization/2011_000003.jpg" width="33%" />
Fig 1. JPEG image (left), PNG label (center), JPEG label visualization (right)
Note that the label file contains only very low label values (ex. `0, 4, 14`), and
`255` indicates the `__ignore__` label value (`-1` in the npy file).
You can view the label PNG file with the following command.
```bash
labelme_draw_label_png data_dataset_voc/SegmentationClassPNG/2011_000003.png
```
<img src=".readme/draw_label_png.jpg" width="33%" />
{
"shapes": [
{
"label": "person",
"line_color": null,
"fill_color": null,
"points": [
[
250.8142292490119,
107.33596837944665
],
[
229.8142292490119,
119.33596837944665
],
[
221.8142292490119,
135.33596837944665
],
[
223.8142292490119,
148.33596837944665
],
[
217.8142292490119,
161.33596837944665
],
[
202.8142292490119,
168.33596837944665
],
[
192.8142292490119,
200.33596837944665
],
[
194.8142292490119,
222.33596837944665
],
[
199.8142292490119,
227.33596837944665
],
[
191.8142292490119,
234.33596837944665
],
[
197.8142292490119,
264.3359683794467
],
[
213.8142292490119,
295.3359683794467
],
[
214.8142292490119,
320.3359683794467
],
[
221.8142292490119,
327.3359683794467
],
[
235.8142292490119,
326.3359683794467
],
[
240.8142292490119,
323.3359683794467
],
[
235.8142292490119,
298.3359683794467
],
[
238.8142292490119,
287.3359683794467
],
[
234.8142292490119,
268.3359683794467
],
[
257.81422924901193,
258.3359683794467
],
[
264.81422924901193,
264.3359683794467
],
[
256.81422924901193,
273.3359683794467
],
[
259.81422924901193,
282.3359683794467
],
[
284.81422924901193,
288.3359683794467
],
[
297.81422924901193,
278.3359683794467
],
[
288.81422924901193,
270.3359683794467
],
[
281.81422924901193,
270.3359683794467
],
[
283.81422924901193,
264.3359683794467
],
[
292.81422924901193,
261.3359683794467
],
[
308.81422924901193,
236.33596837944665
],
[
313.81422924901193,
217.33596837944665
],
[
309.81422924901193,
208.33596837944665
],
[
312.81422924901193,
202.33596837944665
],
[
308.81422924901193,
185.33596837944665
],
[
291.81422924901193,
173.33596837944665
],
[
269.81422924901193,
159.33596837944665
],
[
261.81422924901193,
154.33596837944665
],
[
264.81422924901193,
142.33596837944665
],
[
273.81422924901193,
137.33596837944665
],
[
278.81422924901193,
130.33596837944665
],
[
270.81422924901193,
121.33596837944665
]
]
},
{
"label": "person",
"line_color": null,
"fill_color": null,
"points": [
[
482.81422924901193,
85.33596837944665
],
[
468.81422924901193,
90.33596837944665
],
[
460.81422924901193,
110.33596837944665
],
[
460.81422924901193,
127.33596837944665
],
[
444.81422924901193,
137.33596837944665
],
[
419.81422924901193,
153.33596837944665
],
[
410.81422924901193,
163.33596837944665
],
[
403.81422924901193,
168.33596837944665
],
[
394.81422924901193,
170.33596837944665
],
[
386.81422924901193,
168.33596837944665
],
[
386.81422924901193,
184.33596837944665
],
[
392.81422924901193,
182.33596837944665
],
[
410.81422924901193,
187.33596837944665
],
[
414.81422924901193,
192.33596837944665
],
[
437.81422924901193,
189.33596837944665
],
[
434.81422924901193,
204.33596837944665
],
[
390.81422924901193,
195.33596837944665
],
[
386.81422924901193,
195.33596837944665
],
[
387.81422924901193,
208.33596837944665
],
[
381.81422924901193,
212.33596837944665
],
[
372.81422924901193,
212.33596837944665
],
[
372.81422924901193,
216.33596837944665
],
[
400.81422924901193,
270.3359683794467
],
[
389.81422924901193,
272.3359683794467
],
[
389.81422924901193,
274.3359683794467
],
[
403.81422924901193,
282.3359683794467
],
[
444.81422924901193,
283.3359683794467
],
[
443.81422924901193,
259.3359683794467
],
[
426.81422924901193,
244.33596837944665
],
[
462.81422924901193,
256.3359683794467
],
[
474.81422924901193,
270.3359683794467
],
[
477.81422924901193,
280.3359683794467
],
[
473.81422924901193,
289.3359683794467
],
[
471.81422924901193,
296.3359683794467
],
[
472.81422924901193,
317.3359683794467
],
[
480.81422924901193,
332.3359683794467
],
[
494.81422924901193,
335.3359683794467
],
[
498.81422924901193,
329.3359683794467
],
[
494.81422924901193,
308.3359683794467
],
[
499.81422924901193,
297.3359683794467
],
[
499.81422924901193,
90.33596837944665
]
]
},
{
"label": "bottle",
"line_color": null,
"fill_color": null,
"points": [
[
374.81422924901193,
159.33596837944665
],
[
369.81422924901193,
170.33596837944665
],
[
369.81422924901193,
210.33596837944665
],
[
375.81422924901193,
212.33596837944665
],
[
387.81422924901193,
209.33596837944665
],
[
385.81422924901193,
185.33596837944665
],
[
385.81422924901193,
168.33596837944665
],
[
385.81422924901193,
165.33596837944665
],
[
382.81422924901193,
159.33596837944665
]
]
},
{
"label": "person",
"line_color": null,
"fill_color": null,
"points": [
[
370.81422924901193,
170.33596837944665
],
[
366.81422924901193,
173.33596837944665
],
[
365.81422924901193,
182.33596837944665
],
[
368.81422924901193,
185.33596837944665
]
]
},
{
"label": "__ignore__",
"line_color": null,
"fill_color": null,
"points": [
[
338.81422924901193,
266.3359683794467
],
[
313.81422924901193,
269.3359683794467
],
[
297.81422924901193,
277.3359683794467
],
[
282.81422924901193,
288.3359683794467
],
[
273.81422924901193,
302.3359683794467
],
[
272.81422924901193,
320.3359683794467
],
[
279.81422924901193,
337.3359683794467
],
[
428.81422924901193,
337.3359683794467
],
[
432.81422924901193,
316.3359683794467
],
[
423.81422924901193,
296.3359683794467
],
[
403.81422924901193,
283.3359683794467
],
[
370.81422924901193,
270.3359683794467
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "2011_000003.jpg",
"imageData": null
}
\ No newline at end of file
{
"shapes": [
{
"label": "person",
"line_color": null,
"fill_color": null,
"points": [
[
204.936170212766,
108.56382978723406
],
[
183.936170212766,
141.56382978723406
],
[
166.936170212766,
150.56382978723406
],
[
108.93617021276599,
203.56382978723406
],
[
92.93617021276599,
228.56382978723406
],
[
95.93617021276599,
244.56382978723406
],
[
105.93617021276599,
244.56382978723406
],
[
116.93617021276599,
223.56382978723406
],
[
163.936170212766,
187.56382978723406
],
[
147.936170212766,
212.56382978723406
],
[
117.93617021276599,
222.56382978723406
],
[
108.93617021276599,
243.56382978723406
],
[
100.93617021276599,
325.56382978723406
],
[
135.936170212766,
329.56382978723406
],
[
148.936170212766,
319.56382978723406
],
[
150.936170212766,
295.56382978723406
],
[
169.936170212766,
272.56382978723406
],
[
171.936170212766,
249.56382978723406
],
[
178.936170212766,
246.56382978723406
],
[
186.936170212766,
225.56382978723406
],
[
214.936170212766,
219.56382978723406
],
[
242.936170212766,
157.56382978723406
],
[
228.936170212766,
146.56382978723406
],
[
228.936170212766,
125.56382978723406
],
[
216.936170212766,
112.56382978723406
]
]
},
{
"label": "person",
"line_color": null,
"fill_color": null,
"points": [
[
271.936170212766,
109.56382978723406
],
[
249.936170212766,
110.56382978723406
],
[
244.936170212766,
150.56382978723406
],
[
215.936170212766,
219.56382978723406
],
[
208.936170212766,
245.56382978723406
],
[
214.936170212766,
220.56382978723406
],
[
188.936170212766,
227.56382978723406
],
[
170.936170212766,
246.56382978723406
],
[
170.936170212766,
275.56382978723406
],
[
221.936170212766,
278.56382978723406
],
[
233.936170212766,
259.56382978723406
],
[
246.936170212766,
253.56382978723406
],
[
245.936170212766,
256.56382978723406
],
[
242.936170212766,
251.56382978723406
],
[
262.936170212766,
256.56382978723406
],
[
304.936170212766,
226.56382978723406
],
[
297.936170212766,
199.56382978723406
],
[
308.936170212766,
164.56382978723406
],
[
296.936170212766,
148.56382978723406
]
]
},
{
"label": "person",
"line_color": null,
"fill_color": null,
"points": [
[
308.936170212766,
115.56382978723406
],
[
298.936170212766,
145.56382978723406
],
[
309.936170212766,
166.56382978723406
],
[
297.936170212766,
200.56382978723406
],
[
305.936170212766,
228.56382978723406
],
[
262.936170212766,
258.56382978723406
],
[
252.936170212766,
284.56382978723406
],
[
272.936170212766,
291.56382978723406
],
[
281.936170212766,
250.56382978723406
],
[
326.936170212766,
235.56382978723406
],
[
351.936170212766,
239.56382978723406
],
[
365.936170212766,
223.56382978723406
],
[
371.936170212766,
187.56382978723406
],
[
353.936170212766,
168.56382978723406
],
[
344.936170212766,
143.56382978723406
],
[
336.936170212766,
115.56382978723406
]
]
},
{
"label": "chair",
"line_color": null,
"fill_color": null,
"points": [
[
308.936170212766,
242.56382978723406
],
[
281.936170212766,
251.56382978723406
],
[
270.936170212766,
287.56382978723406
],
[
174.936170212766,
275.56382978723406
],
[
148.936170212766,
296.56382978723406
],
[
150.936170212766,
319.56382978723406
],
[
159.936170212766,
328.56382978723406
],
[
164.77327127659578,
375.0
],
[
485.936170212766,
373.56382978723406
],
[
497.936170212766,
336.56382978723406
],
[
497.936170212766,
202.56382978723406
],
[
453.936170212766,
193.56382978723406
],
[
434.936170212766,
212.56382978723406
],
[
367.936170212766,
224.56382978723406
],
[
350.936170212766,
241.56382978723406
]
]
},
{
"label": "person",
"line_color": null,
"fill_color": null,
"points": [
[
425.936170212766,
82.56382978723406
],
[
404.936170212766,
109.56382978723406
],
[
400.936170212766,
114.56382978723406
],
[
437.936170212766,
114.56382978723406
],
[
448.936170212766,
102.56382978723406
],
[
446.936170212766,
91.56382978723406
]
]
},
{
"label": "__ignore__",
"line_color": null,
"fill_color": null,
"points": [
[
457.936170212766,
85.56382978723406
],
[
439.936170212766,
117.56382978723406
],
[
477.936170212766,
117.56382978723406
],
[
474.936170212766,
87.56382978723406
]
]
},
{
"label": "sofa",
"line_color": null,
"fill_color": null,
"points": [
[
183.936170212766,
140.56382978723406
],
[
125.93617021276599,
140.56382978723406
],
[
110.93617021276599,
187.56382978723406
],
[
22.936170212765987,
199.56382978723406
],
[
18.936170212765987,
218.56382978723406
],
[
22.936170212765987,
234.56382978723406
],
[
93.93617021276599,
239.56382978723406
],
[
91.93617021276599,
229.56382978723406
],
[
110.93617021276599,
203.56382978723406
]
]
},
{
"label": "sofa",
"line_color": null,
"fill_color": null,
"points": [
[
103.93617021276599,
290.56382978723406
],
[
58.93617021276599,
303.56382978723406
],
[
97.93617021276599,
311.56382978723406
]
]
},
{
"label": "sofa",
"line_color": null,
"fill_color": null,
"points": [
[
348.936170212766,
146.56382978723406
],
[
472.936170212766,
149.56382978723406
],
[
477.936170212766,
162.56382978723406
],
[
471.936170212766,
196.56382978723406
],
[
453.936170212766,
192.56382978723406
],
[
434.936170212766,
213.56382978723406
],
[
368.936170212766,
226.56382978723406
],
[
375.936170212766,
187.56382978723406
],
[
353.936170212766,
164.56382978723406
]
]
},
{
"label": "sofa",
"line_color": null,
"fill_color": null,
"points": [
[
246.936170212766,
252.56382978723406
],
[
219.936170212766,
277.56382978723406
],
[
254.936170212766,
287.56382978723406
],
[
261.936170212766,
256.56382978723406
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "2011_000006.jpg",
"imageData": null
}
\ No newline at end of file
{
"shapes": [
{
"label": "bus",
"line_color": null,
"fill_color": null,
"points": [
[
260.936170212766,
22.563829787234056
],
[
193.936170212766,
19.563829787234056
],
[
124.93617021276599,
39.563829787234056
],
[
89.93617021276599,
101.56382978723406
],
[
81.93617021276599,
150.56382978723406
],
[
108.93617021276599,
145.56382978723406
],
[
88.93617021276599,
244.56382978723406
],
[
89.93617021276599,
322.56382978723406
],
[
116.93617021276599,
367.56382978723406
],
[
158.936170212766,
368.56382978723406
],
[
165.936170212766,
337.56382978723406
],
[
347.936170212766,
335.56382978723406
],
[
349.936170212766,
369.56382978723406
],
[
391.936170212766,
373.56382978723406
],
[
403.936170212766,
335.56382978723406
],
[
425.936170212766,
332.56382978723406
],
[
421.936170212766,
281.56382978723406
],
[
428.936170212766,
252.56382978723406
],
[
428.936170212766,
236.56382978723406
],
[
409.936170212766,
220.56382978723406
],
[
409.936170212766,
150.56382978723406
],
[
430.936170212766,
143.56382978723406
],
[
433.936170212766,
112.56382978723406
],
[
431.936170212766,
96.56382978723406
],
[
408.936170212766,
90.56382978723406
],
[
395.936170212766,
50.563829787234056
],
[
338.936170212766,
25.563829787234056
]
]
},
{
"label": "bus",
"line_color": null,
"fill_color": null,
"points": [
[
88.93617021276599,
115.56382978723406
],
[
0.9361702127659877,
96.56382978723406
],
[
0.0,
251.968085106388
],
[
0.9361702127659877,
265.56382978723406
],
[
27.936170212765987,
265.56382978723406
],
[
29.936170212765987,
283.56382978723406
],
[
63.93617021276599,
281.56382978723406
],
[
89.93617021276599,
252.56382978723406
],
[
100.93617021276599,
183.56382978723406
],
[
108.93617021276599,
145.56382978723406
],
[
81.93617021276599,
151.56382978723406
]
]
},
{
"label": "car",
"line_color": null,
"fill_color": null,
"points": [
[
413.936170212766,
168.56382978723406
],
[
497.936170212766,
168.56382978723406
],
[
497.936170212766,
256.56382978723406
],
[
431.936170212766,
258.56382978723406
],
[
430.936170212766,
236.56382978723406
],
[
408.936170212766,
218.56382978723406
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "2011_000025.jpg",
"imageData": null
}
\ No newline at end of file
_background_
aeroplane
bicycle
bird
boat
bottle
bus
car
cat
chair
cow
diningtable
dog
horse
motorbike
person
potted plant
sheep
sofa
train
tv/monitor
\ No newline at end of file
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import json
import os
import os.path as osp
import sys
import numpy as np
import PIL.Image
import labelme
def main():
    """Convert a labelme-annotated directory to a VOC-style semantic
    segmentation dataset.

    For each ``*.json`` label file in ``input_dir`` this writes, under
    ``output_dir``: the JPEG image, the class label as ``.npy`` and
    ``.png``, and a JPEG visualization.

    Exits with status 1 when ``output_dir`` already exists.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('input_dir', help='input annotated directory')
    parser.add_argument('output_dir', help='output dataset directory')
    parser.add_argument('--labels', help='labels file', required=True)
    args = parser.parse_args()

    if osp.exists(args.output_dir):
        print('Output directory already exists:', args.output_dir)
        sys.exit(1)
    os.makedirs(args.output_dir)
    os.makedirs(osp.join(args.output_dir, 'JPEGImages'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationClass'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationClassPNG'))
    os.makedirs(osp.join(args.output_dir, 'SegmentationClassVisualization'))
    print('Creating dataset:', args.output_dir)

    class_names = []
    class_name_to_id = {}
    # Context manager closes the labels file deterministically
    # (the original open(...).readlines() leaked the handle).
    with open(args.labels) as f:
        for i, line in enumerate(f):
            class_id = i - 1  # starts with -1
            class_name = line.strip()
            # __ignore__ (id -1) is mapped but excluded from class_names.
            class_name_to_id[class_name] = class_id
            if class_id == -1:
                assert class_name == '__ignore__'
                continue
            elif class_id == 0:
                assert class_name == '_background_'
            class_names.append(class_name)
    class_names = tuple(class_names)
    print('class_names:', class_names)
    out_class_names_file = osp.join(args.output_dir, 'class_names.txt')
    with open(out_class_names_file, 'w') as f:
        f.writelines('\n'.join(class_names))
    print('Saved class_names:', out_class_names_file)

    colormap = labelme.utils.label_colormap(255)

    for label_file in glob.glob(osp.join(args.input_dir, '*.json')):
        print('Generating dataset from:', label_file)
        with open(label_file) as f:
            base = osp.splitext(osp.basename(label_file))[0]
            out_img_file = osp.join(
                args.output_dir, 'JPEGImages', base + '.jpg')
            out_lbl_file = osp.join(
                args.output_dir, 'SegmentationClass', base + '.npy')
            out_png_file = osp.join(
                args.output_dir, 'SegmentationClassPNG', base + '.png')
            out_viz_file = osp.join(
                args.output_dir,
                'SegmentationClassVisualization',
                base + '.jpg',
            )

            data = json.load(f)

            # imagePath is relative to the label file's directory.
            img_file = osp.join(osp.dirname(label_file), data['imagePath'])
            img = np.asarray(PIL.Image.open(img_file))
            PIL.Image.fromarray(img).save(out_img_file)

            lbl = labelme.utils.shapes_to_label(
                img_shape=img.shape,
                shapes=data['shapes'],
                label_name_to_value=class_name_to_id,
            )
            labelme.utils.lblsave(out_png_file, lbl)
            np.save(out_lbl_file, lbl)
            viz = labelme.utils.draw_label(
                lbl, img, class_names, colormap=colormap)
            PIL.Image.fromarray(viz).save(out_viz_file)


if __name__ == '__main__':
    main()
__ignore__
_background_
aeroplane
bicycle
bird
boat
bottle
bus
car
cat
chair
cow
diningtable
dog
horse
motorbike
person
potted plant
sheep
sofa
train
tv/monitor
\ No newline at end of file
# Tutorial (Single Image Example)
## Annotation
```bash
labelme apc2016_obj3.jpg -O apc2016_obj3.json
```
![](.readme/annotation.jpg)
## Visualization
To view the JSON file quickly, you can use the utility script:
```bash
labelme_draw_json apc2016_obj3.json
```
<img src=".readme/draw_json.jpg" width="70%" />
## Convert to Dataset
To convert the JSON file to a set of image and label files, you can run the following:
```bash
labelme_json_to_dataset apc2016_obj3.json -o apc2016_obj3_json
```
It generates standard files from the JSON file.
- [img.png](apc2016_obj3_json/img.png): Image file.
- [label.png](apc2016_obj3_json/label.png): uint8 label file.
- [label_viz.png](apc2016_obj3_json/label_viz.png): Visualization of `label.png`.
- [label_names.txt](apc2016_obj3_json/label_names.txt): Label names for values in `label.png`.
## How to load label PNG file?
Note that loading `label.png` is a bit difficult
(`scipy.misc.imread`, `skimage.io.imread` may not work correctly),
and please use `PIL.Image.open` to avoid unexpected behavior:
```python
# see load_label_png.py also.
>>> import numpy as np
>>> import PIL.Image
>>> label_png = 'apc2016_obj3_json/label.png'
>>> lbl = np.asarray(PIL.Image.open(label_png))
>>> print(lbl.dtype)
dtype('uint8')
>>> np.unique(lbl)
array([0, 1, 2, 3], dtype=uint8)
>>> lbl.shape
(907, 1210)
```
Also, you can see the label PNG file by:
```bash
labelme_draw_label_png apc2016_obj3_json/label.png
```
<img src=".readme/draw_label_png.jpg" width="35%" />
label_names:
- _background_
- shelf
- highland_6539_self_stick_notes
- mead_index_cards
- kong_air_dog_squeakair_tennis_ball
_background_
shelf
highland_6539_self_stick_notes
mead_index_cards
kong_air_dog_squeakair_tennis_ball
#!/usr/bin/env python
from __future__ import print_function
import os.path as osp
import numpy as np
import PIL.Image
here = osp.dirname(osp.abspath(__file__))  # directory containing this script


def main():
    """Load label.png with PIL and print the label-value -> label-name mapping.

    Exits with status 1 when the number of unique label values in label.png
    does not match the number of entries in label_names.txt.
    """
    label_png = osp.join(here, 'apc2016_obj3_json/label.png')
    print('Loading:', label_png)
    print()

    # PIL preserves the palette-indexed uint8 values of label.png
    # (other readers may convert the image and lose the raw indices).
    lbl = np.asarray(PIL.Image.open(label_png))
    labels = np.unique(lbl)

    label_names_txt = osp.join(here, 'apc2016_obj3_json/label_names.txt')
    # Use a context manager so the file handle is closed deterministically
    # (the original iterated a bare open() and leaked the handle).
    with open(label_names_txt) as f:
        label_names = [name.strip() for name in f]

    print('# of labels:', len(labels))
    print('# of label_names:', len(label_names))
    if len(labels) != len(label_names):
        print('Number of unique labels and label_names must be same.')
        # raise SystemExit instead of quit(): quit() is provided by the
        # site module and is not guaranteed to exist (e.g. under python -S).
        raise SystemExit(1)
    print()

    print('label: label_name')
    for label, label_name in zip(labels, label_names):
        print('%d: %s' % (label, label_name))


if __name__ == '__main__':
    main()
# Video Annotation Example
## Annotation
```bash
labelme data_annotated --labels labels.txt --nodata --keep-prev
```
<img src=".readme/00000100.jpg" width="49%" /> <img src=".readme/00000101.jpg" width="49%" />
*Fig 1. Video annotation example. A frame (left), The next frame (right).*
<img src=".readme/data_annotated.gif" width="98%" />
*Fig 2. Visualization of video semantic segmentation.*
## How to Convert a Video File to Images for Annotation?
```bash
# Download and install software for converting a video file (MP4) to images
wget https://raw.githubusercontent.com/wkentaro/dotfiles/f3c5ad1f47834818d4f123c36ed59a5943709518/local/bin/video_to_images
pip install imageio imageio-ffmpeg tqdm
python video_to_images your_video.mp4 # this creates your_video/ directory
ls your_video/
labelme your_video/
```
{
"flags": {},
"shapes": [
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
634,
204
],
[
604,
275
],
[
603,
340
],
[
622,
363
],
[
639,
363
],
[
649,
354
],
[
682,
383
],
[
733,
390
],
[
748,
364
],
[
827,
359
],
[
829,
250
],
[
800,
194
],
[
775,
185
],
[
740,
199
]
]
},
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
860,
190
],
[
997,
186
],
[
998,
305
],
[
924,
320
],
[
905,
352
],
[
877,
353
],
[
869,
245
],
[
879,
222
]
]
},
{
"label": "car",
"line_color": null,
"fill_color": null,
"points": [
[
924,
321
],
[
905,
352
],
[
909,
388
],
[
936,
404
],
[
959,
411
],
[
966,
431
],
[
1000.0,
432.0
],
[
1000.0,
306.0
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "00000100.jpg",
"imageData": null
}
\ No newline at end of file
{
"flags": {},
"shapes": [
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
614.0,
204.0
],
[
584.0,
275.0
],
[
583.0,
340.0
],
[
602.0,
363.0
],
[
619.0,
363.0
],
[
629.0,
354.0
],
[
662.0,
383.0
],
[
713.0,
390.0
],
[
728.0,
364.0
],
[
827.0,
358.0
],
[
825.0,
249.0
],
[
801.0,
200.0
],
[
757.0,
194.0
],
[
720.0,
199.0
]
]
},
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
860.0,
190.0
],
[
997.0,
186.0
],
[
998.0,
305.0
],
[
924.0,
320.0
],
[
905.0,
352.0
],
[
877.0,
353.0
],
[
869.0,
245.0
],
[
879.0,
222.0
]
]
},
{
"label": "car",
"line_color": null,
"fill_color": null,
"points": [
[
924.0,
321.0
],
[
905.0,
352.0
],
[
909.0,
388.0
],
[
936.0,
404.0
],
[
959.0,
411.0
],
[
966.0,
431.0
],
[
1000.0,
432.0
],
[
1000.0,
306.0
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "00000101.jpg",
"imageData": null
}
\ No newline at end of file
{
"flags": {},
"shapes": [
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
593.0,
204.0
],
[
563.0,
275.0
],
[
562.0,
340.0
],
[
581.0,
363.0
],
[
598.0,
363.0
],
[
608.0,
354.0
],
[
641.0,
383.0
],
[
692.0,
390.0
],
[
707.0,
364.0
],
[
827.0,
358.0
],
[
823.0,
243.0
],
[
802.0,
199.0
],
[
736.0,
194.0
],
[
699.0,
199.0
]
]
},
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
860.0,
190.0
],
[
997.0,
186.0
],
[
998.0,
305.0
],
[
924.0,
320.0
],
[
905.0,
352.0
],
[
877.0,
353.0
],
[
869.0,
245.0
],
[
879.0,
222.0
]
]
},
{
"label": "car",
"line_color": null,
"fill_color": null,
"points": [
[
924.0,
321.0
],
[
905.0,
352.0
],
[
909.0,
388.0
],
[
936.0,
404.0
],
[
959.0,
411.0
],
[
966.0,
431.0
],
[
1000.0,
432.0
],
[
1000.0,
306.0
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "00000102.jpg",
"imageData": null
}
\ No newline at end of file
{
"flags": {},
"shapes": [
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
573.0,
207.0
],
[
543.0,
278.0
],
[
542.0,
343.0
],
[
561.0,
366.0
],
[
578.0,
366.0
],
[
588.0,
357.0
],
[
621.0,
386.0
],
[
672.0,
393.0
],
[
687.0,
367.0
],
[
829.0,
354.0
],
[
821.0,
236.0
],
[
801.0,
199.0
],
[
716.0,
197.0
],
[
679.0,
202.0
]
]
},
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
860.0,
190.0
],
[
997.0,
186.0
],
[
998.0,
305.0
],
[
924.0,
320.0
],
[
905.0,
352.0
],
[
877.0,
353.0
],
[
869.0,
245.0
],
[
879.0,
222.0
]
]
},
{
"label": "car",
"line_color": null,
"fill_color": null,
"points": [
[
924.0,
321.0
],
[
905.0,
352.0
],
[
909.0,
388.0
],
[
936.0,
404.0
],
[
959.0,
411.0
],
[
966.0,
431.0
],
[
1000.0,
432.0
],
[
1000.0,
306.0
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "00000103.jpg",
"imageData": null
}
\ No newline at end of file
{
"flags": {},
"shapes": [
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
556.0,
201.0
],
[
528.0,
277.0
],
[
524.0,
342.0
],
[
528.0,
361.0
],
[
563.0,
365.0
],
[
573.0,
356.0
],
[
606.0,
385.0
],
[
657.0,
392.0
],
[
672.0,
366.0
],
[
825.0,
354.0
],
[
826.0,
238.0
],
[
801.0,
202.0
],
[
701.0,
196.0
],
[
664.0,
201.0
]
]
},
{
"label": "track",
"line_color": null,
"fill_color": null,
"points": [
[
860.0,
190.0
],
[
997.0,
186.0
],
[
998.0,
305.0
],
[
924.0,
320.0
],
[
905.0,
352.0
],
[
874.0,
354.0
],
[
869.0,
245.0
],
[
879.0,
222.0
]
]
},
{
"label": "car",
"line_color": null,
"fill_color": null,
"points": [
[
924.0,
321.0
],
[
905.0,
352.0
],
[
909.0,
388.0
],
[
936.0,
404.0
],
[
959.0,
411.0
],
[
966.0,
431.0
],
[
1000.0,
432.0
],
[
1000.0,
306.0
]
]
}
],
"lineColor": [
0,
255,
0,
128
],
"fillColor": [
255,
0,
0,
128
],
"imagePath": "00000104.jpg",
"imageData": null
}
\ No newline at end of file
../semantic_segmentation/labelme2voc.py
\ No newline at end of file
sudo: false
cache:
- pip
dist: trusty
language: python
python:
- '3.6'
- '2.7'
branches:
only:
- master
notifications:
email: false
install:
- true # drop pip install -r requirements.txt
script:
- pip install flake8
- flake8 .
<h1 align="center">
github2pypi
</h1>
<h4 align="center">
Utils to release a Python repository on GitHub to PyPI
</h4>
<div align="center">
<a href="https://travis-ci.com/wkentaro/github2pypi"><img src="https://travis-ci.com/wkentaro/github2pypi.svg?branch=master"></a>
</div>
## Usage
### 1. Add `github2pypi` as submodule.
See [imgviz](https://github.com/wkentaro/imgviz) as an example.
```bash
git clone https://github.com/wkentaro/imgviz
cd imgviz
git submodule add https://github.com/wkentaro/github2pypi.git
```
### 2. Edit `setup.py`.
```python
import github2pypi
...
with open('README.md') as f:
# e.g., ![](examples/image.jpg) ->
# ![](https://github.com/wkentaro/imgviz/blob/master/examples/image.jpg)
long_description = github2pypi.replace_url(
slug='wkentaro/imgviz', content=f.read()
)
setup(
...
long_description=long_description,
long_description_content_type='text/markdown',
)
```
# flake8: noqa
from .replace_url import replace_url
import re
def replace_url(slug, content, branch='master'):
    """Rewrite relative Markdown/HTML URLs in *content* to absolute GitHub URLs.

    Parameters
    ----------
    slug: str
        GitHub repository slug, e.g. 'wkentaro/imgviz'.
    content: str
        README-style content whose relative links should be rewritten.
    branch: str
        Branch name used in the generated URLs (default: 'master').

    Returns
    -------
    str
        Content where every relative link / image src / anchor href is
        rewritten to https://github.com/<slug>/blob/<branch>/<url>;
        .jpg/.png targets additionally get '?raw=true' so GitHub serves
        the raw image bytes.
    """
    def repl(match):
        url = match.group(1)
        if url.startswith('http'):
            # Already absolute: leave the whole match untouched.
            return match.group(0)

        url_new = (
            'https://github.com/{slug}/blob/{branch}/{url}'
            .format(slug=slug, branch=branch, url=url)
        )
        # BUG FIX: the original pattern r'.*[\.jpg|\.png]$' is a character
        # class matching any single trailing char from {., j, p, g, |, n},
        # not the extensions .jpg/.png; use a real alternation instead.
        if re.match(r'.*\.(jpg|png)$', url_new):
            url_new += '?raw=true'

        # Splice the rewritten URL back into the full match, preserving the
        # surrounding markup (e.g. '![...](', ')' or 'src="', '"').
        start0, _ = match.span(0)
        start, end = match.span(1)
        whole = match.group(0)
        return whole[:start - start0] + url_new + whole[end - start0:]

    # Process line by line so the non-greedy patterns cannot span lines.
    lines = []
    for line in content.splitlines():
        patterns = [
            r'!\[.*?\]\((.*?)\)',
            r'<img.*?src="(.*?)".*?>',
            r'\[.*?\]\((.*?)\)',
            r'<a.*?href="(.*?)".*?>',
        ]
        for pattern in patterns:
            line = re.sub(pattern, repl, line)
        lines.append(line)
    return '\n'.join(lines)
# -*- mode: python -*-
# vim: ft=python
# PyInstaller spec file: bundles labelme into a one-file GUI executable.
# Analysis/PYZ/EXE/BUNDLE are injected into the namespace by PyInstaller
# when it executes this file -- do not run it as a plain Python script.

block_cipher = None


a = Analysis(
    ['labelme/main.py'],
    pathex=['labelme'],
    binaries=[],
    # Ship the default config and icon resources alongside the code.
    datas=[
        ('labelme/config/default_config.yaml', 'labelme/config'),
        ('labelme/icons/*', 'labelme/icons'),
    ],
    hiddenimports=[],
    hookspath=[],
    runtime_hooks=[],
    # matplotlib is excluded, presumably to keep the bundle small -- confirm
    # that no bundled code path imports it at runtime.
    excludes=['matplotlib'],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.zipfiles,
    a.datas,
    name='labelme',
    debug=False,
    strip=False,
    upx=True,
    runtime_tmpdir=None,
    console=False,  # GUI application: no console window
    icon='labelme/icons/icon.ico',
)
# macOS .app bundle wrapping the executable above.
app = BUNDLE(
    exe,
    name='labelme.app',
    icon='labelme/icons/icon.icns',
    bundle_identifier=None,
    info_plist={'NSHighResolutionCapable': 'True'},
)
# flake8: noqa
# Package initializer: exposes version/Qt/Python flags and re-exports
# submodules. Statement order matters: the flags are computed before the
# intermediate names (QT_VERSION, sys) are deleted from the namespace.

import logging
import sys

from qtpy import QT_VERSION


__appname__ = 'labelme'

# Qt major-version flags, resolved once at import time via qtpy.
QT4 = QT_VERSION[0] == '4'
QT5 = QT_VERSION[0] == '5'
del QT_VERSION

# Python major-version flags (first char of sys.version is '2' or '3').
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
del sys

from labelme._version import __version__

from labelme import testing
from labelme import utils
# Semantic Versioning 2.0.0: https://semver.org/
# 1. MAJOR version when you make incompatible API changes;
# 2. MINOR version when you add functionality in a backwards-compatible manner;
# 3. PATCH version when you make backwards-compatible bug fixes.
__version__ = '3.16.3'  # package version string (re-exported by labelme/__init__.py)
此差异已折叠。
# flake8: noqa
from . import draw_json
from . import draw_label_png
from . import json_to_dataset
from . import on_docker
#!/usr/bin/env python
import argparse
import base64
import json
import os
import sys
import matplotlib.pyplot as plt
from labelme import utils
PY2 = sys.version_info[0] == 2
def main():
    """Visualize a labelme annotation JSON file with matplotlib.

    Shows the source image (left subplot) and the rendered label
    visualization (right subplot).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')  # path to the labelme annotation JSON
    args = parser.parse_args()

    json_file = args.json_file

    data = json.load(open(json_file))

    if data['imageData']:
        # Image bytes are embedded in the JSON as a base64 string.
        imageData = data['imageData']
    else:
        # Image was stored externally (e.g. annotated with --nodata): load it
        # relative to the JSON file and re-encode as base64 so that
        # img_b64_to_arr receives a uniform input.
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
            imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    # Assign an integer value to each label name; 0 is reserved for the
    # background. Sorting by label makes the assignment deterministic.
    label_name_to_value = {'_background_': 0}
    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    # Invert the mapping into a value-indexed list of names for draw_label.
    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = utils.draw_label(lbl, img, label_names)

    plt.subplot(121)
    plt.imshow(img)
    plt.subplot(122)
    plt.imshow(lbl_viz)
    plt.show()


if __name__ == '__main__':
    main()
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册