Unverified commit 861a16ce authored by 小飞猪, committed by GitHub

[xdoctest][task 293-296] reformat example code with google style in `vision` (#56458)

* [Doctest]fix No.293-296, test=docs_preview

* empty commit

* code style

* empty commit

* fix timeout

* fix timeout
Parent b32314b6
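Note on the conversion shown in the hunks below: every docstring example is reformatted to the Google-style/xdoctest convention. Plain example code with outputs written in trailing `#` comments becomes interactive-prompt style, where each statement is prefixed with `>>>` (continuation lines with `...`) and the expected output follows on the next line, optionally guarded by directives such as `# doctest: +SKIP(reason)` for platform-dependent output or `# doctest: +TIMEOUT(seconds)` for slow, download-heavy examples. A minimal sketch of the target format follows; the `scaled` helper and the final `xdoctest` invocation are illustrative assumptions, not part of this PR:

    def scaled(x, factor=2):
        """Multiply ``x`` by ``factor``.

        Examples:
            .. code-block:: python

                >>> # hypothetical helper, redefined inline so the example is self-contained
                >>> def scaled(x, factor=2):
                ...     return x * factor
                >>> print(scaled(3))   # expected output goes on the following line
                6
                >>> # doctest: +SKIP(output is platform dependent)
                >>> print(hex(id(scaled)))
                0x7f0000000000
                >>> # doctest: -SKIP
        """
        return x * factor


    if __name__ == "__main__":
        # Optional self-check; assumes the third-party `xdoctest` package is
        # installed and that passing a file path here is accepted.
        import xdoctest
        xdoctest.doctest_module(__file__, command="all")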
......@@ -102,102 +102,106 @@ class DatasetFolder(Dataset):
.. code-block:: python
import shutil
import tempfile
import cv2
import numpy as np
import paddle.vision.transforms as T
from pathlib import Path
from paddle.vision.datasets import DatasetFolder
def make_fake_file(img_path: str):
if img_path.endswith((".jpg", ".png", ".jpeg")):
fake_img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
cv2.imwrite(img_path, fake_img)
elif img_path.endswith(".txt"):
with open(img_path, "w") as f:
f.write("This is a fake file.")
def make_directory(root, directory_hierarchy, file_maker=make_fake_file):
root = Path(root)
root.mkdir(parents=True, exist_ok=True)
for subpath in directory_hierarchy:
if isinstance(subpath, str):
filepath = root / subpath
file_maker(str(filepath))
else:
dirname = list(subpath.keys())[0]
make_directory(root / dirname, subpath[dirname])
directory_hierarchy = [
{"class_0": [
"abc.jpg",
"def.png"]},
{"class_1": [
"ghi.jpeg",
"jkl.png",
{"mno": [
"pqr.jpeg",
"stu.jpg"]}]},
"this_will_be_ignored.txt",
]
# You can replace this with any directory to explore the structure
# of generated data. e.g. fake_data_dir = "./temp_dir"
fake_data_dir = tempfile.mkdtemp()
make_directory(fake_data_dir, directory_hierarchy)
data_folder_1 = DatasetFolder(fake_data_dir)
print(data_folder_1.classes)
# ['class_0', 'class_1']
print(data_folder_1.class_to_idx)
# {'class_0': 0, 'class_1': 1}
print(data_folder_1.samples)
# [('./temp_dir/class_0/abc.jpg', 0), ('./temp_dir/class_0/def.png', 0),
# ('./temp_dir/class_1/ghi.jpeg', 1), ('./temp_dir/class_1/jkl.png', 1),
# ('./temp_dir/class_1/mno/pqr.jpeg', 1), ('./temp_dir/class_1/mno/stu.jpg', 1)]
print(data_folder_1.targets)
# [0, 0, 1, 1, 1, 1]
print(len(data_folder_1))
# 6
for i in range(len(data_folder_1)):
img, label = data_folder_1[i]
# do something with img and label
print(type(img), img.size, label)
# <class 'PIL.Image.Image'> (32, 32) 0
transform = T.Compose(
[
T.Resize(64),
T.ToTensor(),
T.Normalize(
mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5],
to_rgb=True,
),
]
)
data_folder_2 = DatasetFolder(
fake_data_dir,
loader=lambda x: cv2.imread(x), # load image with OpenCV
extensions=(".jpg",), # only load *.jpg files
transform=transform, # apply transform to every image
)
print([img_path for img_path, label in data_folder_2.samples])
# ['./temp_dir/class_0/abc.jpg', './temp_dir/class_1/mno/stu.jpg']
print(len(data_folder_2))
# 2
for img, label in iter(data_folder_2):
# do something with img and label
print(type(img), img.shape, label)
# <class 'paddle.Tensor'> [3, 64, 64] 0
shutil.rmtree(fake_data_dir)
>>> import shutil
>>> import tempfile
>>> import cv2
>>> import numpy as np
>>> import paddle.vision.transforms as T
>>> from pathlib import Path
>>> from paddle.vision.datasets import DatasetFolder
>>> def make_fake_file(img_path: str):
... if img_path.endswith((".jpg", ".png", ".jpeg")):
... fake_img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
... cv2.imwrite(img_path, fake_img)
... elif img_path.endswith(".txt"):
... with open(img_path, "w") as f:
... f.write("This is a fake file.")
>>> def make_directory(root, directory_hierarchy, file_maker=make_fake_file):
... root = Path(root)
... root.mkdir(parents=True, exist_ok=True)
... for subpath in directory_hierarchy:
... if isinstance(subpath, str):
... filepath = root / subpath
... file_maker(str(filepath))
... else:
... dirname = list(subpath.keys())[0]
... make_directory(root / dirname, subpath[dirname])
>>> directory_hierarchy = [
... {"class_0": [
... "abc.jpg",
... "def.png"]},
... {"class_1": [
... "ghi.jpeg",
... "jkl.png",
... {"mno": [
... "pqr.jpeg",
... "stu.jpg"]}]},
... "this_will_be_ignored.txt",
... ]
>>> # You can replace this with any directory to explore the structure
>>> # of generated data. e.g. fake_data_dir = "./temp_dir"
>>> fake_data_dir = tempfile.mkdtemp()
>>> make_directory(fake_data_dir, directory_hierarchy)
>>> data_folder_1 = DatasetFolder(fake_data_dir)
>>> print(data_folder_1.classes)
['class_0', 'class_1']
>>> print(data_folder_1.class_to_idx)
{'class_0': 0, 'class_1': 1}
>>> print(data_folder_1.samples)
>>> # doctest: +SKIP(output differs on Windows)
[('./temp_dir/class_0/abc.jpg', 0), ('./temp_dir/class_0/def.png', 0),
('./temp_dir/class_1/ghi.jpeg', 1), ('./temp_dir/class_1/jkl.png', 1),
('./temp_dir/class_1/mno/pqr.jpeg', 1), ('./temp_dir/class_1/mno/stu.jpg', 1)]
>>> # doctest: -SKIP
>>> print(data_folder_1.targets)
[0, 0, 1, 1, 1, 1]
>>> print(len(data_folder_1))
6
>>> for i in range(len(data_folder_1)):
... img, label = data_folder_1[i]
... # do something with img and label
... print(type(img), img.size, label)
... # <class 'PIL.Image.Image'> (32, 32) 0
>>> transform = T.Compose(
... [
... T.Resize(64),
... T.ToTensor(),
... T.Normalize(
... mean=[0.5, 0.5, 0.5],
... std=[0.5, 0.5, 0.5],
... to_rgb=True,
... ),
... ]
... )
>>> data_folder_2 = DatasetFolder(
... fake_data_dir,
... loader=lambda x: cv2.imread(x), # load image with OpenCV
... extensions=(".jpg",), # only load *.jpg files
... transform=transform, # apply transform to every image
... )
>>> print([img_path for img_path, label in data_folder_2.samples])
>>> # doctest: +SKIP(output differs on Windows)
['./temp_dir/class_0/abc.jpg', './temp_dir/class_1/mno/stu.jpg']
>>> # doctest: -SKIP
>>> print(len(data_folder_2))
2
>>> for img, label in iter(data_folder_2):
... # do something with img and label
... print(type(img), img.shape, label)
... # <class 'paddle.Tensor'> [3, 64, 64] 0
>>> shutil.rmtree(fake_data_dir)
"""
def __init__(
......@@ -335,92 +339,96 @@ class ImageFolder(Dataset):
.. code-block:: python
import shutil
import tempfile
import cv2
import numpy as np
import paddle.vision.transforms as T
from pathlib import Path
from paddle.vision.datasets import ImageFolder
def make_fake_file(img_path: str):
if img_path.endswith((".jpg", ".png", ".jpeg")):
fake_img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
cv2.imwrite(img_path, fake_img)
elif img_path.endswith(".txt"):
with open(img_path, "w") as f:
f.write("This is a fake file.")
def make_directory(root, directory_hierarchy, file_maker=make_fake_file):
root = Path(root)
root.mkdir(parents=True, exist_ok=True)
for subpath in directory_hierarchy:
if isinstance(subpath, str):
filepath = root / subpath
file_maker(str(filepath))
else:
dirname = list(subpath.keys())[0]
make_directory(root / dirname, subpath[dirname])
directory_hierarchy = [
"abc.jpg",
"def.png",
{"ghi": [
"jkl.jpeg",
{"mno": [
"pqr.jpg"]}]},
"this_will_be_ignored.txt",
]
# You can replace this with any directory to explore the structure
# of generated data. e.g. fake_data_dir = "./temp_dir"
fake_data_dir = tempfile.mkdtemp()
make_directory(fake_data_dir, directory_hierarchy)
image_folder_1 = ImageFolder(fake_data_dir)
print(image_folder_1.samples)
# ['./temp_dir/abc.jpg', './temp_dir/def.png',
# './temp_dir/ghi/jkl.jpeg', './temp_dir/ghi/mno/pqr.jpg']
print(len(image_folder_1))
# 4
for i in range(len(image_folder_1)):
(img,) = image_folder_1[i]
# do something with img
print(type(img), img.size)
# <class 'PIL.Image.Image'> (32, 32)
transform = T.Compose(
[
T.Resize(64),
T.ToTensor(),
T.Normalize(
mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5],
to_rgb=True,
),
]
)
image_folder_2 = ImageFolder(
fake_data_dir,
loader=lambda x: cv2.imread(x), # load image with OpenCV
extensions=(".jpg",), # only load *.jpg files
transform=transform, # apply transform to every image
)
print(image_folder_2.samples)
# ['./temp_dir/abc.jpg', './temp_dir/ghi/mno/pqr.jpg']
print(len(image_folder_2))
# 2
for (img,) in iter(image_folder_2):
# do something with img
print(type(img), img.shape)
# <class 'paddle.Tensor'> [3, 64, 64]
shutil.rmtree(fake_data_dir)
>>> import shutil
>>> import tempfile
>>> import cv2
>>> import numpy as np
>>> import paddle.vision.transforms as T
>>> from pathlib import Path
>>> from paddle.vision.datasets import ImageFolder
>>> def make_fake_file(img_path: str):
... if img_path.endswith((".jpg", ".png", ".jpeg")):
... fake_img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
... cv2.imwrite(img_path, fake_img)
... elif img_path.endswith(".txt"):
... with open(img_path, "w") as f:
... f.write("This is a fake file.")
>>> def make_directory(root, directory_hierarchy, file_maker=make_fake_file):
... root = Path(root)
... root.mkdir(parents=True, exist_ok=True)
... for subpath in directory_hierarchy:
... if isinstance(subpath, str):
... filepath = root / subpath
... file_maker(str(filepath))
... else:
... dirname = list(subpath.keys())[0]
... make_directory(root / dirname, subpath[dirname])
>>> directory_hierarchy = [
... "abc.jpg",
... "def.png",
... {"ghi": [
... "jkl.jpeg",
... {"mno": [
... "pqr.jpg"]}]},
... "this_will_be_ignored.txt",
... ]
>>> # You can replace this with any directory to explore the structure
>>> # of generated data. e.g. fake_data_dir = "./temp_dir"
>>> fake_data_dir = tempfile.mkdtemp()
>>> make_directory(fake_data_dir, directory_hierarchy)
>>> image_folder_1 = ImageFolder(fake_data_dir)
>>> print(image_folder_1.samples)
>>> # doctest: +SKIP(output differs on Windows)
['./temp_dir/abc.jpg', './temp_dir/def.png',
'./temp_dir/ghi/jkl.jpeg', './temp_dir/ghi/mno/pqr.jpg']
>>> # doctest: -SKIP
>>> print(len(image_folder_1))
4
>>> for i in range(len(image_folder_1)):
... (img,) = image_folder_1[i]
... # do something with img
... print(type(img), img.size)
... # <class 'PIL.Image.Image'> (32, 32)
>>> transform = T.Compose(
... [
... T.Resize(64),
... T.ToTensor(),
... T.Normalize(
... mean=[0.5, 0.5, 0.5],
... std=[0.5, 0.5, 0.5],
... to_rgb=True,
... ),
... ]
... )
>>> image_folder_2 = ImageFolder(
... fake_data_dir,
... loader=lambda x: cv2.imread(x), # load image with OpenCV
... extensions=(".jpg",), # only load *.jpg files
... transform=transform, # apply transform to every image
... )
>>> print(image_folder_2.samples)
>>> # doctest: +SKIP(output differs on Windows)
['./temp_dir/abc.jpg', './temp_dir/ghi/mno/pqr.jpg']
>>> # doctest: -SKIP
>>> print(len(image_folder_2))
2
>>> for (img,) in iter(image_folder_2):
... # do something with img
... print(type(img), img.shape)
... # <class 'paddle.Tensor'> [3, 64, 64]
>>> shutil.rmtree(fake_data_dir)
"""
def __init__(
......
......@@ -50,44 +50,44 @@ class MNIST(Dataset):
.. code-block:: python
import itertools
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
mnist = MNIST()
print(len(mnist))
# 60000
for i in range(5): # only show first 5 images
img, label = mnist[i]
# do something with img and label
print(type(img), img.size, label)
# <class 'PIL.Image.Image'> (28, 28) [5]
transform = T.Compose(
[
T.ToTensor(),
T.Normalize(
mean=[127.5],
std=[127.5],
),
]
)
mnist_test = MNIST(
mode="test",
transform=transform, # apply transform to every image
backend="cv2", # use OpenCV as image transform backend
)
print(len(mnist_test))
# 10000
for img, label in itertools.islice(iter(mnist_test), 5): # only show first 5 images
# do something with img and label
print(type(img), img.shape, label)
# <class 'paddle.Tensor'> [1, 28, 28] [7]
>>> import itertools
>>> import paddle.vision.transforms as T
>>> from paddle.vision.datasets import MNIST
>>> mnist = MNIST()
>>> print(len(mnist))
60000
>>> for i in range(5): # only show first 5 images
... img, label = mnist[i]
... # do something with img and label
... print(type(img), img.size, label)
... # <class 'PIL.Image.Image'> (28, 28) [5]
>>> transform = T.Compose(
... [
... T.ToTensor(),
... T.Normalize(
... mean=[127.5],
... std=[127.5],
... ),
... ]
... )
>>> mnist_test = MNIST(
... mode="test",
... transform=transform, # apply transform to every image
... backend="cv2", # use OpenCV as image transform backend
... )
>>> print(len(mnist_test))
10000
>>> for img, label in itertools.islice(iter(mnist_test), 5): # only show first 5 images
... # do something with img and label
... print(type(img), img.shape, label)
... # <class 'paddle.Tensor'> [1, 28, 28] [7]
"""
NAME = 'mnist'
......@@ -261,44 +261,44 @@ class FashionMNIST(MNIST):
.. code-block:: python
import itertools
import paddle.vision.transforms as T
from paddle.vision.datasets import FashionMNIST
fashion_mnist = FashionMNIST()
print(len(fashion_mnist))
# 60000
for i in range(5): # only show first 5 images
img, label = fashion_mnist[i]
# do something with img and label
print(type(img), img.size, label)
# <class 'PIL.Image.Image'> (28, 28) [9]
transform = T.Compose(
[
T.ToTensor(),
T.Normalize(
mean=[127.5],
std=[127.5],
),
]
)
fashion_mnist_test = FashionMNIST(
mode="test",
transform=transform, # apply transform to every image
backend="cv2", # use OpenCV as image transform backend
)
print(len(fashion_mnist_test))
# 10000
for img, label in itertools.islice(iter(fashion_mnist_test), 5): # only show first 5 images
# do something with img and label
print(type(img), img.shape, label)
# <class 'paddle.Tensor'> [1, 28, 28] [9]
>>> import itertools
>>> import paddle.vision.transforms as T
>>> from paddle.vision.datasets import FashionMNIST
>>> fashion_mnist = FashionMNIST()
>>> print(len(fashion_mnist))
60000
>>> for i in range(5): # only show first 5 images
... img, label = fashion_mnist[i]
... # do something with img and label
... print(type(img), img.size, label)
... # <class 'PIL.Image.Image'> (28, 28) [9]
>>> transform = T.Compose(
... [
... T.ToTensor(),
... T.Normalize(
... mean=[127.5],
... std=[127.5],
... ),
... ]
... )
>>> fashion_mnist_test = FashionMNIST(
... mode="test",
... transform=transform, # apply transform to every image
... backend="cv2", # use OpenCV as image transform backend
... )
>>> print(len(fashion_mnist_test))
10000
>>> for img, label in itertools.islice(iter(fashion_mnist_test), 5): # only show first 5 images
... # do something with img and label
... print(type(img), img.shape, label)
... # <class 'paddle.Tensor'> [1, 28, 28] [9]
"""
NAME = 'fashion-mnist'
......
......@@ -58,49 +58,50 @@ class VOC2012(Dataset):
.. code-block:: python
import itertools
import paddle.vision.transforms as T
from paddle.vision.datasets import VOC2012
voc2012 = VOC2012()
print(len(voc2012))
# 2913
for i in range(5): # only show first 5 images
img, label = voc2012[i]
# do something with img and label
print(type(img), img.size)
# <class 'PIL.JpegImagePlugin.JpegImageFile'> (500, 281)
print(type(label), label.size)
# <class 'PIL.PngImagePlugin.PngImageFile'> (500, 281)
transform = T.Compose(
[
T.ToTensor(),
T.Normalize(
mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5],
to_rgb=True,
),
]
)
voc2012_test = VOC2012(
mode="test",
transform=transform, # apply transform to every image
backend="cv2", # use OpenCV as image transform backend
)
print(len(voc2012_test))
# 1464
for img, label in itertools.islice(iter(voc2012_test), 5): # only show first 5 images
# do something with img and label
print(type(img), img.shape)
# <class 'paddle.Tensor'> [3, 281, 500]
print(type(label), label.shape)
# <class 'numpy.ndarray'> (281, 500)
>>> # doctest: +TIMEOUT(75)
>>> import itertools
>>> import paddle.vision.transforms as T
>>> from paddle.vision.datasets import VOC2012
>>> voc2012 = VOC2012()
>>> print(len(voc2012))
2913
>>> for i in range(5): # only show first 5 images
... img, label = voc2012[i]
... # do something with img and label
... print(type(img), img.size)
... # <class 'PIL.JpegImagePlugin.JpegImageFile'> (500, 281)
... print(type(label), label.size)
... # <class 'PIL.PngImagePlugin.PngImageFile'> (500, 281)
>>> transform = T.Compose(
... [
... T.ToTensor(),
... T.Normalize(
... mean=[0.5, 0.5, 0.5],
... std=[0.5, 0.5, 0.5],
... to_rgb=True,
... ),
... ]
... )
>>> voc2012_test = VOC2012(
... mode="test",
... transform=transform, # apply transform to every image
... backend="cv2", # use OpenCV as image transform backend
... )
>>> print(len(voc2012_test))
1464
>>> for img, label in itertools.islice(iter(voc2012_test), 5): # only show first 5 images
... # do something with img and label
... print(type(img), img.shape)
... # <class 'paddle.Tensor'> [3, 281, 500]
... print(type(label), label.shape)
... # <class 'numpy.ndarray'> (281, 500)
"""
def __init__(
......
......@@ -34,51 +34,51 @@ def set_image_backend(backend):
.. code-block:: python
import os
import shutil
import tempfile
import numpy as np
from PIL import Image
>>> import os
>>> import shutil
>>> import tempfile
>>> import numpy as np
>>> from PIL import Image
from paddle.vision import DatasetFolder
from paddle.vision import set_image_backend
>>> from paddle.vision import DatasetFolder
>>> from paddle.vision import set_image_backend
set_image_backend('pil')
>>> set_image_backend('pil')
def make_fake_dir():
data_dir = tempfile.mkdtemp()
>>> def make_fake_dir():
... data_dir = tempfile.mkdtemp()
...
... for i in range(2):
... sub_dir = os.path.join(data_dir, 'class_' + str(i))
... if not os.path.exists(sub_dir):
... os.makedirs(sub_dir)
... for j in range(2):
... fake_img = Image.fromarray((np.random.random((32, 32, 3)) * 255).astype('uint8'))
... fake_img.save(os.path.join(sub_dir, str(j) + '.png'))
... return data_dir
for i in range(2):
sub_dir = os.path.join(data_dir, 'class_' + str(i))
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
for j in range(2):
fake_img = Image.fromarray((np.random.random((32, 32, 3)) * 255).astype('uint8'))
fake_img.save(os.path.join(sub_dir, str(j) + '.png'))
return data_dir
>>> temp_dir = make_fake_dir()
temp_dir = make_fake_dir()
>>> pil_data_folder = DatasetFolder(temp_dir)
pil_data_folder = DatasetFolder(temp_dir)
>>> for items in pil_data_folder:
... break
for items in pil_data_folder:
break
>>> print(type(items[0]))
<class 'PIL.Image.Image'>
# should get PIL.Image.Image
print(type(items[0]))
>>> # use opencv as backend
>>> set_image_backend('cv2')
# use opencv as backend
# set_image_backend('cv2')
>>> cv2_data_folder = DatasetFolder(temp_dir)
# cv2_data_folder = DatasetFolder(temp_dir)
>>> for items in cv2_data_folder:
... break
# for items in cv2_data_folder:
# break
>>> print(type(items[0]))
<class 'numpy.ndarray'>
# should get numpy.ndarray
# print(type(items[0]))
shutil.rmtree(temp_dir)
>>> shutil.rmtree(temp_dir)
"""
global _image_backend
if backend not in ['pil', 'cv2', 'tensor']:
......@@ -101,10 +101,11 @@ def get_image_backend():
.. code-block:: python
from paddle.vision import get_image_backend
>>> from paddle.vision import get_image_backend
backend = get_image_backend()
print(backend)
>>> backend = get_image_backend()
>>> print(backend)
pil
"""
return _image_backend
......@@ -126,28 +127,28 @@ def image_load(path, backend=None):
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision import image_load, set_image_backend
>>> import numpy as np
>>> from PIL import Image
>>> from paddle.vision import image_load, set_image_backend
fake_img = Image.fromarray((np.random.random((32, 32, 3)) * 255).astype('uint8'))
>>> fake_img = Image.fromarray((np.random.random((32, 32, 3)) * 255).astype('uint8'))
path = 'temp.png'
fake_img.save(path)
>>> path = 'temp.png'
>>> fake_img.save(path)
set_image_backend('pil')
>>> set_image_backend('pil')
pil_img = image_load(path).convert('RGB')
>>> pil_img = image_load(path).convert('RGB')
# should be PIL.Image.Image
print(type(pil_img))
>>> print(type(pil_img))
<class 'PIL.Image.Image'>
# use opencv as backend
# set_image_backend('cv2')
>>> # use opencv as backend
>>> set_image_backend('cv2')
# np_img = image_load(path)
# # should get numpy.ndarray
# print(type(np_img))
>>> np_img = image_load(path)
>>> print(type(np_img))
<class 'numpy.ndarray'>
"""
......