Unverified · Commit 861a16ce, authored by 小飞猪, committed by GitHub

[xdoctest][task 293-296] reformat example code with google style in `vision` (#56458)

* [Doctest]fix No.293-296, test=docs_preview

* empty commit

* code style

* empty commit

* fix timeout

* fix timeout
Parent: b32314b6
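The conversion is mechanical: each example that previously embedded its expected output in `#` comments is rewritten as an interactive, Google-style (xdoctest) session, with statements prefixed by `>>> `, continuation lines by `... `, and the expected output left unprefixed so the sample-code checker can verify it. A minimal before/after sketch of the pattern (hypothetical snippet, not taken from the files below):

.. code-block:: python

    # old style: expected output lives only in comments, nothing is checked
    import paddle
    x = paddle.to_tensor([1.0, 2.0])
    print(x.shape)
    # [2]

.. code-block:: python

    >>> # new Google/xdoctest style: output is a checked "want" block
    >>> import paddle
    >>> x = paddle.to_tensor([1.0, 2.0])
    >>> print(x.shape)
    [2]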
@@ -102,102 +102,106 @@ class DatasetFolder(Dataset):

.. code-block:: python

    >>> import shutil
    >>> import tempfile
    >>> import cv2
    >>> import numpy as np
    >>> import paddle.vision.transforms as T
    >>> from pathlib import Path
    >>> from paddle.vision.datasets import DatasetFolder

    >>> def make_fake_file(img_path: str):
    ...     if img_path.endswith((".jpg", ".png", ".jpeg")):
    ...         fake_img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
    ...         cv2.imwrite(img_path, fake_img)
    ...     elif img_path.endswith(".txt"):
    ...         with open(img_path, "w") as f:
    ...             f.write("This is a fake file.")

    >>> def make_directory(root, directory_hierarchy, file_maker=make_fake_file):
    ...     root = Path(root)
    ...     root.mkdir(parents=True, exist_ok=True)
    ...     for subpath in directory_hierarchy:
    ...         if isinstance(subpath, str):
    ...             filepath = root / subpath
    ...             file_maker(str(filepath))
    ...         else:
    ...             dirname = list(subpath.keys())[0]
    ...             make_directory(root / dirname, subpath[dirname])

    >>> directory_hierarchy = [
    ...     {"class_0": [
    ...         "abc.jpg",
    ...         "def.png"]},
    ...     {"class_1": [
    ...         "ghi.jpeg",
    ...         "jkl.png",
    ...         {"mno": [
    ...             "pqr.jpeg",
    ...             "stu.jpg"]}]},
    ...     "this_will_be_ignored.txt",
    ... ]

    >>> # You can replace this with any directory to explore the structure
    >>> # of the generated data, e.g. fake_data_dir = "./temp_dir"
    >>> fake_data_dir = tempfile.mkdtemp()
    >>> make_directory(fake_data_dir, directory_hierarchy)
    >>> data_folder_1 = DatasetFolder(fake_data_dir)
    >>> print(data_folder_1.classes)
    ['class_0', 'class_1']
    >>> print(data_folder_1.class_to_idx)
    {'class_0': 0, 'class_1': 1}
    >>> print(data_folder_1.samples)
    >>> # doctest: +SKIP(output is different on Windows)
    [('./temp_dir/class_0/abc.jpg', 0), ('./temp_dir/class_0/def.png', 0),
    ('./temp_dir/class_1/ghi.jpeg', 1), ('./temp_dir/class_1/jkl.png', 1),
    ('./temp_dir/class_1/mno/pqr.jpeg', 1), ('./temp_dir/class_1/mno/stu.jpg', 1)]
    >>> # doctest: -SKIP
    >>> print(data_folder_1.targets)
    [0, 0, 1, 1, 1, 1]
    >>> print(len(data_folder_1))
    6

    >>> for i in range(len(data_folder_1)):
    ...     img, label = data_folder_1[i]
    ...     # do something with img and label
    ...     print(type(img), img.size, label)
    ...     # <class 'PIL.Image.Image'> (32, 32) 0

    >>> transform = T.Compose(
    ...     [
    ...         T.Resize(64),
    ...         T.ToTensor(),
    ...         T.Normalize(
    ...             mean=[0.5, 0.5, 0.5],
    ...             std=[0.5, 0.5, 0.5],
    ...             to_rgb=True,
    ...         ),
    ...     ]
    ... )

    >>> data_folder_2 = DatasetFolder(
    ...     fake_data_dir,
    ...     loader=lambda x: cv2.imread(x),  # load image with OpenCV
    ...     extensions=(".jpg",),  # only load *.jpg files
    ...     transform=transform,  # apply transform to every image
    ... )

    >>> print([img_path for img_path, label in data_folder_2.samples])
    >>> # doctest: +SKIP(output is different on Windows)
    ['./temp_dir/class_0/abc.jpg', './temp_dir/class_1/mno/stu.jpg']
    >>> # doctest: -SKIP
    >>> print(len(data_folder_2))
    2

    >>> for img, label in iter(data_folder_2):
    ...     # do something with img and label
    ...     print(type(img), img.shape, label)
    ...     # <class 'paddle.Tensor'> [3, 64, 64] 0

    >>> shutil.rmtree(fake_data_dir)

"""

def __init__(
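Where the printed sample list depends on the temporary directory and on the platform (path separators differ on Windows), the rewritten example brackets the expected output with xdoctest `SKIP` directives so the checker does not compare it literally, then restores checking with `-SKIP`. A minimal sketch of that pattern, using a hypothetical platform-dependent value and mirroring the directive placement used in this diff:

.. code-block:: python

    >>> import tempfile
    >>> print(tempfile.gettempdir())
    >>> # doctest: +SKIP(result depends on the platform)
    /tmp
    >>> # doctest: -SKIP
    >>> print(2 + 2)  # checked normally again once SKIP is lifted
    4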
@@ -335,92 +339,96 @@ class ImageFolder(Dataset):

.. code-block:: python

    >>> import shutil
    >>> import tempfile
    >>> import cv2
    >>> import numpy as np
    >>> import paddle.vision.transforms as T
    >>> from pathlib import Path
    >>> from paddle.vision.datasets import ImageFolder

    >>> def make_fake_file(img_path: str):
    ...     if img_path.endswith((".jpg", ".png", ".jpeg")):
    ...         fake_img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
    ...         cv2.imwrite(img_path, fake_img)
    ...     elif img_path.endswith(".txt"):
    ...         with open(img_path, "w") as f:
    ...             f.write("This is a fake file.")

    >>> def make_directory(root, directory_hierarchy, file_maker=make_fake_file):
    ...     root = Path(root)
    ...     root.mkdir(parents=True, exist_ok=True)
    ...     for subpath in directory_hierarchy:
    ...         if isinstance(subpath, str):
    ...             filepath = root / subpath
    ...             file_maker(str(filepath))
    ...         else:
    ...             dirname = list(subpath.keys())[0]
    ...             make_directory(root / dirname, subpath[dirname])

    >>> directory_hierarchy = [
    ...     "abc.jpg",
    ...     "def.png",
    ...     {"ghi": [
    ...         "jkl.jpeg",
    ...         {"mno": [
    ...             "pqr.jpg"]}]},
    ...     "this_will_be_ignored.txt",
    ... ]

    >>> # You can replace this with any directory to explore the structure
    >>> # of the generated data, e.g. fake_data_dir = "./temp_dir"
    >>> fake_data_dir = tempfile.mkdtemp()
    >>> make_directory(fake_data_dir, directory_hierarchy)
    >>> image_folder_1 = ImageFolder(fake_data_dir)
    >>> print(image_folder_1.samples)
    >>> # doctest: +SKIP(output is different on Windows)
    ['./temp_dir/abc.jpg', './temp_dir/def.png',
    './temp_dir/ghi/jkl.jpeg', './temp_dir/ghi/mno/pqr.jpg']
    >>> # doctest: -SKIP
    >>> print(len(image_folder_1))
    4

    >>> for i in range(len(image_folder_1)):
    ...     (img,) = image_folder_1[i]
    ...     # do something with img
    ...     print(type(img), img.size)
    ...     # <class 'PIL.Image.Image'> (32, 32)

    >>> transform = T.Compose(
    ...     [
    ...         T.Resize(64),
    ...         T.ToTensor(),
    ...         T.Normalize(
    ...             mean=[0.5, 0.5, 0.5],
    ...             std=[0.5, 0.5, 0.5],
    ...             to_rgb=True,
    ...         ),
    ...     ]
    ... )

    >>> image_folder_2 = ImageFolder(
    ...     fake_data_dir,
    ...     loader=lambda x: cv2.imread(x),  # load image with OpenCV
    ...     extensions=(".jpg",),  # only load *.jpg files
    ...     transform=transform,  # apply transform to every image
    ... )

    >>> print(image_folder_2.samples)
    >>> # doctest: +SKIP(output is different on Windows)
    ['./temp_dir/abc.jpg', './temp_dir/ghi/mno/pqr.jpg']
    >>> # doctest: -SKIP
    >>> print(len(image_folder_2))
    2

    >>> for (img,) in iter(image_folder_2):
    ...     # do something with img
    ...     print(type(img), img.shape)
    ...     # <class 'paddle.Tensor'> [3, 64, 64]

    >>> shutil.rmtree(fake_data_dir)

"""

def __init__(
...
@@ -50,44 +50,44 @@ class MNIST(Dataset):

.. code-block:: python

    >>> import itertools
    >>> import paddle.vision.transforms as T
    >>> from paddle.vision.datasets import MNIST

    >>> mnist = MNIST()
    >>> print(len(mnist))
    60000

    >>> for i in range(5):  # only show first 5 images
    ...     img, label = mnist[i]
    ...     # do something with img and label
    ...     print(type(img), img.size, label)
    ...     # <class 'PIL.Image.Image'> (28, 28) [5]

    >>> transform = T.Compose(
    ...     [
    ...         T.ToTensor(),
    ...         T.Normalize(
    ...             mean=[127.5],
    ...             std=[127.5],
    ...         ),
    ...     ]
    ... )

    >>> mnist_test = MNIST(
    ...     mode="test",
    ...     transform=transform,  # apply transform to every image
    ...     backend="cv2",  # use OpenCV as image transform backend
    ... )
    >>> print(len(mnist_test))
    10000

    >>> for img, label in itertools.islice(iter(mnist_test), 5):  # only show first 5 images
    ...     # do something with img and label
    ...     print(type(img), img.shape, label)
    ...     # <class 'paddle.Tensor'> [1, 28, 28] [7]

"""

NAME = 'mnist'
@@ -261,44 +261,44 @@ class FashionMNIST(MNIST):

.. code-block:: python

    >>> import itertools
    >>> import paddle.vision.transforms as T
    >>> from paddle.vision.datasets import FashionMNIST

    >>> fashion_mnist = FashionMNIST()
    >>> print(len(fashion_mnist))
    60000

    >>> for i in range(5):  # only show first 5 images
    ...     img, label = fashion_mnist[i]
    ...     # do something with img and label
    ...     print(type(img), img.size, label)
    ...     # <class 'PIL.Image.Image'> (28, 28) [9]

    >>> transform = T.Compose(
    ...     [
    ...         T.ToTensor(),
    ...         T.Normalize(
    ...             mean=[127.5],
    ...             std=[127.5],
    ...         ),
    ...     ]
    ... )

    >>> fashion_mnist_test = FashionMNIST(
    ...     mode="test",
    ...     transform=transform,  # apply transform to every image
    ...     backend="cv2",  # use OpenCV as image transform backend
    ... )
    >>> print(len(fashion_mnist_test))
    10000

    >>> for img, label in itertools.islice(iter(fashion_mnist_test), 5):  # only show first 5 images
    ...     # do something with img and label
    ...     print(type(img), img.shape, label)
    ...     # <class 'paddle.Tensor'> [1, 28, 28] [9]

"""

NAME = 'fashion-mnist'
...
@@ -58,49 +58,50 @@ class VOC2012(Dataset):

.. code-block:: python

    >>> # doctest: +TIMEOUT(75)
    >>> import itertools
    >>> import paddle.vision.transforms as T
    >>> from paddle.vision.datasets import VOC2012

    >>> voc2012 = VOC2012()
    >>> print(len(voc2012))
    2913

    >>> for i in range(5):  # only show first 5 images
    ...     img, label = voc2012[i]
    ...     # do something with img and label
    ...     print(type(img), img.size)
    ...     # <class 'PIL.JpegImagePlugin.JpegImageFile'> (500, 281)
    ...     print(type(label), label.size)
    ...     # <class 'PIL.PngImagePlugin.PngImageFile'> (500, 281)

    >>> transform = T.Compose(
    ...     [
    ...         T.ToTensor(),
    ...         T.Normalize(
    ...             mean=[0.5, 0.5, 0.5],
    ...             std=[0.5, 0.5, 0.5],
    ...             to_rgb=True,
    ...         ),
    ...     ]
    ... )

    >>> voc2012_test = VOC2012(
    ...     mode="test",
    ...     transform=transform,  # apply transform to every image
    ...     backend="cv2",  # use OpenCV as image transform backend
    ... )
    >>> print(len(voc2012_test))
    1464

    >>> for img, label in itertools.islice(iter(voc2012_test), 5):  # only show first 5 images
    ...     # do something with img and label
    ...     print(type(img), img.shape)
    ...     # <class 'paddle.Tensor'> [3, 281, 500]
    ...     print(type(label), label.shape)
    ...     # <class 'numpy.ndarray'> (281, 500)

"""

def __init__(
...
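The "fix timeout" commits in the message surface here as the `# doctest: +TIMEOUT(75)` directive on the first line of the VOC2012 example: constructing the dataset may download and extract the archive, so the example presumably needs a larger time budget than the checker's default (`TIMEOUT` appears to be handled by Paddle's sample-code checker rather than by standard doctest, so the exact semantics are an assumption here). A minimal sketch of where the directive goes, using only calls already shown in the example above:

.. code-block:: python

    >>> # doctest: +TIMEOUT(75)
    >>> from paddle.vision.datasets import VOC2012
    >>> voc2012 = VOC2012()  # the first call may need to download the dataset
    >>> print(len(voc2012))
    2913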
@@ -34,51 +34,51 @@ def set_image_backend(backend):

.. code-block:: python

    >>> import os
    >>> import shutil
    >>> import tempfile
    >>> import numpy as np
    >>> from PIL import Image
    >>> from paddle.vision import DatasetFolder
    >>> from paddle.vision import set_image_backend

    >>> set_image_backend('pil')

    >>> def make_fake_dir():
    ...     data_dir = tempfile.mkdtemp()
    ...
    ...     for i in range(2):
    ...         sub_dir = os.path.join(data_dir, 'class_' + str(i))
    ...         if not os.path.exists(sub_dir):
    ...             os.makedirs(sub_dir)
    ...         for j in range(2):
    ...             fake_img = Image.fromarray((np.random.random((32, 32, 3)) * 255).astype('uint8'))
    ...             fake_img.save(os.path.join(sub_dir, str(j) + '.png'))
    ...     return data_dir

    >>> temp_dir = make_fake_dir()

    >>> pil_data_folder = DatasetFolder(temp_dir)
    >>> for items in pil_data_folder:
    ...     break
    >>> print(type(items[0]))
    <class 'PIL.Image.Image'>

    >>> # use opencv as backend
    >>> set_image_backend('cv2')
    >>> cv2_data_folder = DatasetFolder(temp_dir)
    >>> for items in cv2_data_folder:
    ...     break
    >>> print(type(items[0]))
    <class 'numpy.ndarray'>

    >>> shutil.rmtree(temp_dir)

"""
global _image_backend
if backend not in ['pil', 'cv2', 'tensor']:
@@ -101,10 +101,11 @@ def get_image_backend():

.. code-block:: python

    >>> from paddle.vision import get_image_backend

    >>> backend = get_image_backend()
    >>> print(backend)
    pil

"""
return _image_backend
@@ -126,28 +127,28 @@ def image_load(path, backend=None):

.. code-block:: python

    >>> import numpy as np
    >>> from PIL import Image
    >>> from paddle.vision import image_load, set_image_backend

    >>> fake_img = Image.fromarray((np.random.random((32, 32, 3)) * 255).astype('uint8'))
    >>> path = 'temp.png'
    >>> fake_img.save(path)

    >>> set_image_backend('pil')
    >>> pil_img = image_load(path).convert('RGB')
    >>> print(type(pil_img))
    <class 'PIL.Image.Image'>

    >>> # use opencv as backend
    >>> set_image_backend('cv2')
    >>> np_img = image_load(path)
    >>> print(type(np_img))
    <class 'numpy.ndarray'>

"""
...