Unverified · Commit fc1e505e authored by LoneRanger, committed by GitHub

[xdoctest] reformat example code with google style in No.6-No.10 (#56146)

* fix sample code

* fix bug

* fix bug

* Update regularizer.py

* Update __init__.py

* Update decorator.py

* fix code-style
Parent 34eecb0e
......@@ -33,7 +33,7 @@ items. It can be any function with no parameter that creates an iterable
.. code-block:: python
iterable = data_reader()
>>> iterable = data_reader()
Each element produced by the iterable should be a **single** entry of data,
**not** a mini batch. That entry of data could be a single item, or a tuple of
......@@ -45,21 +45,21 @@ An example implementation for single item data reader creator:
.. code-block:: python
def reader_creator_random_image(width, height):
def reader():
while True:
yield numpy.random.uniform(-1, 1, size=width*height)
return reader
>>> def reader_creator_random_image(width, height):
... def reader():
... while True:
... yield numpy.random.uniform(-1, 1, size=width*height)
... return reader
An example implementation for multiple item data reader creator:
.. code-block:: python
def reader_creator_random_image_and_label(width, height, label):
def reader():
while True:
yield numpy.random.uniform(-1, 1, size=width*height), label
return reader
>>> def reader_creator_random_image_and_label(width, height, label):
... def reader():
... while True:
... yield numpy.random.uniform(-1, 1, size=width*height), label
... return reader
"""
......
......@@ -60,18 +60,20 @@ def cache(reader):
Examples:
.. code-block:: python
import paddle
def reader():
for i in range(3):
yield i
# All data is cached into memory
cached_reader = paddle.io.cache(reader)
# Output: 0 1 2
for i in cached_reader():
print(i)
>>> import paddle
>>> def reader():
... for i in range(3):
... yield i
...
>>> # All data is cached into memory
>>> cached_reader = paddle.fluid.io.cache(reader)
>>> for i in cached_reader():
... print(i)
0
1
2
"""
all_data = tuple(reader())
......@@ -103,14 +105,14 @@ def map_readers(func, *readers):
.. code-block:: python
import paddle.reader
d = {"h": 0, "i": 1}
def func(x):
return d[x]
def reader():
yield "h"
yield "i"
map_reader_result = paddle.reader.map_readers(func, reader)
>>> import paddle.reader
>>> d = {"h": 0, "i": 1}
>>> def func(x):
... return d[x]
>>> def reader():
... yield "h"
... yield "i"
>>> map_reader_result = paddle.reader.map_readers(func, reader)
"""
def reader():
......@@ -142,15 +144,14 @@ def shuffle(reader, buf_size):
Examples:
.. code-block:: python
import paddle.fluid as fluid
def reader():
for i in range(5):
yield i
shuffled_reader = fluid.io.shuffle(reader, 3)
for e in shuffled_reader():
print(e)
# outputs are 0~4 unordered arrangement
>>> # doctest: +SKIP('outputs are 0~4 unordered arrangement')
>>> import paddle
>>> def reader():
... for i in range(5):
... yield i
>>> shuffled_reader = paddle.reader.decorator.shuffle(reader, 3)
>>> for e in shuffled_reader():
... print(e)
>>> # outputs are 0~4 unordered arrangement
"""
def data_reader():
......@@ -197,27 +198,26 @@ def chain(*readers):
Examples:
.. code-block:: python
import paddle
def reader_creator_3(start):
def reader():
for i in range(start, start + 3):
yield [i, i, i]
return reader
c = paddle.reader.chain(reader_creator_3(0), reader_creator_3(10), reader_creator_3(20))
for e in c():
print(e)
# Output:
# [0, 0, 0]
# [1, 1, 1]
# [2, 2, 2]
# [10, 10, 10]
# [11, 11, 11]
# [12, 12, 12]
# [20, 20, 20]
# [21, 21, 21]
# [22, 22, 22]
>>> import paddle
>>> def reader_creator_3(start):
... def reader():
... for i in range(start, start + 3):
... yield [i, i, i]
... return reader
...
>>> c = paddle.reader.chain(reader_creator_3(0), reader_creator_3(10), reader_creator_3(20))
>>> for e in c():
... print(e)
[0, 0, 0]
[1, 1, 1]
[2, 2, 2]
[10, 10, 10]
[11, 11, 11]
[12, 12, 12]
[20, 20, 20]
[21, 21, 21]
[22, 22, 22]
"""
......@@ -257,13 +257,12 @@ def compose(*readers, **kwargs):
Examples:
.. code-block:: python
import paddle.fluid as fluid
def reader_creator_10(dur):
def reader():
for i in range(10):
yield i
return reader
reader = fluid.io.compose(reader_creator_10(0), reader_creator_10(0))
>>> import paddle
>>> def reader_creator_10(dur):
... def reader():
... for i in range(10):
... yield i
... return reader
>>> reader = paddle.reader.decorator.compose(reader_creator_10(0), reader_creator_10(0))
"""
check_alignment = kwargs.pop('check_alignment', True)
......@@ -311,18 +310,21 @@ def buffered(reader, size):
Examples:
.. code-block:: python
import paddle
def reader():
for i in range(3):
yield i
# Create a buffered reader, and the buffer size is 2.
buffered_reader = paddle.io.buffered(reader, 2)
# Output: 0 1 2
for i in buffered_reader():
print(i)
>>> import paddle
>>> def reader():
... for i in range(3):
... yield i
...
>>> # Create a buffered reader, and the buffer size is 2.
>>> buffered_reader = paddle.reader.decorator.buffered(reader, 2)
>>> # Output: 0 1 2
>>> for i in buffered_reader():
... print(i)
0
1
2
"""
class EndSignal:
......@@ -373,15 +375,17 @@ def firstn(reader, n):
Examples:
.. code-block:: python
import paddle.fluid as fluid
def reader():
for i in range(100):
yield i
firstn_reader = fluid.io.firstn(reader, 5)
for e in firstn_reader():
print(e)
# the outputs are: 0 1 2 3 4
>>> import paddle
>>> def reader():
... for i in range(100):
... yield i
>>> firstn_reader = paddle.reader.decorator.firstn(reader, 5)
>>> for e in firstn_reader():
... print(e)
0
1
2
3
4
"""
# TODO(yuyang18): Check if just drop the reader, could clean the opened
......@@ -524,59 +528,55 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000):
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
from paddle.fluid.io import multiprocess_reader
import numpy as np
sample_files = ['sample_file_1', 'sample_file_2']
def fake_input_files():
with open(sample_files[0], 'w') as f:
np.savez(f, a=np.array([1, 2]), b=np.array([3, 4]), c=np.array([5, 6]), d=np.array([7, 8]))
with open(sample_files[1], 'w') as f:
np.savez(f, a=np.array([9, 10]), b=np.array([11, 12]), c=np.array([13, 14]))
def generate_reader(file_name):
# load data file
def _impl():
data = np.load(file_name)
for item in sorted(data.files):
yield data[item],
return _impl
if __name__ == '__main__':
# generate sample input files
fake_input_files()
with fluid.program_guard(fluid.Program(), fluid.Program()):
place = fluid.CPUPlace()
# the 1st 2 is batch size
image = paddle.static.data(name='image', dtype='int64', shape=[2, 1, 2])
paddle.static.Print(image)
# print detailed tensor info of image variable
reader = fluid.io.PyReader(feed_list=[image], capacity=2)
decorated_reader = multiprocess_reader(
[generate_reader(sample_files[0]), generate_reader(sample_files[1])], False)
reader.decorate_sample_generator(decorated_reader, batch_size=2, places=[place])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for data in reader():
res = exe.run(feed=data, fetch_list=[image])
print(res[0])
# print below content in this case
# [[[1 2]], [[3 4]]]
# [[[5 6]], [[7 8]]]
# [[[9 10]], [[11 12]]]
# [13,14] will be dropped
>>> import paddle
>>> import paddle.fluid as fluid
>>> import numpy as np
>>> sample_files = ['sample_file_1', 'sample_file_2']
>>> def fake_input_files():
... with open(sample_files[0], 'wb') as f:
... np.savez(f, a=np.array([1, 2]), b=np.array([3, 4]), c=np.array([5, 6]), d=np.array([7, 8]))
... with open(sample_files[1], 'wb') as f:
... np.savez(f, a=np.array([9, 10]), b=np.array([11, 12]), c=np.array([13, 14]))
...
...
>>> def generate_reader(file_name):
... # load data file
... def _impl():
... data = np.load(file_name)
... for item in sorted(data.files):
... yield data[item],
... return _impl
...
>>> if __name__ == '__main__':
... # generate sample input files
... fake_input_files()
...
... with fluid.program_guard(fluid.Program(), fluid.Program()):
... place = fluid.CPUPlace()
... # the 1st 2 is batch size
...
... image = paddle.static.data(name='image', dtype='int64', shape=[2, 1, 2])
... paddle.static.Print(image)
... # print detailed tensor info of image variable
...
... reader = fluid.io.PyReader(feed_list=[image], capacity=2)
...
... decorated_reader = paddle.reader.multiprocess_reader(
... [generate_reader(sample_files[0]), generate_reader(sample_files[1])], False)
...
... reader.decorate_sample_generator(decorated_reader, batch_size=2, places=[place])
...
... exe = fluid.Executor(place)
... exe.run(fluid.default_startup_program())
...
... for data in reader():
... res = exe.run(feed=data, fetch_list=[image])
... print(res[0])
[[[1 2]], [[3 4]]]
[[[5 6]], [[7 8]]]
[[[9 10]], [[11 12]]]
"""
if sys.platform == 'win32':
......
......@@ -67,42 +67,42 @@ class L1Decay(WeightDecayRegularizer):
.. code-block:: python
:name: code-example1
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L1Decay
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
momentum = paddle.optimizer.Momentum(
learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=L1Decay(0.0001))
back = out.backward()
momentum.step()
momentum.clear_grad()
>>> # Example1: set Regularizer in optimizer
>>> import paddle
>>> from paddle.regularizer import L1Decay
>>> linear = paddle.nn.Linear(10, 10)
>>> inp = paddle.rand(shape=[10, 10], dtype="float32")
>>> out = linear(inp)
>>> loss = paddle.mean(out)
>>> beta1 = paddle.to_tensor([0.9], dtype="float32")
>>> beta2 = paddle.to_tensor([0.99], dtype="float32")
>>> momentum = paddle.optimizer.Momentum(
... learning_rate=0.1,
... parameters=linear.parameters(),
... weight_decay=L1Decay(0.0001))
>>> back = out.backward()
>>> momentum.step()
>>> momentum.clear_grad()
.. code-block:: python
:name: code-example2
# Example2: set Regularizer in parameters
# Set L1 regularization in parameters.
# Global regularizer does not take effect on my_conv2d for this case.
from paddle.nn import Conv2D
from paddle import ParamAttr
from paddle.regularizer import L2Decay
my_conv2d = Conv2D(
in_channels=10,
out_channels=10,
kernel_size=1,
stride=1,
padding=0,
weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
bias_attr=False)
>>> # Example2: set Regularizer in parameters
>>> # Set L1 regularization in parameters.
>>> # Global regularizer does not take effect on my_conv2d for this case.
>>> from paddle.nn import Conv2D
>>> from paddle import ParamAttr
>>> from paddle.regularizer import L1Decay
>>> my_conv2d = Conv2D(
... in_channels=10,
... out_channels=10,
... kernel_size=1,
... stride=1,
... padding=0,
... weight_attr=ParamAttr(regularizer=L1Decay(coeff=0.01)),
... bias_attr=False)
"""
def __init__(self, coeff=0.0):
......@@ -178,40 +178,41 @@ class L2Decay(WeightDecayRegularizer):
.. code-block:: python
:name: code-example1
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L2Decay
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
momentum = paddle.optimizer.Momentum(
learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=L2Decay(0.0001))
back = out.backward()
momentum.step()
momentum.clear_grad()
>>> # Example1: set Regularizer in optimizer
>>> import paddle
>>> from paddle.regularizer import L2Decay
>>> linear = paddle.nn.Linear(10, 10)
>>> inp = paddle.rand(shape=[10, 10], dtype="float32")
>>> out = linear(inp)
>>> loss = paddle.mean(out)
>>> beta1 = paddle.to_tensor([0.9], dtype="float32")
>>> beta2 = paddle.to_tensor([0.99], dtype="float32")
>>> momentum = paddle.optimizer.Momentum(
... learning_rate=0.1,
... parameters=linear.parameters(),
... weight_decay=L2Decay(0.0001))
>>> back = out.backward()
>>> momentum.step()
>>> momentum.clear_grad()
.. code-block:: python
:name: code-example2
# Example2: set Regularizer in parameters
# Set L2 regularization in parameters.
# Global regularizer does not take effect on my_conv2d for this case.
from paddle.nn import Conv2D
from paddle import ParamAttr
from paddle.regularizer import L2Decay
my_conv2d = Conv2D(
in_channels=10,
out_channels=10,
kernel_size=1,
stride=1,
padding=0,
weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
bias_attr=False)
>>> # Example2: set Regularizer in parameters
>>> # Set L2 regularization in parameters.
>>> # Global regularizer does not take effect on my_conv2d for this case.
>>> from paddle.nn import Conv2D
>>> from paddle import ParamAttr
>>> from paddle.regularizer import L2Decay
>>> my_conv2d = Conv2D(
... in_channels=10,
... out_channels=10,
... kernel_size=1,
... stride=1,
... padding=0,
... weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
... bias_attr=False)
"""
def __init__(self, coeff=0.0):
......
......@@ -51,58 +51,55 @@ def frame(x, frame_length, hop_length, axis=-1, name=None):
.. code-block:: python
import paddle
from paddle.signal import frame
# 1D
x = paddle.arange(8)
y0 = frame(x, frame_length=4, hop_length=2, axis=-1) # [4, 3]
# [[0, 2, 4],
# [1, 3, 5],
# [2, 4, 6],
# [3, 5, 7]]
y1 = frame(x, frame_length=4, hop_length=2, axis=0) # [3, 4]
# [[0, 1, 2, 3],
# [2, 3, 4, 5],
# [4, 5, 6, 7]]
# 2D
x0 = paddle.arange(16).reshape([2, 8])
y0 = frame(x0, frame_length=4, hop_length=2, axis=-1) # [2, 4, 3]
# [[[0, 2, 4],
# [1, 3, 5],
# [2, 4, 6],
# [3, 5, 7]],
#
# [[8 , 10, 12],
# [9 , 11, 13],
# [10, 12, 14],
# [11, 13, 15]]]
x1 = paddle.arange(16).reshape([8, 2])
y1 = frame(x1, frame_length=4, hop_length=2, axis=0) # [3, 4, 2]
# [[[0 , 1 ],
# [2 , 3 ],
# [4 , 5 ],
# [6 , 7 ]],
#
# [4 , 5 ],
# [6 , 7 ],
# [8 , 9 ],
# [10, 11]],
#
# [8 , 9 ],
# [10, 11],
# [12, 13],
# [14, 15]]]
# > 2D
x0 = paddle.arange(32).reshape([2, 2, 8])
y0 = frame(x0, frame_length=4, hop_length=2, axis=-1) # [2, 2, 4, 3]
x1 = paddle.arange(32).reshape([8, 2, 2])
y1 = frame(x1, frame_length=4, hop_length=2, axis=0) # [3, 4, 2, 2]
>>> import paddle
>>> from paddle import signal
>>> # 1D
>>> x = paddle.arange(8)
>>> y0 = signal.frame(x, frame_length=4, hop_length=2, axis=-1)
>>> print(y0)
Tensor(shape=[4, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0, 2, 4],
[1, 3, 5],
[2, 4, 6],
[3, 5, 7]])
>>> y1 = signal.frame(x, frame_length=4, hop_length=2, axis=0)
>>> print(y1)
Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0, 1, 2, 3],
[2, 3, 4, 5],
[4, 5, 6, 7]])
>>> # 2D
>>> x0 = paddle.arange(16).reshape([2, 8])
>>> y0 = signal.frame(x0, frame_length=4, hop_length=2, axis=-1)
>>> print(y0)
Tensor(shape=[2, 4, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
[[[0 , 2 , 4 ],
[1 , 3 , 5 ],
[2 , 4 , 6 ],
[3 , 5 , 7 ]],
[[8 , 10, 12],
[9 , 11, 13],
[10, 12, 14],
[11, 13, 15]]])
>>> x1 = paddle.arange(16).reshape([8, 2])
>>> y1 = signal.frame(x1, frame_length=4, hop_length=2, axis=0)
>>> print(y1.shape)
[3, 4, 2]
>>> # > 2D
>>> x0 = paddle.arange(32).reshape([2, 2, 8])
>>> y0 = signal.frame(x0, frame_length=4, hop_length=2, axis=-1)
>>> print(y0.shape)
[2, 2, 4, 3]
>>> x1 = paddle.arange(32).reshape([8, 2, 2])
>>> y1 = signal.frame(x1, frame_length=4, hop_length=2, axis=0)
>>> print(y1.shape)
[3, 4, 2, 2]
"""
if axis not in [0, -1]:
raise ValueError(f'Unexpected axis: {axis}. It should be 0 or -1.')
......@@ -169,34 +166,51 @@ def overlap_add(x, hop_length, axis=-1, name=None):
.. code-block:: python
import paddle
from paddle.signal import overlap_add
# 2D
x0 = paddle.arange(16).reshape([8, 2])
# [[0 , 1 ],
# [2 , 3 ],
# [4 , 5 ],
# [6 , 7 ],
# [8 , 9 ],
# [10, 11],
# [12, 13],
# [14, 15]]
y0 = overlap_add(x0, hop_length=2, axis=-1) # [10]
# [0 , 2 , 5 , 9 , 13, 17, 21, 25, 13, 15]
x1 = paddle.arange(16).reshape([2, 8])
# [[0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],
# [8 , 9 , 10, 11, 12, 13, 14, 15]]
y1 = overlap_add(x1, hop_length=2, axis=0) # [10]
# [0 , 1 , 10, 12, 14, 16, 18, 20, 14, 15]
# > 2D
x0 = paddle.arange(32).reshape([2, 1, 8, 2])
y0 = overlap_add(x0, hop_length=2, axis=-1) # [2, 1, 10]
x1 = paddle.arange(32).reshape([2, 8, 1, 2])
y1 = overlap_add(x1, hop_length=2, axis=0) # [10, 1, 2]
>>> import paddle
>>> from paddle.signal import overlap_add
>>> # 2D
>>> x0 = paddle.arange(16).reshape([8, 2])
>>> print(x0)
Tensor(shape=[8, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0 , 1 ],
[2 , 3 ],
[4 , 5 ],
[6 , 7 ],
[8 , 9 ],
[10, 11],
[12, 13],
[14, 15]])
>>> y0 = overlap_add(x0, hop_length=2, axis=-1)
>>> print(y0)
Tensor(shape=[10], dtype=int64, place=Place(cpu), stop_gradient=True,
[0 , 2 , 5 , 9 , 13, 17, 21, 25, 13, 15])
>>> x1 = paddle.arange(16).reshape([2, 8])
>>> print(x1)
Tensor(shape=[2, 8], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],
[8 , 9 , 10, 11, 12, 13, 14, 15]])
>>> y1 = overlap_add(x1, hop_length=2, axis=0)
>>> print(y1)
Tensor(shape=[10], dtype=int64, place=Place(cpu), stop_gradient=True,
[0 , 1 , 10, 12, 14, 16, 18, 20, 14, 15])
>>> # > 2D
>>> x0 = paddle.arange(32).reshape([2, 1, 8, 2])
>>> y0 = overlap_add(x0, hop_length=2, axis=-1)
>>> print(y0.shape)
[2, 1, 10]
>>> x1 = paddle.arange(32).reshape([2, 8, 1, 2])
>>> y1 = overlap_add(x1, hop_length=2, axis=0)
>>> print(y1.shape)
[10, 1, 2]
"""
if axis not in [0, -1]:
raise ValueError(f'Unexpected axis: {axis}. It should be 0 or -1.')
......@@ -289,18 +303,30 @@ def stft(
Examples:
.. code-block:: python
import paddle
from paddle.signal import stft
>>> import paddle
>>> from paddle.signal import stft
# real-valued input
x = paddle.randn([8, 48000], dtype=paddle.float64)
y1 = stft(x, n_fft=512) # [8, 257, 376]
y2 = stft(x, n_fft=512, onesided=False) # [8, 512, 376]
>>> # real-valued input
>>> x = paddle.randn([8, 48000], dtype=paddle.float64)
>>> y1 = stft(x, n_fft=512)
>>> print(y1.shape)
[8, 257, 376]
# complex input
x = paddle.randn([8, 48000], dtype=paddle.float64) + \
paddle.randn([8, 48000], dtype=paddle.float64)*1j # [8, 48000] complex128
y1 = stft(x, n_fft=512, center=False, onesided=False) # [8, 512, 372]
>>> y2 = stft(x, n_fft=512, onesided=False)
>>> print(y2.shape)
[8, 512, 376]
>>> # complex input
>>> x = paddle.randn([8, 48000], dtype=paddle.float64) + \
... paddle.randn([8, 48000], dtype=paddle.float64)*1j
>>> print(x.shape)
[8, 48000]
>>> print(x.dtype)
paddle.complex128
>>> y1 = stft(x, n_fft=512, center=False, onesided=False)
>>> print(y1.shape)
[8, 512, 372]
"""
......@@ -465,20 +491,25 @@ def istft(
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.signal import stft, istft
>>> import numpy as np
>>> import paddle
>>> from paddle.signal import stft, istft
paddle.seed(0)
>>> paddle.seed(0)
# STFT
x = paddle.randn([8, 48000], dtype=paddle.float64)
y = stft(x, n_fft=512) # [8, 257, 376]
>>> # STFT
>>> x = paddle.randn([8, 48000], dtype=paddle.float64)
>>> y = stft(x, n_fft=512)
>>> print(y.shape)
[8, 257, 376]
# ISTFT
x_ = istft(y, n_fft=512) # [8, 48000]
>>> # ISTFT
>>> x_ = istft(y, n_fft=512)
>>> print(x_.shape)
[8, 48000]
np.allclose(x, x_) # True
>>> np.allclose(x, x_)
True
"""
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'istft')
......
......@@ -27,8 +27,8 @@ def get_include():
Examples:
.. code-block:: python
import paddle
include_dir = paddle.sysconfig.get_include()
>>> import paddle
>>> include_dir = paddle.sysconfig.get_include()
"""
import paddle
......@@ -46,8 +46,8 @@ def get_lib():
Examples:
.. code-block:: python
import paddle
include_dir = paddle.sysconfig.get_lib()
>>> import paddle
>>> lib_dir = paddle.sysconfig.get_lib()
"""
import paddle
......