Unverified commit 0c3b3698 authored by Nyakku Shigure and committed by GitHub

[xdoctest] reformat example code with google style in `paddle/jit` (#55645)

* [xdoctest] reformat example code for paddle.jit.api

* test=docs_preview

* add some ... for decorator

* skip some example, test=docs_preview

* add ..., test=docs_preview

* skip some test, test=docs_preview

* more jit files, test=docs_preview

* remove some empty lines, test=docs_preview

* format program translator, test=docs_preview

* remove a blank line, test=docs_preview

* skip translated layer.program, test=docs_preview

* fix doc format, test=docs_preview
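For reference, the Google-style doctest format adopted throughout this change (checked by xdoctest) prefixes each statement with `>>>`, continues multi-line constructs with `...`, places the expected output unprefixed on the lines that follow, and marks examples that cannot run during the docs build with `# doctest: +SKIP`. A minimal illustrative sketch of the convention, not taken from this diff (the `add_one` helper is hypothetical):

.. code-block:: python

    >>> import paddle
    >>> def add_one(x):    # multi-line constructs continue on `...` lines
    ...     return x + 1
    ...
    >>> y = add_one(paddle.ones([2]))
    >>> print(y.numpy())   # the expected output follows, unprefixed
    [2. 2.]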
Parent eee4b8fb
This diff is collapsed.
@@ -162,27 +162,28 @@ def convert_call(func):
Examples:
.. code-block:: python
import paddle
from paddle.jit.dy2static import Call
paddle.enable_static()
def dyfunc(x):
if paddle.mean(x) < 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
new_func = Call(dyfunc)
x = paddle.tensor.manipulation.fill_constant(shape=[3, 3], value=0, dtype='float64')
x_v = new_func(x)
exe = paddle.static.Executor(paddle.CPUPlace())
out = exe.run(fetch_list=[x_v])
print(out[0])
# [[1. 1. 1.]
# [1. 1. 1.]
# [1. 1. 1.]]
>>> # doctest: +SKIP
>>> import paddle
>>> from paddle.jit.dy2static import Call
>>> paddle.enable_static()
>>> def dyfunc(x):
... if paddle.mean(x) < 0:
... x_v = x - 1
... else:
... x_v = x + 1
... return x_v
...
>>> new_func = Call(dyfunc)
>>> x = paddle.tensor.manipulation.fill_constant(shape=[3, 3], value=0, dtype='float64')
>>> x_v = new_func(x)
>>> exe = paddle.static.Executor(paddle.CPUPlace())
>>> out = exe.run(fetch_list=[x_v])
>>> print(out[0])
[[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
"""
translator_logger.log(1, f"Convert callable object: convert {func}.")
@@ -206,14 +206,14 @@ def set_verbosity(level=0, also_to_stdout=False):
Examples:
.. code-block:: python
import os
import paddle
>>> import os
>>> import paddle
paddle.jit.set_verbosity(1)
# The verbosity level is now 1
>>> paddle.jit.set_verbosity(1)
>>> # The verbosity level is now 1
os.environ['TRANSLATOR_VERBOSITY'] = '3'
# The verbosity level is now 3, but it has no effect because it has a lower priority than `set_verbosity`
>>> os.environ['TRANSLATOR_VERBOSITY'] = '3'
>>> # The verbosity level is now 3, but it has no effect because it has a lower priority than `set_verbosity`
"""
_TRANSLATOR_LOGGER.verbosity_level = level
_TRANSLATOR_LOGGER.need_to_echo_log_to_stdout = also_to_stdout
@@ -244,14 +244,15 @@ def set_code_level(level=LOG_AllTransformer, also_to_stdout=False):
Examples:
.. code-block:: python
import paddle
>>> import os
>>> import paddle
paddle.jit.set_code_level(2)
# It will print the transformed code at level 2, i.e. the code after the second transformer;
# as of August 28, 2020, that is CastTransformer.
>>> paddle.jit.set_code_level(2)
>>> # It will print the transformed code at level 2, i.e. the code after the second transformer;
>>> # as of August 28, 2020, that is CastTransformer.
os.environ['TRANSLATOR_CODE_LEVEL'] = '3'
# The code level is now 3, but it has no effect because it has a lower priority than `set_code_level`
>>> os.environ['TRANSLATOR_CODE_LEVEL'] = '3'
>>> # The code level is now 3, but it has no effect because it has a lower priority than `set_code_level`
"""
_TRANSLATOR_LOGGER.transformed_code_level = level
@@ -641,24 +641,25 @@ class StaticFunction:
Examples:
.. code-block:: python
import paddle
from paddle.jit import to_static
from paddle.static import InputSpec
paddle.disable_static()
def foo(x, y):
z = x + y
return z
# usage 1:
decorated_foo = to_static(foo, input_spec=[InputSpec([10], name='x'), InputSpec([10], name='y')])
print(decorated_foo.concrete_program)
# usage 2:
decorated_foo = to_static(foo)
out_foo = decorated_foo(paddle.rand([10]), paddle.rand([10]))
print(decorated_foo.concrete_program)
>>> # doctest: +SKIP
>>> import paddle
>>> from paddle.jit import to_static
>>> from paddle.static import InputSpec
>>> paddle.disable_static()
>>> def foo(x, y):
... z = x + y
... return z
...
>>> # usage 1:
>>> decorated_foo = to_static(foo, input_spec=[InputSpec([10], name='x'), InputSpec([10], name='y')])
>>> print(decorated_foo.concrete_program)
>>> # usage 2:
>>> decorated_foo = to_static(foo)
>>> out_foo = decorated_foo(paddle.rand([10]), paddle.rand([10]))
>>> print(decorated_foo.concrete_program)
"""
return self.concrete_program_specify_input_spec(input_spec=None)
@@ -760,25 +761,26 @@ class StaticFunction:
Example:
.. code-block:: python
import paddle
class Net(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, x, flag=True):
if flag:
out = x + 1
else:
out = x - 1
return out
x = paddle.randn([10, 1], 'float32')
net = paddle.jit.to_static(Net()) # convert into static graph mode
out = net(x)
net.forward.rollback() # rollback into dygraph mode
out = net(x)
>>> # doctest: +SKIP
>>> import paddle
>>> class Net(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
...
... def forward(self, x, flag=True):
... if flag:
... out = x + 1
... else:
... out = x - 1
... return out
...
>>> x = paddle.randn([10, 1], 'float32')
>>> net = paddle.jit.to_static(Net()) # convert into static graph mode
>>> out = net(x)
>>> net.forward.rollback() # rollback into dygraph mode
>>> out = net(x)
"""
def rollback_impl(class_instance):
@@ -819,24 +821,24 @@ class StaticFunction:
Example:
.. code-block:: python
import copy
import paddle
class Net(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, x, flag=True):
if flag:
out = x + 1
else:
out = x - 1
return out
x = paddle.randn([10, 1], 'float32')
net = paddle.jit.to_static(Net()) # convert into static graph mode
copy_net = copy.deepcopy(net) # deepcopy a new net without @to_static
>>> import copy
>>> import paddle
>>> class Net(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
...
... def forward(self, x, flag=True):
... if flag:
... out = x + 1
... else:
... out = x - 1
... return out
...
>>> x = paddle.randn([10, 1], 'float32')
>>> net = paddle.jit.to_static(Net()) # convert into static graph mode
>>> copy_net = copy.deepcopy(net) # deepcopy a new net without @to_static
Note that the original 'net' will unwrap @to_static and roll back into a plain Layer.
"""
@@ -1378,11 +1380,11 @@ class ProgramTranslator:
Examples:
.. code-block:: python
import paddle
>>> import paddle
# Two methods get same object because ProgramTranslator is a singleton
paddle.jit.ProgramTranslator()
paddle.jit.ProgramTranslator.get_instance()
>>> # Two methods get same object because ProgramTranslator is a singleton
>>> paddle.jit.dy2static.program_translator.ProgramTranslator()
>>> paddle.jit.dy2static.program_translator.ProgramTranslator.get_instance()
"""
@@ -1433,24 +1435,23 @@ class ProgramTranslator:
Examples:
.. code-block:: python
import paddle
@paddle.jit.to_static
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
paddle.jit.enable_to_static(False)
x = paddle.ones([1, 2])
# ProgramTranslator is disabled so the func is run in dygraph
print(func(x)) # [[0. 0.]]
>>> # doctest: +SKIP
>>> import paddle
>>> def func(x):
... if paddle.mean(x) > 0:
... x_v = x - 1
... else:
... x_v = x + 1
... return x_v
...
>>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
>>> x = paddle.ones([1, 2])
>>> x_v = prog_trans.get_output(func, x)
>>> print(x_v)
Tensor(shape=[1, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0., 0.]])
"""
check_type(
enable_to_static,
@@ -1477,23 +1478,23 @@ class ProgramTranslator:
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
x = paddle.ones([1, 2])
x_v = prog_trans.get_output(func, x)
print(x_v) # [[0. 0.]]
>>> # doctest: +SKIP
>>> import paddle
>>> def func(x):
... if paddle.mean(x) > 0:
... x_v = x - 1
... else:
... x_v = x + 1
... return x_v
...
>>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
>>> x = paddle.ones([1, 2])
>>> x_v = prog_trans.get_output(func, x)
>>> print(x_v)
Tensor(shape=[1, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0., 0.]])
"""
assert callable(
dygraph_func
@@ -1560,21 +1561,19 @@ class ProgramTranslator:
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
static_func = prog_trans.get_func(func)
print(callable(static_func)) # True
>>> # doctest: +SKIP
>>> import paddle
>>> def func(x):
... if paddle.mean(x) > 0:
... x_v = x - 1
... else:
... x_v = x + 1
... return x_v
...
>>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
>>> static_func = prog_trans.get_func(func)
>>> print(callable(static_func))
True
"""
assert callable(
dygraph_func
@@ -1611,25 +1610,22 @@ class ProgramTranslator:
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
x = paddle.ones([1, 2])
main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x)
print([i.name for i in inputs])
# [u'generated_tensor_0'] the feed input Tensor name representing x
print([o.name for o in outputs])
# [u'_generated_var_4'] the fetch output Tensor name representing x_v
>>> # doctest: +SKIP
>>> import paddle
>>> def func(x):
... if paddle.mean(x) > 0:
... x_v = x - 1
... else:
... x_v = x + 1
... return x_v
...
>>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
>>> x = paddle.ones([1, 2])
>>> main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x)
>>> print([i.name for i in inputs])
>>> # [u'generated_tensor_0'] the feed input Tensor name representing x
>>> print([o.name for o in outputs])
>>> # [u'_generated_var_4'] the fetch output Tensor name representing x_v
"""
assert callable(
dygraph_func
@@ -1681,22 +1677,20 @@ class ProgramTranslator:
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
code = prog_trans.get_code(func)
print(type(code)) # <class 'str'>
>>> # doctest: +SKIP
>>> import paddle
>>> def func(x):
... if paddle.mean(x) > 0:
... x_v = x - 1
... else:
... x_v = x + 1
... return x_v
...
>>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
>>> code = prog_trans.get_code(func)
>>> print(type(code))
<class 'str'>
"""
assert callable(
dygraph_func
@@ -1728,11 +1722,10 @@ class ProgramTranslator:
Examples:
.. code-block:: python
import paddle
prog_trans = paddle.jit.ProgramTranslator()
prog_cache = prog_trans.get_program_cache()
>>> import paddle
>>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
>>> prog_cache = prog_trans.get_program_cache()
"""
return self._program_cache
@@ -1751,23 +1744,22 @@ def enable_to_static(enable_to_static_bool):
Examples:
.. code-block:: python
import paddle
@paddle.jit.to_static
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
paddle.jit.enable_to_static(False)
x = paddle.ones([1, 2])
# ProgramTranslator is disabled so the func is run in dygraph
print(func(x)) # [[0. 0.]]
>>> import paddle
>>> @paddle.jit.to_static
... def func(x):
... if paddle.mean(x) > 0:
... x_v = x - 1
... else:
... x_v = x + 1
... return x_v
...
>>> paddle.jit.enable_to_static(False)
>>> x = paddle.ones([1, 2])
>>> # ProgramTranslator is disabled so the func is run in dygraph
>>> print(func(x))
Tensor(shape=[1, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0., 0.]])
"""
check_type(
@@ -1312,87 +1312,86 @@ class TranslatedLayer(layers.Layer):
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super().__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# 1. train & save model.
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
train(layer, loader, loss_fn, adam)
# save
model_path = "linear.example.model"
paddle.jit.save(layer, model_path)
# 2. load model as TranslatedLayer
# load
translated_layer = paddle.jit.load(model_path)
# inference
translated_layer.eval()
x = paddle.randn([1, IMAGE_SIZE], 'float32')
pred = translated_layer(x)
# fine-tune
translated_layer.train()
adam = opt.Adam(learning_rate=0.001, parameters=translated_layer.parameters())
train(translated_layer, loader, loss_fn, adam)
>>> # doctest: +SKIP
>>> import numpy as np
>>> import paddle
>>> import paddle.nn as nn
>>> import paddle.optimizer as opt
>>> BATCH_SIZE = 16
>>> BATCH_NUM = 4
>>> EPOCH_NUM = 4
>>> IMAGE_SIZE = 784
>>> CLASS_NUM = 10
>>> # define a random dataset
>>> class RandomDataset(paddle.io.Dataset):
... def __init__(self, num_samples):
... self.num_samples = num_samples
...
... def __getitem__(self, idx):
... image = np.random.random([IMAGE_SIZE]).astype('float32')
... label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
... return image, label
...
... def __len__(self):
... return self.num_samples
...
>>> class LinearNet(nn.Layer):
... def __init__(self):
... super().__init__()
... self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
...
... @paddle.jit.to_static
... def forward(self, x):
... return self._linear(x)
...
>>> def train(layer, loader, loss_fn, opt):
... for epoch_id in range(EPOCH_NUM):
... for batch_id, (image, label) in enumerate(loader()):
... out = layer(image)
... loss = loss_fn(out, label)
... loss.backward()
... opt.step()
... opt.clear_grad()
... print("Epoch {} batch {}: loss = {}".format(
... epoch_id, batch_id, np.mean(loss.numpy())))
...
>>> # 1. train & save model.
>>> # create network
>>> layer = LinearNet()
>>> loss_fn = nn.CrossEntropyLoss()
>>> adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
>>> # create data loader
>>> dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
>>> loader = paddle.io.DataLoader(dataset,
... batch_size=BATCH_SIZE,
... shuffle=True,
... drop_last=True,
... num_workers=2
... )
>>> # train
>>> train(layer, loader, loss_fn, adam)
>>> # save
>>> model_path = "linear.example.model"
>>> paddle.jit.save(layer, model_path)
>>> # 2. load model as TranslatedLayer
>>> # load
>>> translated_layer = paddle.jit.load(model_path)
>>> # inference
>>> translated_layer.eval()
>>> x = paddle.randn([1, IMAGE_SIZE], 'float32')
>>> pred = translated_layer(x)
>>> # fine-tune
>>> translated_layer.train()
>>> adam = opt.Adam(learning_rate=0.001, parameters=translated_layer.parameters())
>>> train(translated_layer, loader, loss_fn, adam)
"""
@@ -1523,76 +1522,76 @@ class TranslatedLayer(layers.Layer):
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super().__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
train(layer, loader, loss_fn, adam)
# save
model_path = "linear.example.model"
paddle.jit.save(layer, model_path)
# load
translated_layer = paddle.jit.load(model_path)
# get program
program = translated_layer.program()
>>> # doctest: +SKIP
>>> import numpy as np
>>> import paddle
>>> from paddle import nn
>>> import paddle.optimizer as opt
>>> BATCH_SIZE = 16
>>> BATCH_NUM = 4
>>> EPOCH_NUM = 4
>>> IMAGE_SIZE = 784
>>> CLASS_NUM = 10
>>> # define a random dataset
>>> class RandomDataset(paddle.io.Dataset):
... def __init__(self, num_samples):
... self.num_samples = num_samples
...
... def __getitem__(self, idx):
... image = np.random.random([IMAGE_SIZE]).astype('float32')
... label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
... return image, label
...
... def __len__(self):
... return self.num_samples
...
>>> class LinearNet(nn.Layer):
... def __init__(self):
... super().__init__()
... self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
...
... @paddle.jit.to_static
... def forward(self, x):
... return self._linear(x)
...
>>> def train(layer, loader, loss_fn, opt):
... for epoch_id in range(EPOCH_NUM):
... for batch_id, (image, label) in enumerate(loader()):
... out = layer(image)
... loss = loss_fn(out, label)
... loss.backward()
... opt.step()
... opt.clear_grad()
... print("Epoch {} batch {}: loss = {}".format(
... epoch_id, batch_id, np.mean(loss.numpy())))
...
>>> # create network
>>> layer = LinearNet()
>>> loss_fn = nn.CrossEntropyLoss()
>>> adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
>>> # create data loader
>>> dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
>>> loader = paddle.io.DataLoader(dataset,
... batch_size=BATCH_SIZE,
... shuffle=True,
... drop_last=True,
... num_workers=2
... )
>>> # train
>>> train(layer, loader, loss_fn, adam)
>>> # save
>>> model_path = "linear.example.model"
>>> paddle.jit.save(layer, model_path)
>>> # load
>>> translated_layer = paddle.jit.load(model_path)
>>> # get program
>>> program = translated_layer.program()
"""
# 1. get program holder
program_holder = self._get_program_holder(method_name)