Unverified commit 0c3b3698, authored by Nyakku Shigure, committed by GitHub

[xdoctest] reformat example code with google style in `paddle/jit` (#55645)

* [xdoctest] reformat example code for paddle.jit.api

* test=docs_preview

* add some ... for decorator

* skip some example, test=docs_preview

* add ..., test=docs_preview

* skip some test, test=docs_preview

* more jit files, test=docs_preview

* remove some empty lines, test=docs_preview

* format program translator, test=docs_preview

* remove a blank line, test=docs_preview

* skip translated layer.program, test=docs_preview

* fix doc format, test=docs_preview
Parent eee4b8fb
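For context: the Google-style (xdoctest) format referenced in the commit message prefixes each statement in a docstring example with `>>> ` (continuation lines with `... `) and places the expected output directly below the statement that produces it, so the example can be executed and verified by xdoctest rather than sitting as a passive code block. Examples that cannot run in the docs CI are marked with a `>>> # doctest: +SKIP` directive, as several hunks below show. A minimal before/after sketch of the conversion (the `add_one` function is a hypothetical illustration, not code from this PR):

    # Hypothetical helper used only to illustrate the style change; not part of this PR.
    def add_one(x):
        """
        Old style (plain code block, never executed):

            .. code-block:: python

                import paddle
                y = add_one(paddle.to_tensor([1.0]))
                print(y)  # [2.]

        New Google/xdoctest style (executed and checked by xdoctest):

            .. code-block:: python

                >>> import paddle
                >>> y = add_one(paddle.to_tensor([1.0]))
                >>> print(y)
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                       [2.])
        """
        return x + 1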
This diff is collapsed.
@@ -162,27 +162,28 @@ def convert_call(func):
     Examples:
         .. code-block:: python
-            import paddle
-            from paddle.jit.dy2static import Call
-
-            paddle.enable_static()
-            def dyfunc(x):
-                if paddle.mean(x) < 0:
-                    x_v = x - 1
-                else:
-                    x_v = x + 1
-                return x_v
-
-            new_func = Call(dyfunc)
-            x = paddle.tensor.manipulation.fill_constant(shape=[3, 3], value=0, dtype='float64')
-            x_v = new_func(x)
-
-            exe = paddle.static.Executor(paddle.CPUPlace())
-            out = exe.run(fetch_list=[x_v])
-            print(out[0])
-            # [[1. 1. 1.]
-            #  [1. 1. 1.]
-            #  [1. 1. 1.]]
+
+            >>> # doctest: +SKIP
+            >>> import paddle
+            >>> from paddle.jit.dy2static import Call
+
+            >>> paddle.enable_static()
+            >>> def dyfunc(x):
+            ...     if paddle.mean(x) < 0:
+            ...         x_v = x - 1
+            ...     else:
+            ...         x_v = x + 1
+            ...     return x_v
+            ...
+            >>> new_func = Call(dyfunc)
+            >>> x = paddle.tensor.manipulation.fill_constant(shape=[3, 3], value=0, dtype='float64')
+            >>> x_v = new_func(x)
+
+            >>> exe = paddle.static.Executor(paddle.CPUPlace())
+            >>> out = exe.run(fetch_list=[x_v])
+            >>> print(out[0])
+            [[1. 1. 1.]
+             [1. 1. 1.]
+             [1. 1. 1.]]
     """
     translator_logger.log(1, f"Convert callable object: convert {func}.")
@@ -206,14 +206,14 @@ def set_verbosity(level=0, also_to_stdout=False):
     Examples:
         .. code-block:: python
-            import os
-            import paddle
-            paddle.jit.set_verbosity(1)
-            # The verbosity level is now 1
-            os.environ['TRANSLATOR_VERBOSITY'] = '3'
-            # The verbosity level is now 3, but it has no effect because it has a lower priority than `set_verbosity`
+            >>> import os
+            >>> import paddle
+            >>> paddle.jit.set_verbosity(1)
+            >>> # The verbosity level is now 1
+            >>> os.environ['TRANSLATOR_VERBOSITY'] = '3'
+            >>> # The verbosity level is now 3, but it has no effect because it has a lower priority than `set_verbosity`
     """
     _TRANSLATOR_LOGGER.verbosity_level = level
     _TRANSLATOR_LOGGER.need_to_echo_log_to_stdout = also_to_stdout
@@ -244,14 +244,15 @@ def set_code_level(level=LOG_AllTransformer, also_to_stdout=False):
     Examples:
         .. code-block:: python
-            import paddle
-
-            paddle.jit.set_code_level(2)
-            # It will print the transformed code at level 2, which means to print the code after second transformer,
-            # as the date of August 28, 2020, it is CastTransformer.
-            os.environ['TRANSLATOR_CODE_LEVEL'] = '3'
-            # The code level is now 3, but it has no effect because it has a lower priority than `set_code_level`
+            >>> import os
+            >>> import paddle
+            >>> paddle.jit.set_code_level(2)
+            >>> # It will print the transformed code at level 2, which means to print the code after second transformer,
+            >>> # as the date of August 28, 2020, it is CastTransformer.
+            >>> os.environ['TRANSLATOR_CODE_LEVEL'] = '3'
+            >>> # The code level is now 3, but it has no effect because it has a lower priority than `set_code_level`
     """
     _TRANSLATOR_LOGGER.transformed_code_level = level
@@ -641,24 +641,25 @@ class StaticFunction:
         Examples:
             .. code-block:: python
-                import paddle
-                from paddle.jit import to_static
-                from paddle.static import InputSpec
-
-                paddle.disable_static()
-
-                def foo(x, y):
-                    z = x + y
-                    return z
-
-                # usage 1:
-                decorated_foo = to_static(foo, input_spec=[InputSpec([10], name='x'), InputSpec([10], name='y')])
-                print(decorated_foo.concrete_program)
-
-                # usage 2:
-                decorated_foo = to_static(foo)
-                out_foo = decorated_foo(paddle.rand([10]), paddle.rand([10]))
-                print(decorated_foo.concrete_program)
+
+                >>> # doctest: +SKIP
+                >>> import paddle
+                >>> from paddle.jit import to_static
+                >>> from paddle.static import InputSpec
+
+                >>> paddle.disable_static()
+
+                >>> def foo(x, y):
+                ...     z = x + y
+                ...     return z
+                ...
+                >>> # usage 1:
+                >>> decorated_foo = to_static(foo, input_spec=[InputSpec([10], name='x'), InputSpec([10], name='y')])
+                >>> print(decorated_foo.concrete_program)
+
+                >>> # usage 2:
+                >>> decorated_foo = to_static(foo)
+                >>> out_foo = decorated_foo(paddle.rand([10]), paddle.rand([10]))
+                >>> print(decorated_foo.concrete_program)
         """
         return self.concrete_program_specify_input_spec(input_spec=None)
@@ -760,25 +761,26 @@ class StaticFunction:
         Example::
             .. code-block:: python
-                import paddle
-
-                class Net(paddle.nn.Layer):
-                    def __init__(self):
-                        super().__init__()
-
-                    def forward(self, x, flag=True):
-                        if flag:
-                            out = x + 1
-                        else:
-                            out = x - 1
-                        return out
-
-                x = paddle.randn([10, 1], 'float32')
-                net = paddle.jit.to_static(Net())  # convert into static graph mode
-                out = net(x)
-
-                net.forward.rollback()  # rollback into dygraph mode
-                out = net(x)
+
+                >>> # doctest: +SKIP
+                >>> import paddle
+
+                >>> class Net(paddle.nn.Layer):
+                ...     def __init__(self):
+                ...         super().__init__()
+                ...
+                ...     def forward(self, x, flag=True):
+                ...         if flag:
+                ...             out = x + 1
+                ...         else:
+                ...             out = x - 1
+                ...         return out
+                ...
+                >>> x = paddle.randn([10, 1], 'float32')
+                >>> net = paddle.jit.to_static(Net())  # convert into static graph mode
+                >>> out = net(x)
+
+                >>> net.forward.rollback()  # rollback into dygraph mode
+                >>> out = net(x)
         """

         def rollback_impl(class_instance):
@@ -819,24 +821,24 @@ class StaticFunction:
         Example::
             .. code-block:: python
-                import copy
-                import paddle
-
-                class Net(paddle.nn.Layer):
-                    def __init__(self):
-                        super().__init__()
-
-                    def forward(self, x, flag=True):
-                        if flag:
-                            out = x + 1
-                        else:
-                            out = x - 1
-                        return out
-
-                x = paddle.randn([10, 1], 'float32')
-                net = paddle.jit.to_static(Net())  # convert into static graph mode
-                copy_net = copy.deepcopy(net)      # deepcopy a new net without @to_static
+                >>> import copy
+                >>> import paddle
+
+                >>> class Net(paddle.nn.Layer):
+                ...     def __init__(self):
+                ...         super().__init__()
+                ...
+                ...     def forward(self, x, flag=True):
+                ...         if flag:
+                ...             out = x + 1
+                ...         else:
+                ...             out = x - 1
+                ...         return out
+                ...
+                >>> x = paddle.randn([10, 1], 'float32')
+                >>> net = paddle.jit.to_static(Net())  # convert into static graph mode
+                >>> copy_net = copy.deepcopy(net)      # deepcopy a new net without @to_static

         Please attention that original 'net' will unwrap @to_static and rollback into simple Layer.
         """
@@ -1378,11 +1380,11 @@ class ProgramTranslator:
     Examples:
         .. code-block:: python
-            import paddle
-
-            # Two methods get same object because ProgramTranslator is a singleton
-            paddle.jit.ProgramTranslator()
-            paddle.jit.ProgramTranslator.get_instance()
+            >>> import paddle
+
+            >>> # Two methods get same object because ProgramTranslator is a singleton
+            >>> paddle.jit.dy2static.program_translator.ProgramTranslator()
+            >>> paddle.jit.dy2static.program_translator.ProgramTranslator.get_instance()
     """
@@ -1433,24 +1435,23 @@ class ProgramTranslator:
         Examples:
             .. code-block:: python
-                import paddle
-
-                @paddle.jit.to_static
-                def func(x):
-                    if paddle.mean(x) > 0:
-                        x_v = x - 1
-                    else:
-                        x_v = x + 1
-                    return x_v
-
-                paddle.jit.enable_to_static(False)
-
-                x = paddle.ones([1, 2])
-                # ProgramTranslator is disabled so the func is run in dygraph
-                print(func(x))  # [[0. 0.]]
+                >>> # doctest: +SKIP
+                >>> import paddle
+                >>> def func(x):
+                ...     if paddle.mean(x) > 0:
+                ...         x_v = x - 1
+                ...     else:
+                ...         x_v = x + 1
+                ...     return x_v
+                ...
+                ...
+                >>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
+                >>> x = paddle.ones([1, 2])
+                >>> x_v = prog_trans.get_output(func, x)
+                >>> print(x_v)
+                Tensor(shape=[1, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+                       [[0., 0.]])
         """
         check_type(
             enable_to_static,
@@ -1477,23 +1478,23 @@ class ProgramTranslator:
         Examples:
             .. code-block:: python
-                import paddle
-
-                def func(x):
-                    if paddle.mean(x) > 0:
-                        x_v = x - 1
-                    else:
-                        x_v = x + 1
-                    return x_v
-
-                prog_trans = paddle.jit.ProgramTranslator()
-
-                x = paddle.ones([1, 2])
-                x_v = prog_trans.get_output(func, x)
-                print(x_v)  # [[0. 0.]]
+                >>> # doctest: +SKIP
+                >>> import paddle
+                >>> def func(x):
+                ...     if paddle.mean(x) > 0:
+                ...         x_v = x - 1
+                ...     else:
+                ...         x_v = x + 1
+                ...     return x_v
+                ...
+                ...
+                >>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
+
+                >>> x = paddle.ones([1, 2])
+                >>> x_v = prog_trans.get_output(func, x)
+                >>> print(x_v)
+                Tensor(shape=[1, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+                       [[0., 0.]])
         """
         assert callable(
             dygraph_func
@@ -1560,21 +1561,19 @@ class ProgramTranslator:
         Examples:
             .. code-block:: python
-                import paddle
-
-                def func(x):
-                    if paddle.mean(x) > 0:
-                        x_v = x - 1
-                    else:
-                        x_v = x + 1
-                    return x_v
-
-                prog_trans = paddle.jit.ProgramTranslator()
-                static_func = prog_trans.get_func(func)
-                print(callable(static_func))  # True
+                >>> # doctest: +SKIP
+                >>> import paddle
+                >>> def func(x):
+                ...     if paddle.mean(x) > 0:
+                ...         x_v = x - 1
+                ...     else:
+                ...         x_v = x + 1
+                ...     return x_v
+                ...
+                >>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
+                >>> static_func = prog_trans.get_func(func)
+                >>> print(callable(static_func))
+                True
         """
         assert callable(
             dygraph_func
@@ -1611,25 +1610,22 @@ class ProgramTranslator:
         Examples:
             .. code-block:: python
-                import paddle
-
-                def func(x):
-                    if paddle.mean(x) > 0:
-                        x_v = x - 1
-                    else:
-                        x_v = x + 1
-                    return x_v
-
-                prog_trans = paddle.jit.ProgramTranslator()
-                x = paddle.ones([1, 2])
-                main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x)
-                print([i.name for i in inputs])
-                # [u'generated_tensor_0'] the feed input Tensor name representing x
-                print([o.name for o in outputs])
-                # [u'_generated_var_4'] the fetch output Tensor name representing x_v
+                >>> # doctest: +SKIP
+                >>> import paddle
+                >>> def func(x):
+                ...     if paddle.mean(x) > 0:
+                ...         x_v = x - 1
+                ...     else:
+                ...         x_v = x + 1
+                ...     return x_v
+                ...
+                >>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
+                >>> x = paddle.ones([1, 2])
+                >>> main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x)
+                >>> print([i.name for i in inputs])
+                >>> # [u'generated_tensor_0'] the feed input Tensor name representing x
+                >>> print([o.name for o in outputs])
+                >>> # [u'_generated_var_4'] the fetch output Tensor name representing x_v
         """
         assert callable(
             dygraph_func
@@ -1681,22 +1677,20 @@ class ProgramTranslator:
         Examples:
             .. code-block:: python
-                import paddle
-
-                def func(x):
-                    if paddle.mean(x) > 0:
-                        x_v = x - 1
-                    else:
-                        x_v = x + 1
-                    return x_v
-
-                prog_trans = paddle.jit.ProgramTranslator()
-
-                code = prog_trans.get_code(func)
-                print(type(code))  # <class 'str'>
+                >>> # doctest: +SKIP
+                >>> import paddle
+                >>> def func(x):
+                ...     if paddle.mean(x) > 0:
+                ...         x_v = x - 1
+                ...     else:
+                ...         x_v = x + 1
+                ...     return x_v
+                ...
+                >>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
+                >>> code = prog_trans.get_code(func)
+                >>> print(type(code))
+                <class 'str'>
         """
         assert callable(
             dygraph_func
@@ -1728,11 +1722,10 @@ class ProgramTranslator:
         Examples:
             .. code-block:: python
-                import paddle
-
-                prog_trans = paddle.jit.ProgramTranslator()
-                prog_cache = prog_trans.get_program_cache()
+                >>> import paddle
+
+                >>> prog_trans = paddle.jit.dy2static.program_translator.ProgramTranslator()
+                >>> prog_cache = prog_trans.get_program_cache()
         """
         return self._program_cache
@@ -1751,23 +1744,22 @@ def enable_to_static(enable_to_static_bool):
     Examples:
         .. code-block:: python
-            import paddle
-
-            @paddle.jit.to_static
-            def func(x):
-                if paddle.mean(x) > 0:
-                    x_v = x - 1
-                else:
-                    x_v = x + 1
-                return x_v
-
-            paddle.jit.enable_to_static(False)
-
-            x = paddle.ones([1, 2])
-            # ProgramTranslator is disabled so the func is run in dygraph
-            print(func(x))  # [[0. 0.]]
+            >>> import paddle
+            >>> @paddle.jit.to_static
+            >>> def func(x):
+            ...     if paddle.mean(x) > 0:
+            ...         x_v = x - 1
+            ...     else:
+            ...         x_v = x + 1
+            ...     return x_v
+            ...
+            >>> paddle.jit.enable_to_static(False)
+            >>> x = paddle.ones([1, 2])
+            >>> # ProgramTranslator is disabled so the func is run in dygraph
+            >>> print(func(x))
+            Tensor(shape=[1, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+                   [[0., 0.]])
     """
     check_type(
@@ -1312,87 +1312,86 @@ class TranslatedLayer(layers.Layer):
     Examples:
         .. code-block:: python
-            import numpy as np
-            import paddle
-            import paddle.nn as nn
-            import paddle.optimizer as opt
-
-            BATCH_SIZE = 16
-            BATCH_NUM = 4
-            EPOCH_NUM = 4
-
-            IMAGE_SIZE = 784
-            CLASS_NUM = 10
-
-            # define a random dataset
-            class RandomDataset(paddle.io.Dataset):
-                def __init__(self, num_samples):
-                    self.num_samples = num_samples
-
-                def __getitem__(self, idx):
-                    image = np.random.random([IMAGE_SIZE]).astype('float32')
-                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
-                    return image, label
-
-                def __len__(self):
-                    return self.num_samples
-
-            class LinearNet(nn.Layer):
-                def __init__(self):
-                    super().__init__()
-                    self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
-
-                @paddle.jit.to_static
-                def forward(self, x):
-                    return self._linear(x)
-
-            def train(layer, loader, loss_fn, opt):
-                for epoch_id in range(EPOCH_NUM):
-                    for batch_id, (image, label) in enumerate(loader()):
-                        out = layer(image)
-                        loss = loss_fn(out, label)
-                        loss.backward()
-                        opt.step()
-                        opt.clear_grad()
-                        print("Epoch {} batch {}: loss = {}".format(
-                            epoch_id, batch_id, np.mean(loss.numpy())))
-
-            # 1. train & save model.
-
-            # create network
-            layer = LinearNet()
-            loss_fn = nn.CrossEntropyLoss()
-            adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
-
-            # create data loader
-            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
-            loader = paddle.io.DataLoader(dataset,
-                batch_size=BATCH_SIZE,
-                shuffle=True,
-                drop_last=True,
-                num_workers=2)
-
-            # train
-            train(layer, loader, loss_fn, adam)
-
-            # save
-            model_path = "linear.example.model"
-            paddle.jit.save(layer, model_path)
-
-            # 2. load model as TranslatedLayer
-
-            # load
-            translated_layer = paddle.jit.load(model_path)
-
-            # inference
-            translated_layer.eval()
-            x = paddle.randn([1, IMAGE_SIZE], 'float32')
-            pred = translated_layer(x)
-
-            # fine-tune
-            translated_layer.train()
-            adam = opt.Adam(learning_rate=0.001, parameters=translated_layer.parameters())
-            train(translated_layer, loader, loss_fn, adam)
+
+            >>> # doctest: +SKIP
+            >>> import numpy as np
+            >>> import paddle
+            >>> import paddle.nn as nn
+            >>> import paddle.optimizer as opt
+
+            >>> BATCH_SIZE = 16
+            >>> BATCH_NUM = 4
+            >>> EPOCH_NUM = 4
+
+            >>> IMAGE_SIZE = 784
+            >>> CLASS_NUM = 10
+
+            >>> # define a random dataset
+            >>> class RandomDataset(paddle.io.Dataset):
+            ...     def __init__(self, num_samples):
+            ...         self.num_samples = num_samples
+            ...
+            ...     def __getitem__(self, idx):
+            ...         image = np.random.random([IMAGE_SIZE]).astype('float32')
+            ...         label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
+            ...         return image, label
+            ...
+            ...     def __len__(self):
+            ...         return self.num_samples
+            ...
+            >>> class LinearNet(nn.Layer):
+            ...     def __init__(self):
+            ...         super().__init__()
+            ...         self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
+            ...
+            ...     @paddle.jit.to_static
+            ...     def forward(self, x):
+            ...         return self._linear(x)
+            ...
+            >>> def train(layer, loader, loss_fn, opt):
+            ...     for epoch_id in range(EPOCH_NUM):
+            ...         for batch_id, (image, label) in enumerate(loader()):
+            ...             out = layer(image)
+            ...             loss = loss_fn(out, label)
+            ...             loss.backward()
+            ...             opt.step()
+            ...             opt.clear_grad()
+            ...             print("Epoch {} batch {}: loss = {}".format(
+            ...                 epoch_id, batch_id, np.mean(loss.numpy())))
+            ...
+            >>> # 1. train & save model.
+            >>> # create network
+            >>> layer = LinearNet()
+            >>> loss_fn = nn.CrossEntropyLoss()
+            >>> adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
+            >>> # create data loader
+            >>> dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
+            >>> loader = paddle.io.DataLoader(dataset,
+            ...     batch_size=BATCH_SIZE,
+            ...     shuffle=True,
+            ...     drop_last=True,
+            ...     num_workers=2
+            ... )
+            >>> # train
+            >>> train(layer, loader, loss_fn, adam)
+            >>> # save
+            >>> model_path = "linear.example.model"
+            >>> paddle.jit.save(layer, model_path)
+            >>> # 2. load model as TranslatedLayer
+            >>> # load
+            >>> translated_layer = paddle.jit.load(model_path)

+            >>> # inference
+            >>> translated_layer.eval()
+            >>> x = paddle.randn([1, IMAGE_SIZE], 'float32')
+            >>> pred = translated_layer(x)

+            >>> # fine-tune
+            >>> translated_layer.train()
+            >>> adam = opt.Adam(learning_rate=0.001, parameters=translated_layer.parameters())
+            >>> train(translated_layer, loader, loss_fn, adam)
     """
@@ -1523,76 +1522,76 @@ class TranslatedLayer(layers.Layer):
         Examples:
             .. code-block:: python
-                import numpy as np
-                import paddle
-                import paddle.nn as nn
-                import paddle.optimizer as opt
-
-                BATCH_SIZE = 16
-                BATCH_NUM = 4
-                EPOCH_NUM = 4
-
-                IMAGE_SIZE = 784
-                CLASS_NUM = 10
-
-                # define a random dataset
-                class RandomDataset(paddle.io.Dataset):
-                    def __init__(self, num_samples):
-                        self.num_samples = num_samples
-
-                    def __getitem__(self, idx):
-                        image = np.random.random([IMAGE_SIZE]).astype('float32')
-                        label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
-                        return image, label
-
-                    def __len__(self):
-                        return self.num_samples
-
-                class LinearNet(nn.Layer):
-                    def __init__(self):
-                        super().__init__()
-                        self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
-
-                    @paddle.jit.to_static
-                    def forward(self, x):
-                        return self._linear(x)
-
-                def train(layer, loader, loss_fn, opt):
-                    for epoch_id in range(EPOCH_NUM):
-                        for batch_id, (image, label) in enumerate(loader()):
-                            out = layer(image)
-                            loss = loss_fn(out, label)
-                            loss.backward()
-                            opt.step()
-                            opt.clear_grad()
-                            print("Epoch {} batch {}: loss = {}".format(
-                                epoch_id, batch_id, np.mean(loss.numpy())))
-
-                # create network
-                layer = LinearNet()
-                loss_fn = nn.CrossEntropyLoss()
-                adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
-
-                # create data loader
-                dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
-                loader = paddle.io.DataLoader(dataset,
-                    batch_size=BATCH_SIZE,
-                    shuffle=True,
-                    drop_last=True,
-                    num_workers=2)
-
-                # train
-                train(layer, loader, loss_fn, adam)
-
-                # save
-                model_path = "linear.example.model"
-                paddle.jit.save(layer, model_path)
-
-                # load
-                translated_layer = paddle.jit.load(model_path)
-
-                # get program
-                program = translated_layer.program()
+                >>> # doctest: +SKIP
+                >>> import numpy as np
+                >>> import paddle
+                >>> from paddle import nn
+                >>> import paddle.optimizer as opt
+
+                >>> BATCH_SIZE = 16
+                >>> BATCH_NUM = 4
+                >>> EPOCH_NUM = 4
+
+                >>> IMAGE_SIZE = 784
+                >>> CLASS_NUM = 10
+
+                >>> # define a random dataset
+                >>> class RandomDataset(paddle.io.Dataset):
+                ...     def __init__(self, num_samples):
+                ...         self.num_samples = num_samples
+                ...
+                ...     def __getitem__(self, idx):
+                ...         image = np.random.random([IMAGE_SIZE]).astype('float32')
+                ...         label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
+                ...         return image, label
+                ...
+                ...     def __len__(self):
+                ...         return self.num_samples
+                ...
+                >>> class LinearNet(nn.Layer):
+                ...     def __init__(self):
+                ...         super().__init__()
+                ...         self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
+                ...
+                ...     @paddle.jit.to_static
+                ...     def forward(self, x):
+                ...         return self._linear(x)
+                ...
+                >>> def train(layer, loader, loss_fn, opt):
+                ...     for epoch_id in range(EPOCH_NUM):
+                ...         for batch_id, (image, label) in enumerate(loader()):
+                ...             out = layer(image)
+                ...             loss = loss_fn(out, label)
+                ...             loss.backward()
+                ...             opt.step()
+                ...             opt.clear_grad()
+                ...             print("Epoch {} batch {}: loss = {}".format(
+                ...                 epoch_id, batch_id, np.mean(loss.numpy())))
+                ...
+                >>> # create network
+                >>> layer = LinearNet()
+                >>> loss_fn = nn.CrossEntropyLoss()
+                >>> adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
+                >>> # create data loader
+                >>> dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
+                >>> loader = paddle.io.DataLoader(dataset,
+                ...     batch_size=BATCH_SIZE,
+                ...     shuffle=True,
+                ...     drop_last=True,
+                ...     num_workers=2
+                ... )
+                >>> # train
+                >>> train(layer, loader, loss_fn, adam)
+                >>> # save
+                >>> model_path = "linear.example.model"
+                >>> paddle.jit.save(layer, model_path)
+                >>> # load
+                >>> translated_layer = paddle.jit.load(model_path)
+                >>> # get program
+                >>> program = translated_layer.program()
         """
         # 1. get program holder
         program_holder = self._get_program_holder(method_name)