Unverified commit ad8bbe58. Author: Zhen Wang. Committer: GitHub.


Fix bugs in the example code of several APIs: load_inference_model, load_vars, save_vars, L1DecayRegularizer and L2DecayRegularizer. (#17324)

* fix some api example codes' bugs.

* update API.spec. test=develop test=document_preview

* add import fluid. test=develop test=document_preview
Parent 68ec0a6f
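All of the fixed examples below share one pattern: they now import fluid explicitly and build a small, self-contained program (with its own startup program) before calling the API being documented, so each snippet runs on its own. The following is a condensed sketch of that pattern, assembled from the updated examples below; the names `main_prog`, `fc_w` and `fc_b` are simply the ones those examples use.

.. code-block:: python

    # Condensed sketch of the structure shared by the fixed io examples below.
    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # a tiny network, so save_vars / load_vars / save_inference_model
        # have parameters to work with
        data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
        w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
        b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
        hidden = fluid.layers.elementwise_add(fluid.layers.matmul(x=data, y=w), b)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)  # initialize parameters before saving or loading them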
@@ -47,14 +47,14 @@ paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.Paralle
 paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None
 paddle.fluid.BuildStrategy.ReduceStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.ReduceStrategy, arg0: int) -> None
 paddle.fluid.BuildStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy) -> None
-paddle.fluid.io.save_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'b55d6193a1d4198d45b013fc5779e1f2'))
+paddle.fluid.io.save_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '869104f47e6fd21d897c3fcc426aa942'))
 paddle.fluid.io.save_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '3a7a99abac3e1bf898871fe609354218'))
 paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '9141bb5f32caf7975eb3fd88c8a1b2da'))
-paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '0a5308f496632ab1ec3ba1f1377e6f95'))
+paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '1bb9454cf09d71f190bb51550c5a3ac9'))
 paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '41779819cef32f2246e83aebc5a002e2'))
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
 paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', 'af82e1b5fe5764029905a191b987f63d'))
-paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '648f64d8fd81572eef34a69e533459ef'))
+paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'aaf3392332f2e5ef9d9177985be2c04a'))
 paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '4a072de39998ee4e0de33fcec11325a6'))
 paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '3db4b24d33fe4f711e303f9673dc5c6a'))
...
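Only the 'document' hashes of save_vars, load_vars and load_inference_model change in API.spec, because only their docstrings (the example code) were touched. The hashes look like MD5 digests of the docstrings; the snippet below is a minimal sketch under that assumption and is not Paddle's actual spec generator, so the digest it prints may not match API.spec byte for byte.

.. code-block:: python

    # Hypothetical reproduction of a 'document' digest from API.spec.
    # Assumption: the digest is an MD5 of the API's docstring; the real
    # tooling may preprocess the text differently.
    import hashlib
    import inspect

    import paddle.fluid as fluid

    doc = inspect.getdoc(fluid.io.save_vars) or ""
    print(hashlib.md5(doc.encode("utf-8")).hexdigest())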
@@ -144,27 +144,37 @@ def save_vars(executor,
 Examples:
     .. code-block:: python

-        exe = fluid.Executor(fluid.CPUPlace())
-        param_path = "./my_paddle_model"
+        import paddle.fluid as fluid
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
+        with fluid.program_guard(main_prog, startup_prog):
+            data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+            w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
+            b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
+            hidden_w = fluid.layers.matmul(x=data, y=w)
+            hidden_b = fluid.layers.elementwise_add(hidden_w, b)
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(startup_prog)
+        param_path = "./my_paddle_model"

         # The first usage: using `main_program` to specify variables
         def name_has_fc(var):
             res = "fc" in var.name
             return res
-        prog = fluid.default_main_program()
-        fluid.io.save_vars(executor=exe, dirname=path, main_program=prog,
+        fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog,
                            vars=None, predicate=name_has_fc)
         # All variables in `main_program` whose name includes "fc" will be saved.
         # And variables are going to be saved separately.

         # The second usage: using `vars` to specify variables
-        var_list = [var_a, var_b, var_c]
+        var_list = [w, b]
+        path = "./my_paddle_vars"
         fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
                            filename="vars_file")
-        # var_a, var_b and var_c will be saved. And they are going to be
-        # saved in the same file named 'var_file' in the path "./my_paddle_model".
+        # w and b will be saved, and they are going to be
+        # saved in the same file named 'vars_file' in the path "./my_paddle_vars".
 """
 save_dirname = os.path.normpath(dirname)
 if vars is None:
@@ -546,27 +556,40 @@ def load_vars(executor,
 Examples:
     .. code-block:: python

-        exe = fluid.Executor(fluid.CPUPlace())
-        param_path = "./my_paddle_model"
+        import paddle.fluid as fluid
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
+        with fluid.program_guard(main_prog, startup_prog):
+            data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+            w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
+            b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
+            hidden_w = fluid.layers.matmul(x=data, y=w)
+            hidden_b = fluid.layers.elementwise_add(hidden_w, b)
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(startup_prog)
+        param_path = "./my_paddle_model"

         # The first usage: using `main_program` to specify variables
         def name_has_fc(var):
             res = "fc" in var.name
             return res
-        prog = fluid.default_main_program()
-        fluid.io.load_vars(executor=exe, dirname=path, main_program=prog,
+        fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog,
+                           vars=None, predicate=name_has_fc)
+        fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog,
                            vars=None, predicate=name_has_fc)
         # All variables in `main_program` whose name includes "fc" will be loaded.
         # And all the variables are supposed to have been saved in different files.

         # The second usage: using `vars` to specify variables
-        var_list = [var_a, var_b, var_c]
+        path = "./my_paddle_vars"
+        var_list = [w, b]
+        fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
+                           filename="vars_file")
         fluid.io.load_vars(executor=exe, dirname=path, vars=var_list,
                            filename="vars_file")
-        # var_a, var_b and var_c will be loaded. And they are supposed to haven
-        # been saved in the same file named 'var_file' in the path "./my_paddle_model".
+        # w and b will be loaded, and they are supposed to have
+        # been saved in the same file named 'vars_file' in the path "./my_paddle_vars".
 """
 load_dirname = os.path.normpath(dirname)
 if vars is None:
@@ -1088,25 +1111,43 @@ def load_inference_model(dirname,
 Examples:
     .. code-block:: python

-        exe = fluid.Executor(fluid.CPUPlace())
+        import paddle.fluid as fluid
+        import numpy as np
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
+        with fluid.program_guard(main_prog, startup_prog):
+            data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+            w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
+            b = fluid.layers.create_parameter(shape=[200], dtype='float32')
+            hidden_w = fluid.layers.matmul(x=data, y=w)
+            hidden_b = fluid.layers.elementwise_add(hidden_w, b)
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(startup_prog)
         path = "./infer_model"
-        endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
-        [inference_program, feed_target_names, fetch_targets] =
+        fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
+                                      target_vars=[hidden_b], executor=exe, main_program=main_prog)
+        tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
+        [inference_program, feed_target_names, fetch_targets] = \
             fluid.io.load_inference_model(dirname=path, executor=exe)
         results = exe.run(inference_program,
                           feed={feed_target_names[0]: tensor_img},
                           fetch_list=fetch_targets)

+        # `endpoints` is your pserver endpoints list; the above is just an example.
+        endpoints = ["127.0.0.1:2023", "127.0.0.1:2024"]
         # if we need a lookup table, we will use:
-        fluid.io.load_inference_model(dirname=path, executor=exe, pserver_endpoints=endpoints)
+        [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = \
+            fluid.io.load_inference_model(dirname=path,
+                                          executor=exe,
+                                          pserver_endpoints=endpoints)

         # In this example, the inference program was saved in
         # "./infer_model/__model__" and parameters were saved in
-        # separate files in ""./infer_model".
+        # separate files in "./infer_model".
         # After getting the inference program, feed target names and
         # fetch targets, we can use an Executor to run the inference
         # program to get the inference result.
 """
 load_dirname = os.path.normpath(dirname)
 if not os.path.isdir(load_dirname):
...
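To see what the example above actually wrote, listing the output directory is enough: the docstring comment says the inference program goes to `__model__`, and since no params_filename is given, each parameter is stored in its own file. The check below is a minimal sketch, assuming the example has been run in the current working directory.

.. code-block:: python

    import os

    path = "./infer_model"
    # Expect '__model__' (the serialized inference program) plus one file
    # per parameter saved by save_inference_model in the example above.
    print(sorted(os.listdir(path)))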
@@ -124,11 +124,21 @@ class L2DecayRegularizer(WeightDecayRegularizer):
 Examples:
     .. code-block:: python

+        import paddle.fluid as fluid
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
+        with fluid.program_guard(main_prog, startup_prog):
+            data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            hidden = fluid.layers.fc(input=data, size=128, act='relu')
+            prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+            loss = fluid.layers.cross_entropy(input=prediction, label=label)
+            avg_loss = fluid.layers.mean(loss)
         optimizer = fluid.optimizer.Adagrad(
             learning_rate=1e-4,
             regularization=fluid.regularizer.L2DecayRegularizer(
                 regularization_coeff=0.1))
-        optimizer.minimize(avg_cost)
+        optimizer.minimize(avg_loss)
 """
 def __init__(self, regularization_coeff=0.0):
@@ -183,11 +193,21 @@ class L1DecayRegularizer(WeightDecayRegularizer):
 Examples:
     .. code-block:: python

+        import paddle.fluid as fluid
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
+        with fluid.program_guard(main_prog, startup_prog):
+            data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            hidden = fluid.layers.fc(input=data, size=128, act='relu')
+            prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+            loss = fluid.layers.cross_entropy(input=prediction, label=label)
+            avg_loss = fluid.layers.mean(loss)
         optimizer = fluid.optimizer.Adagrad(
             learning_rate=1e-4,
             regularization=fluid.regularizer.L1DecayRegularizer(
                 regularization_coeff=0.1))
-        optimizer.minimize(avg_cost)
+        optimizer.minimize(avg_loss)
 """
 def __init__(self, regularization_coeff=0.0):
...
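For context on what the two regularizers in these examples add to the training objective: L2 decay penalizes the squared magnitude of each parameter, while L1 decay penalizes its absolute value and therefore encourages sparsity. The conventional forms are shown below; the exact constant factor applied inside the framework may differ, so read this as the textbook definition rather than Paddle's literal implementation.

.. math::

    L_2:\ \mathrm{loss}_{reg} = \mathrm{loss} + \lambda \sum_i w_i^2
    \qquad
    L_1:\ \mathrm{loss}_{reg} = \mathrm{loss} + \lambda \sum_i \lvert w_i \rvert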