Unverified · Commit f096af83 · Authored by Shibo Tao, committed by GitHub

fix document sample. test=develop (#28721)

Parent: 3c5f2cac
@@ -78,23 +78,21 @@ def save_inference_model(path_prefix, feed_vars, fetch_vars, executor):
     .. code-block:: python

         import paddle
-        import paddle.fluid as fluid

         paddle.enable_static()

         path_prefix = "./infer_model"

         # User defined network, here a softmax regession example
-        image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
-        feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
-        predict = fluid.layers.fc(input=image, size=10, act='softmax')
+        image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
+        label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
+        predict = paddle.static.nn.fc(image, 10, activation='softmax')

-        loss = fluid.layers.cross_entropy(input=predict, label=label)
-        avg_loss = fluid.layers.mean(loss)
+        loss = paddle.nn.functional.cross_entropy(predict, label)
+        avg_loss = paddle.tensor.stat.mean(loss)

-        exe = fluid.Executor(fluid.CPUPlace())
-        exe.run(fluid.default_startup_program())
+        exe = paddle.static.Executor(paddle.CPUPlace())
+        exe.run(paddle.static.default_startup_program())

         # Feed data and train process
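For readers applying the same migration, the updated (right-hand) side of this hunk can be assembled into one runnable script. The training feed and the final save call are not shown in the hunk; the random batch and the `paddle.static.save_inference_model(path_prefix, [image], [predict], exe)` line below are assumptions sketched from the signature in the hunk header.

```python
import numpy as np
import paddle

paddle.enable_static()

path_prefix = "./infer_model"

# User-defined network: the softmax regression from the updated docstring example.
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')

loss = paddle.nn.functional.cross_entropy(predict, label)
avg_loss = paddle.tensor.stat.mean(loss)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())

# Stand-in for the "feed data and train" step: run one random batch (an assumption,
# not part of the hunk) so the program has been executed at least once.
exe.run(paddle.static.default_main_program(),
        feed={'img': np.random.random((4, 28, 28)).astype('float32'),
              'label': np.random.randint(0, 10, size=(4, 1)).astype('int64')},
        fetch_list=[avg_loss])

# Persist the pruned inference program and its parameters; this call is assumed
# from the signature shown in the hunk header:
# save_inference_model(path_prefix, feed_vars, fetch_vars, executor).
paddle.static.save_inference_model(path_prefix, [image], [predict], exe)
```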
@@ -223,22 +221,20 @@ def load_inference_model(path_prefix, executor, **configs):
     .. code-block:: python

         import paddle
-        import paddle.fluid as fluid
         import numpy as np

         paddle.enable_static()

         # Build the model
-        startup_prog = fluid.default_startup_program()
-        main_prog = fluid.default_main_program()
-        with fluid.program_guard(main_prog, startup_prog):
-            image = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
-            w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
-            b = fluid.layers.create_parameter(shape=[200], dtype='float32')
-            hidden_w = fluid.layers.matmul(x=image, y=w)
-            hidden_b = fluid.layers.elementwise_add(hidden_w, b)
-        place = fluid.CPUPlace()
-        exe = fluid.Executor(place)
+        startup_prog = paddle.static.default_startup_program()
+        main_prog = paddle.static.default_main_program()
+        with paddle.static.program_guard(main_prog, startup_prog):
+            image = paddle.static.data(name="img", shape=[64, 784])
+            w = paddle.create_parameter(shape=[784, 200], dtype='float32')
+            b = paddle.create_parameter(shape=[200], dtype='float32')
+            hidden_w = paddle.matmul(x=image, y=w)
+            hidden_b = paddle.add(hidden_w, b)
+        exe = paddle.static.Executor(paddle.CPUPlace())
         exe.run(startup_prog)

         # Save the inference model
@@ -247,7 +243,7 @@ def load_inference_model(path_prefix, executor, **configs):
         [inference_program, feed_target_names, fetch_targets] = (
             paddle.static.io.load_inference_model(path_prefix, exe))
-        tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
+        tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
         results = exe.run(inference_program,
                       feed={feed_target_names[0]: tensor_img},
                       fetch_list=fetch_targets)
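Likewise, a self-contained sketch of the migrated `load_inference_model` example from the two hunks above. The intermediate save step is elided between the hunks, so the `paddle.static.save_inference_model` call below is an assumption added only so the script runs end to end.

```python
import numpy as np
import paddle

paddle.enable_static()

# Build a small static graph: one matmul plus a bias add, as in the updated example.
startup_prog = paddle.static.default_startup_program()
main_prog = paddle.static.default_main_program()
with paddle.static.program_guard(main_prog, startup_prog):
    image = paddle.static.data(name="img", shape=[64, 784])
    w = paddle.create_parameter(shape=[784, 200], dtype='float32')
    b = paddle.create_parameter(shape=[200], dtype='float32')
    hidden_w = paddle.matmul(x=image, y=w)
    hidden_b = paddle.add(hidden_w, b)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)

# Save the inference model first. This step sits between the two hunks and is
# reconstructed here as an assumption so the load call below has files to read.
path_prefix = "./infer_model"
paddle.static.save_inference_model(path_prefix, [image], [hidden_b], exe)

# Load the model back and run one random batch through the loaded program.
[inference_program, feed_target_names, fetch_targets] = (
    paddle.static.io.load_inference_model(path_prefix, exe))
tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
results = exe.run(inference_program,
                  feed={feed_target_names[0]: tensor_img},
                  fetch_list=fetch_targets)
```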