提交 cc1a9f42 编写于 作者: Z zhongpu 提交者: hong

fix sample code in paddle/fluid/imperative/README.md (#22141)

* fix sample code, test=develop

* polish code style, test=develop
上级 bde7ee97
@@ -169,30 +169,32 @@ with fluid.imperative.guard():
    dy_grad = var_inp._gradient()
class MLP(fluid.Layer):
    def __init__(self, input_size):
        super(MLP, self).__init__()
        self._linear1 = Linear(input_size,
                               3,
                               fluid.ParamAttr(
                                   initializer=fluid.initializer.Constant(value=0.1)))
        self._linear2 = Linear(3,
                               4,
                               fluid.ParamAttr(
                                   initializer=fluid.initializer.Constant(value=0.1)))

    def forward(self, inputs):
        x = self._linear1(inputs)
        x = self._linear2(x)
        x = fluid.layers.reduce_sum(x)
        return x
np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
with fluid.dygraph.guard():
    var_inp = fluid.dygraph.base.to_variable(np_inp)
    mlp = MLP(input_size=2)
    out = mlp(var_inp)
    dy_out = out.numpy()
    out.backward()
```
# Plan
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册