From d7b159355c02b336895531ea2b8a439727d988bf Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Tue, 15 Jan 2019 15:46:06 +0800
Subject: [PATCH] add more doc

test=develop
---
 paddle/fluid/imperative/README.md | 33 ++++++++++++++++++++++++++-------
 1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/imperative/README.md b/paddle/fluid/imperative/README.md
index adabb5b0a..c23a77904 100644
--- a/paddle/fluid/imperative/README.md
+++ b/paddle/fluid/imperative/README.md
@@ -38,8 +38,6 @@ class PyLayer(core.PyLayer):
   def backward(inputs):
     # any backward logic implemented with numpy io.
-
-
 ```
@@ -62,9 +60,13 @@ class IVariable(PyVarBase):
   def __init__(self):
     self._ivar = core.VarBase()
 
+  # Move var to a device.
   def to(device): pass
+  # Get var value.
   def value(): pass
+  # Trigger backward.
   def backward(): pass
+  # Get var's gradient value.
   def gradient_value(): pass
 
   # operators to override.
 ```
@@ -100,18 +102,22 @@
 Lots of research already.
 https://autodiff-workshop.github.io/
 https://en.wikipedia.org/wiki/Automatic_differentiation
 
-## Execution Engine
+Basically, trace the forward execution and perform autodiff
+when needed.
 
-Lazy execution of pushed C++ operations.
+* Can be triggered by `backward()`.
+* Can select a block of code to trace and run autodiff on.
+* Use `require_grad` to drop forward subgraphs that don't need autodiff.
 
-## Tests
+## Execution Engine
 
-* All op tests run once in static graph, once in imperative mode.
+Lazy execution of pushed C++ operations.
 
 ## Refactor
 
 * All function layers with parameters converted to class Layers.
-* Models converted to imperative mode.
+* Existing models converted to imperative mode.
+* All op tests run once in static graph mode and once in imperative mode.
 
 # Examples
@@ -140,6 +146,15 @@ class MyPyLayer(fluid.imperative.PyLayer):
         return np.array(dout) * (1 - np.square(np.array(out)))
 
 
+np_inp = np.ones([2, 2], np.float32)
+with fluid.imperative.guard():
+    my_py_layer = MyPyLayer()
+    var_inp = fluid.imperative.base.to_variable(np_inp)  # assumed helper that wraps the numpy input as a Variable
+    outs = my_py_layer(var_inp)
+    dy_out = np.sum(outs[0]._numpy())
+    outs[0]._backward()
+    dy_grad = var_inp._gradient()
+
 class MLP(fluid.imperative.Layer):
     def __init__(self):
         super(MLP, self).__init__()
@@ -171,6 +186,10 @@ class MLP(fluid.imperative.Layer):
 
 TODO
 
+## I/O
+
+TODO
+
 # Plan
 
 2.1,3 fulltime, Can run a few simple models. (Currently, 2 20% engs)
-- 
GitLab
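
The autodiff notes added by this patch stay at the bullet level: trace the forward execution, run autodiff when `backward()` is called, and use `require_grad` to drop subgraphs. As a purely illustrative aside, and not PaddlePaddle's implementation, the sketch below shows one way a tape-based tracer can record forward ops and skip entries whose inputs do not require gradients. The `Tape` and `Var` names and this particular `require_grad` handling are assumptions made for the example.

```python
# Illustrative sketch only, not PaddlePaddle code: a minimal tape-based
# autodiff. `Tape`, `Var` and this `require_grad` handling are hypothetical.
import numpy as np


class Var(object):
    def __init__(self, value, require_grad=True):
        self.value = np.asarray(value, dtype=np.float32)
        self.require_grad = require_grad
        self.grad = None


class Tape(object):
    """Records each forward op so backward() can replay them in reverse."""

    def __init__(self):
        self.entries = []  # (inputs, output, backward_fn) tuples

    def mul(self, x, y):
        out = Var(x.value * y.value,
                  require_grad=x.require_grad or y.require_grad)

        def backward_fn(dout):
            # d(x*y)/dx = y, d(x*y)/dy = x
            return [dout * y.value, dout * x.value]

        # Ops that don't need gradients are never recorded, which is the
        # "drop forward subgraphs" idea from the bullets above.
        if out.require_grad:
            self.entries.append(([x, y], out, backward_fn))
        return out

    def backward(self, out):
        out.grad = np.ones_like(out.value)
        for inputs, output, backward_fn in reversed(self.entries):
            for var, g in zip(inputs, backward_fn(output.grad)):
                if var.require_grad:
                    var.grad = g if var.grad is None else var.grad + g


tape = Tape()
x = Var(np.ones([2, 2]))
w = Var(np.full([2, 2], 3.0), require_grad=False)  # excluded from autodiff
y = tape.mul(x, w)
tape.backward(y)
print(x.grad)  # all 3.0: dy/dx == w.value
print(w.grad)  # None: w never required a gradient
```

The point of the sketch is only the control flow: forward execution populates the tape, `backward()` walks it in reverse, and anything marked as not requiring gradients is pruned before the tape is ever built.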
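The Execution Engine section is a single line, "lazy execution of pushed C++ operations". To make the idea concrete, here is a small Python sketch, again hypothetical rather than the real engine: pushed ops are only queued, and the queue is flushed the first time a caller asks for a value. The `LazyEngine`, `push`, and `value` names are invented for this illustration.

```python
# Illustrative sketch only: lazy execution of queued ops, not the real engine.
# `LazyEngine`, `push` and `value` are made-up names for this example.
import numpy as np


class LazyEngine(object):
    """Queues pushed ops; nothing runs until a result is actually needed."""

    def __init__(self):
        self.pending = []  # queued (fn, args, out_slot) tuples
        self.results = {}  # materialized outputs, keyed by slot id

    def push(self, fn, *args):
        # Hand back a slot id immediately; execution is deferred.
        slot = len(self.results) + len(self.pending)
        self.pending.append((fn, args, slot))
        return slot

    def value(self, slot):
        # Flush: run everything queued so far, then return the result.
        for fn, args, out_slot in self.pending:
            resolved = [self.results[a] if isinstance(a, int) else a
                        for a in args]
            self.results[out_slot] = fn(*resolved)
        self.pending = []
        return self.results[slot]


engine = LazyEngine()
a = engine.push(np.add, np.ones([2, 2]), np.ones([2, 2]))  # queued only
b = engine.push(np.multiply, a, np.full([2, 2], 2.0))      # still queued
print(engine.value(b))  # both ops run here; prints a 2x2 matrix of 4.0
```

Deferring execution this way lets an engine batch or reorder the pushed operations before anything runs; the sketch just shows the queue-then-flush shape of that design.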