Commit 336160e6 authored by minqiyang

Complete imperative optimizer implementation

test=develop
Parent 28013a50
......@@ -84,7 +84,6 @@ class Tracer {
*op->output_vars_ = outputs;
for (size_t i = 0; i < outputs.size(); ++i) {
const std::string vname = outputs[i]->var_desc_->Name();
LOG(ERROR) << "output name: " << vname;
framework::Variable* var = root_scope_->Var(vname);
if (!var->IsInitialized()) {
framework::VarDesc* var_desc = block->FindVar(vname);
......
......@@ -139,7 +139,6 @@ PYBIND11_MODULE(core, m) {
.def_property("value",
[](const imperative::VarBase &self) { return self.var_; },
[](imperative::VarBase &self, framework::Variable *var) {
LOG(ERROR) << "set var to pointer: " << var;
self.var_ = var;
},
py::return_value_policy::reference)
......
......@@ -1289,13 +1289,22 @@ class Block(object):
Operator: the append Operator.
"""
op_desc = self.desc.append_op()
-        op = Operator(block=self, desc=op_desc, *args, **kwargs)
-        if _in_imperative_mode():
-            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
-                                       [v._ivar for v in op.outputs], self.desc,
-                                       kwargs.get("stop_gradient", False))
-        self.ops.append(op)
-        return op
+        op = Operator(
+            block=self,
+            desc=op_desc,
+            type=kwargs.get("type", None),
+            inputs=kwargs.get("inputs", None),
+            outputs=kwargs.get("outputs", None),
+            attrs=kwargs.get("attrs", None))
+        self.ops.append(op)
+        self._trace_op(op, kwargs.get("stop_gradient", False))
+        return op
+
+    def _trace_op(self, op, stop_gradient=False):
+        if _in_imperative_mode():
+            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
+                                       [v._ivar for v in op.outputs], self.desc,
+                                       stop_gradient)
def _insert_op(self, index, *args, **kwargs):
"""
......@@ -1342,12 +1351,15 @@ class Block(object):
def _prepend_op(self, *args, **kwargs):
op_desc = self.desc._prepend_op()
-        op = Operator(self, op_desc, *args, **kwargs)
-        if _in_imperative_mode():
-            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
-                                       [v._ivar for v in op.outputs], self.desc,
-                                       kwargs.get("stop_gradient", False))
+        op = Operator(
+            self,
+            op_desc,
+            type=kwargs.get("type", None),
+            inputs=kwargs.get("inputs", None),
+            outputs=kwargs.get("outputs", None),
+            attrs=kwargs.get("attrs", None))
         self.ops.insert(0, op)
+        self._trace_op(op, kwargs.get("stop_gradient", False))
return op
def _sync_with_cpp(self):
......
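The framework.py hunks above deduplicate the tracing logic: append_op and _prepend_op previously each checked _in_imperative_mode() and called the tracer inline, and now both delegate to a single _trace_op helper; the Operator is also built from explicit keyword arguments, presumably so that tracing-only arguments such as stop_gradient are no longer forwarded into Operator's kwargs. A standalone toy sketch of that shape (not Paddle code; names invented for illustration):

    class ToyBlock(object):
        """Illustrates the refactor: both insertion paths share one tracing hook."""

        def __init__(self, tracer=None):
            self.ops = []
            self._tracer = tracer  # stands in for _imperative_tracer()

        def append_op(self, op, stop_gradient=False):
            self.ops.append(op)
            self._trace_op(op, stop_gradient)
            return op

        def _prepend_op(self, op, stop_gradient=False):
            self.ops.insert(0, op)
            self._trace_op(op, stop_gradient)
            return op

        def _trace_op(self, op, stop_gradient=False):
            # Stands in for the _in_imperative_mode() check in the real code.
            if self._tracer is not None:
                self._tracer.trace(op, stop_gradient)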
......@@ -23,7 +23,6 @@ import numpy as np
from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
from . import unique_name
from paddle.fluid.imperative import base as imperative_base
- from paddle.fluid.imperative.base import to_variable
from paddle.fluid.initializer import Constant, Xavier
from .param_attr import ParamAttr, WeightNormParamAttr
from . import core
......@@ -51,7 +50,7 @@ class LayerHelper(object):
return default_startup_program()
def to_variable(self, x):
-        return base.to_variable(x, self.main_program.current_block())
+        return imperative_base.to_variable(x, self.main_program.current_block())
def append_op(self, *args, **kwargs):
return self.main_program.current_block().append_op(*args, **kwargs)
......@@ -371,7 +370,7 @@ class LayerHelper(object):
def set_variable_initializer(self, var, initializer):
assert isinstance(var, Variable)
if imperative_base.enabled():
-            initializer(var, self.startup_program.global_block())
+            initializer(var, var.block)
else:
self.startup_program.global_block().create_var(
name=var.name,
......
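The set_variable_initializer change matters because, under the imperative guard, the startup program is never executed: an init op only takes effect if it is traced into the block that actually owns the variable. A minimal hedged sketch of the imperative-mode path (assuming a freshly created parameter w and the Constant initializer; static mode still goes through the startup program as shown in the diff context):

    from paddle.fluid.initializer import Constant
    from paddle.fluid.imperative import base as imperative_base

    def init_param(w, value=0.0):
        # Imperative mode: the init op lands in w.block, is traced by
        # Block._trace_op, and therefore executes immediately.
        if imperative_base.enabled():
            Constant(value=value)(w, w.block)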
......@@ -302,7 +302,7 @@ class Optimizer(object):
This method combines interface `append_backward()` and
`create_optimization_pass()` into one.
"""
-        if imperative_base.enabled:
+        if imperative_base.enabled():
if parameter_list is not None:
params_grads = parameter_list
else:
......@@ -315,7 +315,7 @@ class Optimizer(object):
block=loss.block,
name=param._ivar._grad_name(),
stop_gradient=True)
-                    grad_var._value = param._ivar.grad_value()
+                    grad_var._value = param._ivar.grad_value
params_grads.append((param, grad_var))
optimize_ops = self._create_optimization_pass(params_grads, loss,
......
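Two small but important fixes in optimizer.py: the imperative check now actually calls enabled() (a bare function reference is always truthy, so the old test took the imperative branch unconditionally), and the parameter gradient is fetched as param._ivar.grad_value rather than through a call. A tiny self-contained demonstration of the first point:

    def enabled():
        """Stand-in for imperative_base.enabled."""
        return False

    assert bool(enabled) is True     # the old check: a function object is always truthy
    assert enabled() is False        # the fixed check: the actual flag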
......@@ -43,15 +43,6 @@ class SimpleImgConvPool(fluid.imperative.PyLayer):
bias_attr=None):
super(SimpleImgConvPool, self).__init__()
-        # groups = 1
-        # dilation = [1, 1]
-        # pad = [0, 0]
-        # stride = [1, 1]
-        # input_size = [2, 3, 5, 5]  # NCHW
-        # assert np.mod(input_size[1], groups) == 0
-        # f_c = input_size[1] // groups
-        # filter_size = [6, f_c, 3, 3]
self._conv2d = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
......@@ -108,7 +99,9 @@ class TestImperativeMnist(unittest.TestCase):
def test_mnist_cpu_float32(self):
with fluid.imperative.guard():
mnist = MNIST()
+            sgd = SGDOptimizer(learning_rate=1e-3)
+            for i in range(1):
x_data = np.random.rand(128, 1, 28, 28).astype('float32')
img = to_variable(x_data)
y_data = np.random.rand(128, 1).astype('int64')
......@@ -120,36 +113,8 @@ class TestImperativeMnist(unittest.TestCase):
out._backward()
filter_grad = mnist._simple_img_conv_pool_1._conv2d._filter_param._gradient(
)
-            # print(filter_grad)
-            sgd = SGDOptimizer(learning_rate=1e-3)
sgd.minimize(out)
-        # np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-        # with fluid.imperative.guard():
-        #     mlp = MLP()
-        #     out = mlp(np_inp)
-        #     dy_out = out._numpy()
-        #     out._backward()
-        #     dy_grad = mlp._fc1._w._gradient()
-        # with new_program_scope():
-        #     inp = fluid.layers.data(
-        #         name="inp", shape=[2, 2], append_batch_size=False)
-        #     mlp = MLP()
-        #     out = mlp(inp)
-        #     param_grads = fluid.backward.append_backward(
-        #         out, parameter_list=[mlp._fc1._w.name])[0]
-        #     exe = fluid.Executor(fluid.CPUPlace())
-        #     exe.run(fluid.default_startup_program())
-        #     static_out, static_grad = exe.run(
-        #         feed={inp.name: np_inp},
-        #         fetch_list=[out.name, param_grads[1].name])
-        # self.assertTrue(np.allclose(dy_out, static_out))
-        # self.assertTrue(np.allclose(dy_grad, static_grad))
if __name__ == '__main__':
unittest.main()
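Taken together, the test now exercises the end-to-end imperative training step: build the layers under the guard, run a forward pass, call _backward(), and hand the result to SGDOptimizer.minimize, which takes the imperative branch from the optimizer.py hunk. A condensed hedged sketch of that pattern, reusing the MNIST module defined in this test file; reduce_mean is a stand-in for the test's loss computation, which sits in an elided part of the diff:

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.imperative.base import to_variable
    from paddle.fluid.optimizer import SGDOptimizer

    with fluid.imperative.guard():
        mnist = MNIST()
        sgd = SGDOptimizer(learning_rate=1e-3)
        for i in range(1):
            x = to_variable(np.random.rand(128, 1, 28, 28).astype('float32'))
            out = mnist(x)
            loss = fluid.layers.reduce_mean(out)  # stand-in for the elided loss
            loss._backward()                      # fill parameter gradients
            sgd.minimize(loss)                    # imperative optimizer path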