diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
index de7899055d588648d59c99853da5d820d9c0ba0d..f6dac762fd7074f3bffa26e8b4aa69733009433c 100644
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -84,7 +84,6 @@ class Tracer {
     *op->output_vars_ = outputs;
     for (size_t i = 0; i < outputs.size(); ++i) {
       const std::string vname = outputs[i]->var_desc_->Name();
-      LOG(ERROR) << "output name: " << vname;
       framework::Variable* var = root_scope_->Var(vname);
       if (!var->IsInitialized()) {
         framework::VarDesc* var_desc = block->FindVar(vname);
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 23248a5dee0c15a9ac139d422815aedbe495e2ef..74fee64671eed1822bf24486efbd811ad76dfb1d 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -139,7 +139,6 @@ PYBIND11_MODULE(core, m) {
       .def_property("value",
                     [](const imperative::VarBase &self) { return self.var_; },
                     [](imperative::VarBase &self, framework::Variable *var) {
-                      LOG(ERROR) << "set var to pointer: " << var;
                       self.var_ = var;
                     },
                     py::return_value_policy::reference)
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 6c5dd844601cbfb6c0e6e453928d0426357cfd93..fc00adfbb60698e9749494a85875a2f0ceac098d 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1289,13 +1289,22 @@ class Block(object):
             Operator: the append Operator.
         """
         op_desc = self.desc.append_op()
-        op = Operator(block=self, desc=op_desc, *args, **kwargs)
+        op = Operator(
+            block=self,
+            desc=op_desc,
+            type=kwargs.get("type", None),
+            inputs=kwargs.get("inputs", None),
+            outputs=kwargs.get("outputs", None),
+            attrs=kwargs.get("attrs", None))
+        self.ops.append(op)
+        self._trace_op(op, kwargs.get("stop_gradient", False))
+        return op
+
+    def _trace_op(self, op, stop_gradient=False):
         if _in_imperative_mode():
             _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
                                        [v._ivar for v in op.outputs], self.desc,
-                                       kwargs.get("stop_gradient", False))
-        self.ops.append(op)
-        return op
+                                       stop_gradient)
 
     def _insert_op(self, index, *args, **kwargs):
         """
@@ -1342,12 +1351,15 @@ class Block(object):
 
     def _prepend_op(self, *args, **kwargs):
         op_desc = self.desc._prepend_op()
-        op = Operator(self, op_desc, *args, **kwargs)
-        if _in_imperative_mode():
-            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
-                                       [v._ivar for v in op.outputs], self.desc,
-                                       kwargs.get("stop_gradient", False))
+        op = Operator(
+            self,
+            op_desc,
+            type=kwargs.get("type", None),
+            inputs=kwargs.get("inputs", None),
+            outputs=kwargs.get("outputs", None),
+            attrs=kwargs.get("attrs", None))
         self.ops.insert(0, op)
+        self._trace_op(op, kwargs.get("stop_gradient", False))
         return op
 
     def _sync_with_cpp(self):
diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index 8a8470db46eaf8855473b18d023dfe3c7a97da15..5429a735336d94336544edafc62808e88ab59c17 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -23,7 +23,6 @@ import numpy as np
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
 from paddle.fluid.imperative import base as imperative_base
-from paddle.fluid.imperative.base import to_variable
 from paddle.fluid.initializer import Constant, Xavier
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
@@ -51,7 +50,7 @@ class LayerHelper(object):
         return default_startup_program()
 
     def to_variable(self, x):
-        return base.to_variable(x, self.main_program.current_block())
+        return imperative_base.to_variable(x, self.main_program.current_block())
 
     def append_op(self, *args, **kwargs):
         return self.main_program.current_block().append_op(*args, **kwargs)
@@ -371,7 +370,7 @@ class LayerHelper(object):
     def set_variable_initializer(self, var, initializer):
         assert isinstance(var, Variable)
         if imperative_base.enabled():
-            initializer(var, self.startup_program.global_block())
+            initializer(var, var.block)
         else:
             self.startup_program.global_block().create_var(
                 name=var.name,
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 5cdbe7c10d3ed635062d8df87df3f041e32e9eab..779cb5f961639aa919827a1c1726e974fdf1cbe1 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -302,7 +302,7 @@ class Optimizer(object):
         This method combines interface `append_backward()` and
         `create_optimization_pass()` into one.
         """
-        if imperative_base.enabled:
+        if imperative_base.enabled():
             if parameter_list is not None:
                 params_grads = parameter_list
             else:
@@ -315,7 +315,7 @@ class Optimizer(object):
                     block=loss.block,
                     name=param._ivar._grad_name(),
                     stop_gradient=True)
-                grad_var._value = param._ivar.grad_value()
+                grad_var._value = param._ivar.grad_value
                 params_grads.append((param, grad_var))
 
         optimize_ops = self._create_optimization_pass(params_grads, loss,
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
index 12d605316c7f7aa2e287e1b3c268d82391bc4125..a2e008615c228a3bd2c5eff5aaebc590baccc84f 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
@@ -43,15 +43,6 @@ class SimpleImgConvPool(fluid.imperative.PyLayer):
                  bias_attr=None):
         super(SimpleImgConvPool, self).__init__()
 
-        # groups = 1
-        # dilation = [1, 1]
-        # pad = [0, 0]
-        # stride = [1, 1]
-        # input_size = [2, 3, 5, 5]  # NCHW
-        # assert np.mod(input_size[1], groups) == 0
-        # f_c = input_size[1] // groups
-        # filter_size = [6, f_c, 3, 3]
-
         self._conv2d = Conv2D(
             num_channels=num_channels,
             num_filters=num_filters,
@@ -108,47 +99,22 @@ class TestImperativeMnist(unittest.TestCase):
     def test_mnist_cpu_float32(self):
         with fluid.imperative.guard():
             mnist = MNIST()
-
-            x_data = np.random.rand(128, 1, 28, 28).astype('float32')
-            img = to_variable(x_data)
-            y_data = np.random.rand(128, 1).astype('int64')
-            label = to_variable(y_data)
-            label._stop_gradient = True
-
-            predict = mnist(img)
-            out = fluid.layers.cross_entropy(predict, label)
-            out._backward()
-            filter_grad = mnist._simple_img_conv_pool_1._conv2d._filter_param._gradient(
-            )
-            # print(filter_grad)
-            sgd = SGDOptimizer(learning_rate=1e-3)
-            sgd.minimize(out)
-
-        # np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-        # with fluid.imperative.guard():
-        #     mlp = MLP()
-        #     out = mlp(np_inp)
-        #     dy_out = out._numpy()
-        #     out._backward()
-        #     dy_grad = mlp._fc1._w._gradient()
-
-        # with new_program_scope():
-        #     inp = fluid.layers.data(
-        #         name="inp", shape=[2, 2], append_batch_size=False)
-        #     mlp = MLP()
-        #     out = mlp(inp)
-        #     param_grads = fluid.backward.append_backward(
-        #         out, parameter_list=[mlp._fc1._w.name])[0]
-        #     exe = fluid.Executor(fluid.CPUPlace())
-        #     exe.run(fluid.default_startup_program())
-
-        #     static_out, static_grad = exe.run(
-        #         feed={inp.name: np_inp},
-        #         fetch_list=[out.name, param_grads[1].name])
-
-        #     self.assertTrue(np.allclose(dy_out, static_out))
-        #     self.assertTrue(np.allclose(dy_grad, static_grad))
+            sgd = SGDOptimizer(learning_rate=1e-3)
+
+            for i in range(1):
+                x_data = np.random.rand(128, 1, 28, 28).astype('float32')
+                img = to_variable(x_data)
+                y_data = np.random.rand(128, 1).astype('int64')
+                label = to_variable(y_data)
+                label._stop_gradient = True
+
+                predict = mnist(img)
+                out = fluid.layers.cross_entropy(predict, label)
+                out._backward()
+                filter_grad = mnist._simple_img_conv_pool_1._conv2d._filter_param._gradient(
+                )
+                sgd.minimize(out)
 
 
 if __name__ == '__main__':
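
Note on the framework.py hunks: the refactor builds the `Operator` from explicit kwargs (so tracer-only kwargs such as `stop_gradient` are no longer blindly forwarded through `*args`/`**kwargs`) and funnels both `append_op` and `_prepend_op` through a single `_trace_op` helper. A minimal standalone sketch of that pattern, with invented stand-ins (`FakeOperator`, `FakeTracer`, `FakeBlock`) in place of the real Paddle classes:

```python
# Sketch of the append-then-trace pattern introduced in framework.py.
# FakeOperator/FakeTracer/FakeBlock are stand-ins invented for this
# example; the real classes live in paddle.fluid.framework and the C++ core.


class FakeOperator(object):
    def __init__(self, type=None, inputs=None, outputs=None, attrs=None):
        self.type = type
        self.inputs = inputs or []
        self.outputs = outputs or []
        self.attrs = attrs or {}


class FakeTracer(object):
    def trace(self, op, stop_gradient):
        print("tracing %s (stop_gradient=%s)" % (op.type, stop_gradient))


_tracer = None  # set while inside an imperative guard, None otherwise


class FakeBlock(object):
    def __init__(self):
        self.ops = []

    def append_op(self, *args, **kwargs):
        # Build the op from explicit kwargs instead of forwarding *args/**kwargs,
        # so tracing-only kwargs such as stop_gradient never reach the Operator.
        op = FakeOperator(
            type=kwargs.get("type", None),
            inputs=kwargs.get("inputs", None),
            outputs=kwargs.get("outputs", None),
            attrs=kwargs.get("attrs", None))
        self.ops.append(op)
        self._trace_op(op, kwargs.get("stop_gradient", False))
        return op

    def _trace_op(self, op, stop_gradient=False):
        # Shared by append_op and _prepend_op: trace only in imperative mode.
        if _tracer is not None:
            _tracer.trace(op, stop_gradient)
```

Funneling both entry points through one helper means any future change to tracing behavior lands in a single place instead of two copies that can drift apart.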
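
The first optimizer.py hunk fixes a classic Python gotcha: `imperative_base.enabled` without parentheses evaluates the function object itself, which is always truthy, so the imperative branch was taken even in graph mode. A tiny illustration (the local `enabled` here is a stand-in, not Paddle's):

```python
# Demonstrates the truthiness bug fixed in optimizer.py: a function object
# is always truthy, so forgetting the call parentheses silently enables
# the branch.


def enabled():
    return False  # imperative mode is off


if enabled:       # BUG: tests the function object itself, always True
    print("imperative branch taken even though enabled() is False")

if enabled():     # FIX: actually calls the predicate
    print("never printed")
```

The second hunk goes the other way, dropping the parentheses on `param._ivar.grad_value`; judging from this diff, the binding now exposes `grad_value` as a value rather than a callable, so there the call syntax was the error.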