diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 612503768079472ba233ee3fcd43a47fdba9a0cc..342cb68ab2bf8ceb543317ed8d8f2356ef6b2cde 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -188,11 +188,13 @@ std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
   std::vector<Variable*> ret;
   for (size_t i = 0; i < input_vars_->size(); ++i) {
     bool found = false;
+    VarBase* origin_var = (*input_vars_)[i];
     for (const std::string& outvar : grad_op_desc_->OutputArgumentNames()) {
       Variable* var = scope->FindVar(outvar);
-      VarBase* origin_var = (*input_vars_)[i];
       std::string orig_var = grad_to_var_->at(outvar);
-      PADDLE_ENFORCE(origin_var->var_desc_->Name() == orig_var);
+      if (origin_var->var_desc_->Name() != orig_var) {
+        continue;
+      }
       VLOG(3) << "apply grad " << outvar << " with origin " << orig_var;
       origin_var->ApplyGrad(scope, var);
       found = true;
diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
index 433d07c0e5aa0986ab1e9fe349ef865d2851c0c0..97772dc110135d9d2533e1574933d49f7c8cd346 100644
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -43,9 +43,12 @@ void CreateGradOp(const framework::OpDesc& op_desc,
 
 class Tracer {
  public:
-  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
+  explicit Tracer(framework::BlockDesc* root_block,
+                  framework::BlockDesc* startup_block)
+      : root_block_(root_block), startup_block_(startup_block) {
     root_scope_ = new framework::Scope();
     scopes_[root_block_] = root_scope_;
+    scopes_[startup_block_] = root_scope_;
   }
 
   virtual ~Tracer() { delete root_scope_; }
@@ -80,6 +83,8 @@
       } else {
         op->pre_ops_->push_back(nullptr);
       }
+      VLOG(3) << "input vname " << vname << " "
+              << var->Get<framework::LoDTensor>().dims().size();
     }
 
     *op->output_vars_ = outputs;
@@ -98,12 +103,19 @@
       outputs[i]->pre_op_ = op;
       outputs[i]->pre_op_out_idx_ = i;
     }
+
+    VLOG(3) << "tracer running " << op_desc->Type();
     op_base->Run(*scope, platform::CPUPlace());
-    framework::OpDesc* grad_op_desc;
-    auto grad_to_var = new std::unordered_map<std::string, std::string>();
-    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
-    op->grad_op_desc_ = grad_op_desc;
-    op->grad_to_var_ = grad_to_var;
+    if (block == startup_block_) {
+      op->grad_op_desc_ = nullptr;
+      op->grad_to_var_ = nullptr;
+    } else {
+      framework::OpDesc* grad_op_desc;
+      auto grad_to_var = new std::unordered_map<std::string, std::string>();
+      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
+      op->grad_op_desc_ = grad_op_desc;
+      op->grad_to_var_ = grad_to_var;
+    }
     op->block_ = block;
   }
 
@@ -121,6 +133,7 @@
  private:
   std::map<framework::BlockDesc*, framework::Scope*> scopes_;
   framework::BlockDesc* root_block_;
+  framework::BlockDesc* startup_block_;
   framework::Scope* root_scope_;
 };
 
diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc
index 8a111e6065b102fd177b9e313cd87dcf8c22b669..271428408cb26296ff318bb39414ad0e8ecc0ac8 100644
--- a/paddle/fluid/operators/mul_op.cc
+++ b/paddle/fluid/operators/mul_op.cc
@@ -49,7 +49,8 @@ class MulOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_GT(
         y_dims.size(), y_num_col_dims,
         "The input tensor Y's rank of MulOp should be larger than "
-        "y_num_col_dims.");
+        "y_num_col_dims: %ld vs %ld",
+        y_dims.size(), y_num_col_dims);
 
     auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
     auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 34e9c897d9e95feb185083b7c0a6a824d8dc809c..be63fb877869b64035207342e5d4398e481dbc99 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -24,8 +24,9 @@ namespace pybind {
 void BindTracer(pybind11::module *m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
       .def("__init__",
-           [](imperative::Tracer &self, framework::BlockDesc *root_block) {
-             new (&self) imperative::Tracer(root_block);
+           [](imperative::Tracer &self, framework::BlockDesc *root_block,
+              framework::BlockDesc *startup_block) {
+             new (&self) imperative::Tracer(root_block, startup_block);
            })
       .def("trace", &imperative::Tracer::Trace)
       .def("get_scope", &imperative::Tracer::GetScope,
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 17fe8dc3c8a28ad129e2d377820da95e8e7a02d9..34f2f2c2dabbb9ba23b203037e23f884610d445a 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -564,8 +564,11 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
     grad_to_var = dict()
 
     op_desc = _create_op_desc_(
-        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
-            "shape": [1],
+        "fill_constant",
+        {},
+        {"Out": [_append_grad_suffix_(loss.name)]},
+        {
+            "shape": [1],  # TODO(panyx0718): This can be loss.shape.
             "value": 1.0,
             "dtype": loss.dtype,
             "force_cpu": False,
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 089792059465c60da43d02e8389f4e36900c2292..10d441cf3ec6720737746a145d351485fa8fc279 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1316,6 +1316,9 @@ class Block(object):
     def _prepend_op(self, *args, **kwargs):
         op_desc = self.desc._prepend_op()
         op = Operator(self, op_desc, *args, **kwargs)
+        if _in_imperative_mode():
+            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
+                                       [v._ivar for v in op.outputs], self.desc)
         self.ops.insert(0, op)
         return op
 
diff --git a/python/paddle/fluid/imperative/base.py b/python/paddle/fluid/imperative/base.py
index 15d38ddb56c71ef7de67f79cf52cd26070f470cb..aa48ef71aa61086764019ac29abd9cb4c53325fa 100644
--- a/python/paddle/fluid/imperative/base.py
+++ b/python/paddle/fluid/imperative/base.py
@@ -28,7 +28,8 @@ def enabled():
 def guard():
     train = framework.Program()
     startup = framework.Program()
-    tracer = core.Tracer(train.current_block().desc)
+    tracer = core.Tracer(train.current_block().desc,
+                         startup.current_block().desc)
     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
             with framework._imperative_guard(tracer):
diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py
index 1a28f7f4ae35295394b560d79e3dc0cdd5f2beab..044717c31975d671818cae17cd989774c96ed9fa 100644
--- a/python/paddle/fluid/imperative/layers.py
+++ b/python/paddle/fluid/imperative/layers.py
@@ -25,11 +25,9 @@ __all__ = ['PyLayer']
 
 class PyLayer(core.Layer):
     def __init__(self):
-        pass
+        self._built = False
 
     def __call__(self, inputs):
-        # TODO(panyx0718): Support declarative mode as well.
-        assert base.enabled()
         if not isinstance(inputs, list) and not isinstance(inputs, tuple):
             inputs = [inputs]
 
@@ -37,8 +35,15 @@ class PyLayer(core.Layer):
         for x in inputs:
             py_var = base.to_variable(x)
             var_inputs.append(py_var)
+        if not self._built:
+            self._build_once(inputs)
+            self._built = True
+
         outputs = self.forward(var_inputs)
         return outputs
 
+    def _build_once(self, inputs):
+        pass
+
     def forward(self, inputs):
         return []
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 4d8311a0d3ada78e4f6cc54f8990e2a2e2cadc4d..d8bc919784bf85538ef092b29ecdd8c88ae910d0 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -29,6 +29,7 @@ from . import utils
 from .. import unique_name
 from functools import reduce
 from .. import core
+from ..imperative import layers
 
 __all__ = [
     'fc',
@@ -9426,3 +9427,47 @@ def huber_loss(input, label, delta):
             'Residual': residual},
         attrs={'delta': delta})
     return out
+
+
+class FC(layers.PyLayer):
+    def __init__(self,
+                 size,
+                 param_attr=None,
+                 num_flatten_dims=1,
+                 dtype=core.VarDesc.VarType.FP32):
+        super(FC, self).__init__()
+        self._size = size
+        self._num_flatten_dims = num_flatten_dims
+        self._dtype = dtype
+        self._helper = LayerHelper('FC', param_attr=param_attr)
+
+    def _build_once(self, inputs):
+        input_shape = inputs[0].shape
+        param_shape = [
+            reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], 1)
+        ] + [self._size]
+        self._w = self._helper.create_parameter(
+            attr=self._helper.param_attr,
+            shape=param_shape,
+            dtype=self._dtype,
+            is_bias=False)
+
+    def forward(self, inputs):
+        tmp = self._helper.create_variable_for_type_inference(self._dtype)
+        self._helper.append_op(
+            type="mul",
+            inputs={"X": inputs[0],
+                    "Y": self._w},
+            outputs={"Out": tmp},
+            attrs={
+                "x_num_col_dims": self._num_flatten_dims,
+                "y_num_col_dims": 1
+            })
+
+        out = self._helper.create_variable_for_type_inference(self._dtype)
+        self._helper.append_op(
+            type="sum",
+            inputs={"X": [tmp]},
+            outputs={"Out": out},
+            attrs={"use_mkldnn": False})
+        return out
diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py
index b5b6305155d1ef3dcf6ce590c221664754c5bdc8..0fe69d1bd4b1b10c09879871c8cf1fc197d1106b 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative.py
@@ -12,12 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import contextlib
 import unittest
-import sys
 import numpy as np
 
 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.layers.nn import FC
+
+
+@contextlib.contextmanager
+def new_program_scope():
+    prog = fluid.Program()
+    startup_prog = fluid.Program()
+    scope = fluid.core.Scope()
+    with fluid.scope_guard(scope):
+        with fluid.program_guard(prog, startup_prog):
+            yield
 
 
 class MyLayer(fluid.imperative.PyLayer):
@@ -30,6 +41,23 @@ class MyLayer(fluid.imperative.PyLayer):
         return [fluid.layers.elementwise_mul(x, x)]
 
 
+class MLP(fluid.imperative.PyLayer):
+    def __init__(self):
+        super(MLP, self).__init__()
+        self._fc1 = FC(3,
+                       fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)))
+        self._fc2 = FC(4,
+                       fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)))
+
+    def forward(self, inputs):
+        x = self._fc1(inputs[0])
+        x = self._fc2(x)
+        x = fluid.layers.reduce_sum(x)
+        return x
+
+
 class TestImperative(unittest.TestCase):
     def test_layer(self):
         with fluid.imperative.guard():
@@ -39,13 +67,56 @@ class TestImperative(unittest.TestCase):
             l.forward([])
 
     def test_layer_in_out(self):
+        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
         with fluid.imperative.guard():
             l = MyLayer()
-            x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
+            x = l(np_inp)[0]
             self.assertIsNotNone(x)
-            sys.stderr.write("%s output: %s\n" % (x, x._numpy()))
+            dy_out = x._numpy()
             x._backward()
-            sys.stderr.write("grad %s\n" % l._x_for_debug._gradient())
+            dy_grad = l._x_for_debug._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[3], append_batch_size=False)
+            l = MyLayer()
+            x = l(inp)[0]
+            param_grads = fluid.backward.append_backward(
+                x, parameter_list=[l._x_for_debug.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace())
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[x.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
+
+    def test_mlp(self):
+        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
+        with fluid.imperative.guard():
+            mlp = MLP()
+            out = mlp(np_inp)
+            dy_out = out._numpy()
+            out._backward()
+            dy_grad = mlp._fc1._w._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[2, 2], append_batch_size=False)
+            mlp = MLP()
+            out = mlp(inp)
+            param_grads = fluid.backward.append_backward(
+                out, parameter_list=[mlp._fc1._w.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[out.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
 
 
 if __name__ == '__main__':
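
Note (not part of the patch): the snippet below is a minimal usage sketch of the eager-mode FC layer added to python/paddle/fluid/layers/nn.py, modeled on the new test_mlp case in test_imperative.py. Names such as np_inp, fc, loss, dy_out and dy_grad are illustrative only, and the snippet assumes this patch is applied.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.layers.nn import FC

np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)

# Under fluid.imperative.guard(), ops are executed eagerly through the Tracer
# instead of being compiled into a Program and run by an Executor.
with fluid.imperative.guard():
    fc = FC(3,
            fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)))
    out = fc(np_inp)                     # _build_once creates the weight on the first call
    loss = fluid.layers.reduce_sum(out)  # reduce to a scalar before calling _backward()
    dy_out = loss._numpy()               # forward result as a numpy array
    loss._backward()
    dy_grad = fc._w._gradient()          # gradient of the FC weight, also as numpy

As in the updated tests, the same network can be rebuilt in a fresh Program and run through fluid.Executor to cross-check the eager-mode outputs and gradients.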