diff --git a/paddle/fluid/imperative/py_layer_fwd.h b/paddle/fluid/imperative/py_layer_fwd.h
index 79251d7bf7ad6b2ffe0168649c207878d16336da..159371970dcacf0701eb056fdf5caf5c55c473ab 100644
--- a/paddle/fluid/imperative/py_layer_fwd.h
+++ b/paddle/fluid/imperative/py_layer_fwd.h
@@ -226,6 +226,15 @@ py::object PyLayerApply(const platform::Place& place, const py::handle& cls,
     }
   }
   if (if_inplace) {
+    // Inplace forward: a leaf Var that requires grad must not be overwritten.
+    for (auto& t : input_vars) {
+      PADDLE_ENFORCE_EQ(t->IsLeaf() && !t->OverridedStopGradient(), false,
+                        platform::errors::InvalidArgument(
+                            "Leaf Var (%s) that doesn't stop gradient can't "
+                            "use inplace strategy.",
+                            t->Name()));
+    }
+
     inplace_map["X"] = "Out";
   }

diff --git a/python/paddle/fluid/tests/unittests/test_pylayer_op.py b/python/paddle/fluid/tests/unittests/test_pylayer_op.py
index a852b4c90421acb7865abf2aeb58f7a0b346bf41..200273c6066f9df569d7e15012a42e607d20bad5 100644
--- a/python/paddle/fluid/tests/unittests/test_pylayer_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pylayer_op.py
@@ -406,6 +406,32 @@ class TestPyLayer(unittest.TestCase):
         z.backward()
         self.assertTrue(data.grad is not None)

+    def test_pylayer_inplace_and_leaf_exception(self):
+        class cus_pylayer_op(PyLayer):
+            @staticmethod
+            def forward(ctx, x):
+                return x
+
+            @staticmethod
+            def backward(ctx, dy):
+                return dy
+
+        class Layer(paddle.nn.Layer):
+            def __init__(self):
+                super(Layer, self).__init__()
+
+            def forward(self, data):
+                z = cus_pylayer_op.apply(data)
+                return z.mean()
+
+        for i in range(2):
+            data = paddle.ones([2, 3], dtype="float64") / (i + 1)
+            data.stop_gradient = False
+            layer = Layer()
+
+            with self.assertRaises(ValueError):
+                z = layer(data)
+
     def test_backward_in_backward(self):
         class cus_tanh(PyLayer):
             @staticmethod