diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 24787c4ce338ec787dee73858d4c8bcb5de7298f..d8ee400e35082b857937e59afdc16d28423f4bc1 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -401,7 +401,7 @@ void VarBase::_CopyGradientFrom(const VarBase& src) {
     auto& src_tensor = src.Var().Get<framework::LoDTensor>();
     PADDLE_ENFORCE_EQ(src_tensor.IsInitialized(), true,
                       platform::errors::InvalidArgument(
-                          "tensor has not been initialized", src.Name()));
+                          "Tensor %s has not been initialized", src.Name()));
     auto* grad_t = grad_var_->MutableVar()->GetMutable<framework::LoDTensor>();
     auto* var_ = MutableVar()->GetMutable<framework::LoDTensor>();
     grad_t->ShareDataWith(src_tensor);
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index a7e3c700bdd932843ee627b318a281f59d8e557f..445654efb54a676e132154b21e65704ce90d17e0 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -1910,54 +1910,50 @@ void BindImperative(py::module *m_ptr) {
       .def("_clear",
            [](const std::shared_ptr<imperative::VarBase> &self) {
              auto *t = self->MutableVar()->GetMutable<framework::LoDTensor>();
-             PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
-                               platform::errors::InvalidArgument(
-                                   "tensor has not been initialized"));
+             PADDLE_ENFORCE_EQ(
+                 t->IsInitialized(), true,
+                 platform::errors::InvalidArgument(
+                     "Tensor %s has not been initialized!", self->Name()));
              t->clear();
            })
       .def("_offset",
            [](const std::shared_ptr<imperative::VarBase> &self) {
              auto *t = self->MutableVar()->GetMutable<framework::LoDTensor>();
-             PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
-                               platform::errors::InvalidArgument(
-                                   "tensor has not been initialized"));
+             PADDLE_ENFORCE_EQ(
+                 t->IsInitialized(), true,
+                 platform::errors::InvalidArgument(
+                     "Tensor %s has not been initialized!", self->Name()));
              return t->offset();
            })
-      .def("_share_buffer_with",
+      .def("_share_buffer_to",
           [](const std::shared_ptr<imperative::VarBase> &self,
-             std::shared_ptr<imperative::VarBase> &target_t) {
-            auto *t = self->MutableVar()->GetMutable<framework::LoDTensor>();
-            auto *t_t =
-                target_t->MutableVar()->GetMutable<framework::LoDTensor>();
-            PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
-                              platform::errors::InvalidArgument(
-                                  "tensor has not been initialized"));
-            PADDLE_ENFORCE_EQ(t_t->IsInitialized(), true,
-                              platform::errors::InvalidArgument(
-                                  "tensor has not been initialized"));
-            t->ShareBufferWith(*t_t);
+             std::shared_ptr<imperative::VarBase> &dst) {
+            auto *src = self->MutableVar()->GetMutable<framework::LoDTensor>();
+            auto *dst_ = dst->MutableVar()->GetMutable<framework::LoDTensor>();
+            PADDLE_ENFORCE_EQ(
+                src->IsInitialized(), true,
+                platform::errors::InvalidArgument(
+                    "Tensor %s has not been initialized!", self->Name()));
+            dst_->ShareBufferWith(*src);
           })
       .def("_is_shared_buffer_with",
           [](const std::shared_ptr<imperative::VarBase> &self,
-             std::shared_ptr<imperative::VarBase> &target_t) {
-            auto *t = self->MutableVar()->GetMutable<framework::LoDTensor>();
-            auto *t_t =
-                target_t->MutableVar()->GetMutable<framework::LoDTensor>();
-            PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
-                              platform::errors::InvalidArgument(
-                                  "tensor has not been initialized"));
-            PADDLE_ENFORCE_EQ(t_t->IsInitialized(), true,
-                              platform::errors::InvalidArgument(
-                                  "tensor has not been initialized"));
-            return t->IsSharedBufferWith(*t_t);
+             std::shared_ptr<imperative::VarBase> &dst) {
+            auto *src = self->MutableVar()->GetMutable<framework::LoDTensor>();
+            auto *dst_ = dst->MutableVar()->GetMutable<framework::LoDTensor>();
+            if (!src->IsInitialized() || !dst_->IsInitialized()) {
+              return false;
+            }
+            return dst_->IsSharedBufferWith(*src);
           })
       .def("_slice",
           [](const std::shared_ptr<imperative::VarBase> &self,
             int64_t begin_idx, int64_t end_idx) {
            auto *t = self->MutableVar()->GetMutable<framework::LoDTensor>();
-           PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
-                             platform::errors::InvalidArgument(
-                                 "tensor has not been initialized"));
+           PADDLE_ENFORCE_EQ(
+               t->IsInitialized(), true,
+               platform::errors::InvalidArgument(
+                   "Tensor %s has not been initialized!", self->Name()));
            return t->Slice(begin_idx, end_idx);
          })
       .def("_copy_gradient_from",
@@ -1966,9 +1962,10 @@ void BindImperative(py::module *m_ptr) {
       .def("_numel",
           [](std::shared_ptr<imperative::VarBase> &self) {
            auto *t = self->MutableVar()->GetMutable<framework::LoDTensor>();
-           PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
-                             platform::errors::InvalidArgument(
-                                 "tensor has not been initialized"));
+           PADDLE_ENFORCE_EQ(
+               t->IsInitialized(), true,
+               platform::errors::InvalidArgument(
+                   "Tensor %s has not been initialized!", self->Name()));
            return t->numel();
          })
       .def_property("name", &imperative::VarBase::Name,
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index e2a90ed135b90920b469de1e9185d957f48aa7ff..34e679e752a8690fe2107519b4b740befa2b5ff3 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -1189,15 +1189,15 @@ class TestVarBaseOffset(unittest.TestCase):
         self.assertEqual(actual_x._offset(), expected_offset)
 
 
-class TestVarBaseShareBufferWith(unittest.TestCase):
-    def test_share_buffer_with(self):
+class TestVarBaseShareBufferTo(unittest.TestCase):
+    def test_share_buffer_To(self):
         paddle.disable_static()
-        np_x = np.random.random((3, 8, 8))
-        np_y = np.random.random((3, 8, 8))
-        x = paddle.to_tensor(np_x, dtype="float64")
-        y = paddle.to_tensor(np_y, dtype="float64")
-        x._share_buffer_with(y)
-        self.assertEqual(x._is_shared_buffer_with(y), True)
+        np_src = np.random.random((3, 8, 8))
+        src = paddle.to_tensor(np_src, dtype="float64")
+        # empty_var
+        dst = core.VarBase()
+        src._share_buffer_to(dst)
+        self.assertEqual(src._is_shared_buffer_with(dst), True)
 
 
 class TestVarBaseTo(unittest.TestCase):
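
Note (not part of the patch): a minimal Python sketch of how the renamed binding is exercised, mirroring the updated unit test above. The import lines are assumptions about the test file's surrounding context, which the hunk does not show; only core.VarBase(), _share_buffer_to, and _is_shared_buffer_with come from the diff itself.

    import numpy as np
    import paddle
    from paddle.fluid import core  # assumed import path for the core VarBase type

    paddle.disable_static()

    # Source tensor backed by real storage.
    np_src = np.random.random((3, 8, 8))
    src = paddle.to_tensor(np_src, dtype="float64")

    # Destination starts as an empty VarBase; _share_buffer_to makes dst's
    # LoDTensor share src's allocation instead of copying the data.
    dst = core.VarBase()
    src._share_buffer_to(dst)

    # With this patch, _is_shared_buffer_with returns False when either tensor
    # is uninitialized (rather than raising), and True once the buffers alias.
    assert src._is_shared_buffer_with(dst)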