Unverified · Commit bf412f46 authored by Zhou Wei, committed by GitHub

add tensor clone (#27953)

* add tensor clone

* fix unittest test_var_base
Parent 2e845182
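The behavior this commit adds, condensed from the `clone` docstring in the diff below (a minimal usage sketch, assuming Paddle 2.0 dygraph semantics for `to_tensor` and `backward`):

```python
import paddle

# clone() produces a real copy that stays in the current computation graph,
# so gradients flow through the clone back to the source tensor.
x = paddle.to_tensor(1.0, stop_gradient=False)
clone_x = x.clone()

y = clone_x ** 2
y.backward()

print(clone_x.grad)  # [2.0]
print(x.grad)        # [2.0], gradient propagates from clone_x back to x
```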
@@ -54,7 +54,7 @@ class AssignFunctor {
out_rows.set_height(rows.height());
auto &t = rows.value();
auto *m = out_rows.mutable_value();
-    framework::TensorCopy(t, dev_ctx_.GetPlace(), dev_ctx_, m);
+    framework::TensorCopy(t, t.place(), m);
}
template <typename T>
@@ -70,7 +70,7 @@ class AssignFunctor {
framework::LoDTensor *out) const {
if (lod_tensor.numel() == 0) return;
auto &out_tensor = *out;
-    TensorCopy(lod_tensor, dev_ctx_.GetPlace(), dev_ctx_, &out_tensor);
+    TensorCopy(lod_tensor, lod_tensor.place(), &out_tensor);
out_tensor.set_lod(lod_tensor.lod());
}
......
@@ -718,6 +718,54 @@ void BindImperative(py::module *m_ptr) {
loss.clear_gradient()
print("After clear_gradient {}".format(loss.grad))
)DOC")
.def("clone",
[](std::shared_ptr<imperative::VarBase> &self) {
const auto &tensor = self->Var().Get<framework::LoDTensor>();
PADDLE_ENFORCE_EQ(
tensor.IsInitialized(), true,
platform::errors::InvalidArgument(
"%s has not been initialized", self->Name()));
auto tracer = imperative::GetCurrentTracer();
auto new_var = std::make_shared<imperative::VarBase>(
true, tracer->GenerateUniqueName(self->Name() + "_clone"));
framework::AttributeMap attrs;
imperative::NameVarBaseMap ins = {{"X", {self}}};
imperative::NameVarBaseMap outs = {{"Out", {new_var}}};
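// Clone is implemented by tracing an "assign" op from self to the new
// VarBase, so the cloned Tensor stays in the current autograd graph and
// supports gradient propagation (see the docstring below).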
tracer->TraceOp("assign", ins, outs, attrs);
return new_var;
},
py::return_value_policy::copy, R"DOC(
Returns a new Tensor, which is a clone of the original Tensor, and it remains in the current graph.
It always performs a Tensor copy.
In addition, the cloned Tensor provides gradient propagation.
Returns: The cloned Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.0, stop_gradient=False)
clone_x = x.clone()
y = clone_x**2
y.backward()
print(clone_x.stop_gradient) # False
print(clone_x.grad) # [2.0], supports gradient propagation
print(x.stop_gradient) # False
print(x.grad) # [2.0], clone_x propagates the gradient back to x
x = paddle.to_tensor(1.0)
clone_x = x.clone()
clone_x.stop_gradient = False
z = clone_x**3
z.backward()
print(clone_x.stop_gradient) # False
print(clone_x.grad) # [3.0], supports gradient propagation
print(x.stop_gradient) # True
print(x.grad) # None
)DOC")
.def("_run_backward",
[](imperative::VarBase &self, const imperative::Tracer &tracer,
bool retain_graph) {
......
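Because the binding above implements `clone` by tracing an `assign` op, its dygraph behavior can be approximated at the Python level with the public `assign` API. A rough sketch of that analogy (assuming `paddle.assign` is available as in Paddle 2.0; this is not the actual implementation):

```python
import paddle

x = paddle.to_tensor(1.0, stop_gradient=False)

# x.clone() traces an "assign" op from x to a fresh VarBase, which is
# roughly what calling assign directly does in dygraph mode:
clone_x = paddle.assign(x)

y = clone_x ** 2
y.backward()
print(x.grad)  # [2.0], the gradient flows back through the traced assign op
```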
@@ -55,6 +55,15 @@ class TestVarBase(unittest.TestCase):
np.array_equal(x.numpy(), np.array([1.2]).astype(
'float32')))
self.assertEqual(x.dtype, core.VarDesc.VarType.FP32)
clone_x = x.clone()
self.assertTrue(
np.array_equal(clone_x.numpy(),
np.array([1.2]).astype('float32')))
self.assertEqual(clone_x.dtype, core.VarDesc.VarType.FP32)
y = clone_x**2
y.backward()
self.assertTrue(
np.array_equal(x.grad, np.array([2.4]).astype('float32')))
# set_default_dtype takes effect on complex
x = paddle.to_tensor(1 + 2j, place=place, stop_gradient=False)
@@ -132,7 +141,7 @@ class TestVarBase(unittest.TestCase):
_test_place(core.CPUPlace())
if core.is_compiled_with_cuda():
-        _test_place(core.CUDAPinnedPlace())
+        #_test_place(core.CUDAPinnedPlace())
_test_place(core.CUDAPlace(0))
def test_to_variable(self):
@@ -405,6 +414,7 @@ class TestVarBase(unittest.TestCase):
self.assertListEqual(list(var_base.shape), list(static_var.shape))
def test_tensor_str(self):
paddle.enable_static()
paddle.disable_static(paddle.CPUPlace())
paddle.manual_seed(10)
a = paddle.rand([10, 20])
......