diff --git a/paddle/phi/kernels/assign_kernel.cc b/paddle/phi/kernels/assign_kernel.cc
index a330227fcfafd21d98ef4ee928c1c1d095a93e45..5eafc869fa551ac5dfdc7d2e4bec24d75808938f 100644
--- a/paddle/phi/kernels/assign_kernel.cc
+++ b/paddle/phi/kernels/assign_kernel.cc
@@ -22,16 +22,23 @@ namespace phi {
 
+template <typename Context>
+void AssignRawKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     DenseTensor* out) {
+  Copy(dev_ctx, x, x.place(), false, out);
+}
+
 template <typename Context>
 void AssignKernel(const Context& dev_ctx,
                   paddle::optional<const DenseTensor&> x,
                   DenseTensor* out) {
-  if (x.get_ptr()) {
-    if (!x.is_initialized()) {
+  if (x) {
+    if (!x->IsInitialized()) {
       return;
     }
     auto& x_tensor = *x.get_ptr();
-    Copy(dev_ctx, x_tensor, x_tensor.place(), false, out);
+    AssignRawKernel(dev_ctx, x_tensor, out);
   }
 }
@@ -104,6 +111,12 @@ void AssignValueKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
+PD_REGISTER_GENERAL_KERNEL(assign_raw,
+                           CPU,
+                           ALL_LAYOUT,
+                           phi::AssignRawKernel,
+                           ALL_DTYPE) {}
+
 PD_REGISTER_GENERAL_KERNEL(
     assign, CPU, ALL_LAYOUT, phi::AssignKernel, ALL_DTYPE) {
   kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
@@ -123,6 +136,11 @@ PD_REGISTER_KERNEL(assign_value,
                    int64_t) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+PD_REGISTER_GENERAL_KERNEL(assign_raw,
+                           GPU,
+                           ALL_LAYOUT,
+                           phi::AssignRawKernel,
+                           ALL_DTYPE) {}
 PD_REGISTER_GENERAL_KERNEL(
     assign, GPU, ALL_LAYOUT, phi::AssignKernel, ALL_DTYPE) {
   kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
diff --git a/paddle/phi/kernels/assign_kernel.h b/paddle/phi/kernels/assign_kernel.h
index f1f3f024205a10f91a98f06592d776d76f2a3319..437a2a0c189e868e8d6bee6567473439393eebaa 100644
--- a/paddle/phi/kernels/assign_kernel.h
+++ b/paddle/phi/kernels/assign_kernel.h
@@ -21,6 +21,11 @@ namespace phi {
 
+template <typename Context>
+void AssignRawKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     DenseTensor* out);
+
 // In order to be compatible with the `AsDispensable` input in the original
 // assign op maker, the input parameter here needs to be dispensable, but
 // this looks weird
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 72aee0ba87e58be6f563a8189a54dc7357529c9c..9bf245ff388b405a63786c932483ca15bee4320a 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -23,7 +23,7 @@ from .. import framework
 from ..framework import convert_np_dtype_to_dtype_, _in_legacy_dygraph
 from .. import core
 from .. import unique_name
-from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, EagerParamBase
+from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, EagerParamBase, in_dygraph_mode
 from .base import switch_to_static_graph
 from .math_op_patch import monkey_patch_math_varbase
 from .parallel import scale_loss
@@ -798,6 +798,9 @@ def monkey_patch_varbase():
 
     @framework.dygraph_only
     def clone(self):
+        if in_dygraph_mode():
+            return _C_ops.final_state_assign(self)
+
         if _in_legacy_dygraph():
             output = core.VarBase()
         else:
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 28e0d4eff377f6f23da436d8219da4475a474bff..3a8dfdc858079c34d749e07158a61b4add553096 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -622,12 +622,15 @@ def assign(input, output=None):
     # after this api.
     if isinstance(input, (Variable, core.VarBase)):
         if _non_static_mode():
-            if output is None:
-                if _in_legacy_dygraph():
-                    output = core.VarBase()
-                else:
-                    output = core.eager.Tensor()
-            _C_ops.assign(input, output)
+            if in_dygraph_mode() and output is None:
+                output = _C_ops.final_state_assign(input)
+            else:
+                if output is None:
+                    if _in_legacy_dygraph():
+                        output = core.VarBase()
+                    else:
+                        output = core.eager.Tensor()
+                _C_ops.assign(input, output)
         else:
             check_dtype(input.dtype, 'input', [
                 'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 3dbd9311a71ed6005b2b53c728bd9cc3343246ea..bfe23c621270d7117c92c7ba06991acc1fb74d37 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -27,30 +27,32 @@ from paddle.fluid.backward import append_backward
 
 class TestAssignOp(op_test.OpTest):
     def setUp(self):
+        self.python_api = paddle.assign
         self.op_type = "assign"
         x = np.random.random(size=(100, 10)).astype('float64')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
     def test_forward(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestAssignFP16Op(op_test.OpTest):
     def setUp(self):
+        self.python_api = paddle.assign
         self.op_type = "assign"
         x = np.random.random(size=(100, 10)).astype('float16')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
     def test_forward(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
@@ -171,6 +173,8 @@ class TestAssignOApi(unittest.TestCase):
 
     def test_clone(self):
         paddle.disable_static()
+        self.python_api = paddle.clone
+
         x = paddle.ones([2])
         x.stop_gradient = False
         clone_x = paddle.clone(x)
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 892577a46f4d29126c899dc8847eb101b1b71666..3d0a6ae7b09886d779916e78c54de02d0debe0a0 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -167,6 +167,16 @@
     func : asinh
   backward : asinh_grad
 
+# assign
+- api : assign
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : assign_raw
+  backward : assign_grad
+
 # atan
 - api : atan
   args : (Tensor x)
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 602fecc83b8f786e7105384b23023a470e7cef12..6ef11ca2b3df9627803d3e62759d9aa21ad5357f 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -89,6 +89,16 @@
     kernel :
       func : asinh_grad
 
+- backward_api : assign_grad
+  forward : assign (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : assign_raw
+
 - backward_api : atan2_grad
   forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
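
For context, a minimal usage sketch of the eager-mode path this patch wires up, assuming a build with the patch applied where paddle.assign and paddle.clone dispatch to _C_ops.final_state_assign in dygraph mode; it is illustrative only and not part of the patch:

import numpy as np
import paddle

# Dygraph (eager) mode is the default, but make it explicit as the tests do.
paddle.disable_static()

x = paddle.ones([2, 3])
x.stop_gradient = False

# With output=None, paddle.assign now returns a fresh tensor copied from x
# via the final_state_assign eager API instead of an in-place _C_ops.assign.
y = paddle.assign(x)
np.testing.assert_array_equal(y.numpy(), x.numpy())

# paddle.clone routes through the same assign API; assign_grad is an identity
# copy (assign_raw on out_grad), so the gradient flows straight back to x.
z = paddle.clone(x)
z.sum().backward()
np.testing.assert_allclose(x.grad.numpy(), np.ones([2, 3]))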