Unverified · Commit 437bebda authored by chentianyu03, committed by GitHub

[Yaml] Add assign yaml (#41428)

* add assign yaml

* add assign api

* add assign backward api

* add assign

* add assign yaml

* add assign

* assign yaml

* add assign raw kernel and use assign_raw in yaml

* merge develop branch

* add missing python_api
Parent: cd2a4cdf
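In short, this commit adds a final-state (eager-mode) path for `assign`: a yaml entry generates the `final_state_assign` API and its Python binding, `paddle.assign` and `Tensor.clone` dispatch to it under `in_dygraph_mode()`, and a new `assign_raw` kernel backs both the forward and backward ops. A minimal usage sketch of the affected Python surface (shapes and values are arbitrary, eager mode assumed):

import paddle

paddle.disable_static()
x = paddle.ones([2, 3])
y = paddle.assign(x)  # eager mode with output=None now routes to _C_ops.final_state_assign
z = x.clone()         # Tensor.clone takes the same final-state path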
@@ -22,16 +22,23 @@
 namespace phi {
 
+template <typename Context>
+void AssignRawKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     DenseTensor* out) {
+  Copy<Context>(dev_ctx, x, x.place(), false, out);
+}
+
 template <typename Context>
 void AssignKernel(const Context& dev_ctx,
                   paddle::optional<const DenseTensor&> x,
                   DenseTensor* out) {
-  if (x.get_ptr()) {
-    if (!x.is_initialized()) {
+  if (x) {
+    if (!x->IsInitialized()) {
       return;
     }
     auto& x_tensor = *x.get_ptr();
-    Copy<Context>(dev_ctx, x_tensor, x_tensor.place(), false, out);
+    AssignRawKernel<Context>(dev_ctx, x_tensor, out);
   }
 }
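The refactor above extracts the plain tensor copy into `AssignRawKernel`, which takes a required `DenseTensor` input suitable for the yaml-generated API, while `AssignKernel` keeps its dispensable `paddle::optional` input and now delegates to it. Behaviorally, `assign` is a data copy rather than an alias; a quick sketch of that contract (hypothetical values, eager mode assumed):

import paddle

x = paddle.to_tensor([1.0, 2.0])
y = paddle.assign(x)  # copies x's data into a freshly allocated tensor
x[0] = 5.0            # mutating x afterwards should not change y
print(y)              # expected: [1., 2.]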
@@ -104,6 +111,12 @@ void AssignValueKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
+PD_REGISTER_GENERAL_KERNEL(assign_raw,
+                           CPU,
+                           ALL_LAYOUT,
+                           phi::AssignRawKernel<phi::CPUContext>,
+                           ALL_DTYPE) {}
+
 PD_REGISTER_GENERAL_KERNEL(
     assign, CPU, ALL_LAYOUT, phi::AssignKernel<phi::CPUContext>, ALL_DTYPE) {
   kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
@@ -123,6 +136,11 @@ PD_REGISTER_KERNEL(assign_value,
                    int64_t) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+PD_REGISTER_GENERAL_KERNEL(assign_raw,
+                           GPU,
+                           ALL_LAYOUT,
+                           phi::AssignRawKernel<phi::GPUContext>,
+                           ALL_DTYPE) {}
 PD_REGISTER_GENERAL_KERNEL(
     assign, GPU, ALL_LAYOUT, phi::AssignKernel<phi::GPUContext>, ALL_DTYPE) {
   kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
......
@@ -21,6 +21,11 @@
 namespace phi {
 
+template <typename Context>
+void AssignRawKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     DenseTensor* out);
+
 // In order to be compatible with the `AsDispensable` input in the original
 // assign op maker, the input parameter here needs to be dispensable, but
 // this looks weird
......
@@ -23,7 +23,7 @@ from .. import framework
 from ..framework import convert_np_dtype_to_dtype_, _in_legacy_dygraph
 from .. import core
 from .. import unique_name
-from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, EagerParamBase
+from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, EagerParamBase, in_dygraph_mode
 from .base import switch_to_static_graph
 from .math_op_patch import monkey_patch_math_varbase
 from .parallel import scale_loss
@@ -798,6 +798,9 @@ def monkey_patch_varbase():
 
     @framework.dygraph_only
     def clone(self):
+        if in_dygraph_mode():
+            return _C_ops.final_state_assign(self)
+
         if _in_legacy_dygraph():
             output = core.VarBase()
         else:
......
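With this change, `clone()` in eager mode simply returns `_C_ops.final_state_assign(self)`: a copy that stays on the autograd tape. A sketch along the lines of the existing `test_clone` case below (the loss expression is illustrative):

import paddle

paddle.disable_static()
x = paddle.ones([2])
x.stop_gradient = False
clone_x = paddle.clone(x)   # eager mode: _C_ops.final_state_assign(x)
loss = (clone_x * 3).sum()
loss.backward()             # gradients flow back through the copy
print(x.grad)               # expected: [3., 3.]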
@@ -622,12 +622,15 @@ def assign(input, output=None):
     # after this api.
     if isinstance(input, (Variable, core.VarBase)):
         if _non_static_mode():
-            if output is None:
-                if _in_legacy_dygraph():
-                    output = core.VarBase()
-                else:
-                    output = core.eager.Tensor()
-            _C_ops.assign(input, output)
+            if in_dygraph_mode() and output is None:
+                output = _C_ops.final_state_assign(input)
+            else:
+                if output is None:
+                    if _in_legacy_dygraph():
+                        output = core.VarBase()
+                    else:
+                        output = core.eager.Tensor()
+                _C_ops.assign(input, output)
         else:
             check_dtype(input.dtype, 'input', [
                 'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
......
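The new branch ordering means: in eager mode with no explicit `output`, the generated `final_state_assign` allocates and returns the result; when the caller supplies `output` (or in legacy dygraph), the old `assign` op writes into a preallocated `VarBase`/eager Tensor as before. A usage sketch of both paths (array values are arbitrary):

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.array([[1, 2], [3, 4]], dtype='float32'))

y = paddle.assign(x)       # output=None: final-state path allocates y
out = paddle.zeros([2, 2])
paddle.assign(x, out)      # explicit output: legacy assign op fills `out` in place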
@@ -27,30 +27,32 @@ from paddle.fluid.backward import append_backward
 
 class TestAssignOp(op_test.OpTest):
     def setUp(self):
+        self.python_api = paddle.assign
         self.op_type = "assign"
         x = np.random.random(size=(100, 10)).astype('float64')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
     def test_forward(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestAssignFP16Op(op_test.OpTest):
     def setUp(self):
+        self.python_api = paddle.assign
         self.op_type = "assign"
         x = np.random.random(size=(100, 10)).astype('float16')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
     def test_forward(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
@@ -171,6 +173,8 @@ class TestAssignOApi(unittest.TestCase):
 
     def test_clone(self):
         paddle.disable_static()
+        self.python_api = paddle.clone
+
         x = paddle.ones([2])
         x.stop_gradient = False
         clone_x = paddle.clone(x)
......
@@ -167,6 +167,16 @@
     func : asinh
     backward : asinh_grad
 
+# assign
+- api : assign
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : assign_raw
+  backward : assign_grad
+
 # atan
 - api : atan
   args : (Tensor x)
......
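This yaml entry is what the op code generator consumes to emit `final_state_assign`: `UnchangedInferMeta` declares that the output inherits the input's meta (shape and dtype), and the kernel slot binds to the `assign_raw` kernel registered earlier. A sketch of the resulting shape/dtype contract (sizes are arbitrary):

import paddle

x = paddle.randn([4, 5], dtype='float32')
y = paddle.assign(x)
assert y.shape == [4, 5]          # UnchangedInferMeta: same shape as x
assert y.dtype == paddle.float32  # ... and same dtype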
@@ -89,6 +89,16 @@
   kernel :
     func : asinh_grad
 
+- backward_api : assign_grad
+  forward : assign (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : assign_raw
+
 - backward_api : atan2_grad
   forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
......
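Because `assign` is an identity copy, its gradient is just a copy of `out_grad`, so the backward entry reuses `assign_raw` and points `infer_meta` at `[out_grad]` (its only tensor argument). A numerical sanity check of that identity-gradient behavior (sketch):

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = paddle.assign(x)
y.sum().backward()
print(x.grad)   # expected: [1., 1., 1.] -- assign_grad passes out_grad through unchanged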