Unverified commit 437bebda, authored by chentianyu03 and committed by GitHub

[Yaml] Add assign yaml (#41428)

* add assign yaml

* add assign api

* add assign backward api

* add assign

* add assign yaml

* add assign

* assign yaml

* add assign raw kernel and use assign_raw in yaml

* merge develop branch

* add missing python_api
Parent: cd2a4cdf
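For context, this commit wires `assign` into the final-state (eager) API: a new `assign` entry in the forward yaml generates `_C_ops.final_state_assign`, `Tensor.clone()` and `paddle.assign` dispatch to it in dygraph mode, and an `assign_grad` entry reuses the new `assign_raw` kernel for the backward pass. A minimal sketch of the user-visible behavior after this change (assuming a build that includes this commit):

    import paddle

    x = paddle.ones([2, 3])
    x.stop_gradient = False

    y = paddle.assign(x)  # dygraph mode now routes through _C_ops.final_state_assign
    z = x.clone()         # Tensor.clone() takes the same final-state path

    (y + z).sum().backward()
    print(x.grad)         # assign_grad copies the upstream gradient straight back to x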
@@ -22,16 +22,23 @@
 namespace phi {
 
+template <typename Context>
+void AssignRawKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     DenseTensor* out) {
+  Copy<Context>(dev_ctx, x, x.place(), false, out);
+}
+
 template <typename Context>
 void AssignKernel(const Context& dev_ctx,
                   paddle::optional<const DenseTensor&> x,
                   DenseTensor* out) {
-  if (x.get_ptr()) {
-    if (!x.is_initialized()) {
+  if (x) {
+    if (!x->IsInitialized()) {
       return;
     }
     auto& x_tensor = *x.get_ptr();
-    Copy<Context>(dev_ctx, x_tensor, x_tensor.place(), false, out);
+    AssignRawKernel<Context>(dev_ctx, x_tensor, out);
   }
 }
@@ -104,6 +111,12 @@ void AssignValueKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
+PD_REGISTER_GENERAL_KERNEL(assign_raw,
+                           CPU,
+                           ALL_LAYOUT,
+                           phi::AssignRawKernel<phi::CPUContext>,
+                           ALL_DTYPE) {}
+
 PD_REGISTER_GENERAL_KERNEL(
     assign, CPU, ALL_LAYOUT, phi::AssignKernel<phi::CPUContext>, ALL_DTYPE) {
   kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
@@ -123,6 +136,11 @@ PD_REGISTER_KERNEL(assign_value,
                    int64_t) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+PD_REGISTER_GENERAL_KERNEL(assign_raw,
+                           GPU,
+                           ALL_LAYOUT,
+                           phi::AssignRawKernel<phi::GPUContext>,
+                           ALL_DTYPE) {}
 PD_REGISTER_GENERAL_KERNEL(
     assign, GPU, ALL_LAYOUT, phi::AssignKernel<phi::GPUContext>, ALL_DTYPE) {
   kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
......
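The split above keeps two entry points: `assign_raw` takes a plain `DenseTensor`, so the yaml-generated API can call it directly, while `assign` keeps the optional input for compatibility with the old dispensable `X`. Both ultimately perform a same-place `Copy`, so the output is an independent copy of the input, not an alias. A small sketch of that copy semantics from Python (assuming eager mode on a build with this commit):

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    y = paddle.assign(x)  # backed by the Copy in AssignRawKernel

    x[0] = 100.0          # mutate the source in place
    print(y)              # y still holds [1., 2., 3.]: assign is a real copy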
@@ -21,6 +21,11 @@
 
 namespace phi {
 
+template <typename Context>
+void AssignRawKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     DenseTensor* out);
+
 // In order to be compatible with the `AsDispensable` input in the original
 // assign op maker, the input parameter here needs to be dispensable, but
 // this looks weird
......
@@ -23,7 +23,7 @@ from .. import framework
 from ..framework import convert_np_dtype_to_dtype_, _in_legacy_dygraph
 from .. import core
 from .. import unique_name
-from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, EagerParamBase
+from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, EagerParamBase, in_dygraph_mode
 from .base import switch_to_static_graph
 from .math_op_patch import monkey_patch_math_varbase
 from .parallel import scale_loss
@@ -798,6 +798,9 @@ def monkey_patch_varbase():
     @framework.dygraph_only
     def clone(self):
+        if in_dygraph_mode():
+            return _C_ops.final_state_assign(self)
+
         if _in_legacy_dygraph():
             output = core.VarBase()
         else:
......
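With this patch, `Tensor.clone()` in the new eager mode is simply an assign of `self`, which keeps the clone differentiable. A short usage sketch mirroring the updated `test_clone` below:

    import paddle

    x = paddle.ones([2])
    x.stop_gradient = False

    y = x.clone()        # eager mode: _C_ops.final_state_assign(self)
    y.sum().backward()   # gradients flow back through the clone
    print(x.grad)        # [1., 1.]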
@@ -622,6 +622,9 @@ def assign(input, output=None):
     # after this api.
     if isinstance(input, (Variable, core.VarBase)):
         if _non_static_mode():
-            if output is None:
-                if _in_legacy_dygraph():
-                    output = core.VarBase()
+            if in_dygraph_mode() and output is None:
+                output = _C_ops.final_state_assign(input)
+            else:
+                if output is None:
+                    if _in_legacy_dygraph():
+                        output = core.VarBase()
......
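Note that the dispatch above only takes the final-state path when no `output` tensor is supplied; with an explicit `output`, it falls through to the legacy op so the result can be written into the given tensor. For illustration (hedged sketch, parameter names as in the signature above):

    import paddle

    x = paddle.rand([3])

    y = paddle.assign(x)          # output is None: fresh tensor via final_state_assign

    out = paddle.zeros([3])
    paddle.assign(x, output=out)  # explicit output: legacy path writes into `out`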
@@ -27,30 +27,32 @@ from paddle.fluid.backward import append_backward
 
 class TestAssignOp(op_test.OpTest):
     def setUp(self):
+        self.python_api = paddle.assign
         self.op_type = "assign"
         x = np.random.random(size=(100, 10)).astype('float64')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
     def test_forward(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestAssignFP16Op(op_test.OpTest):
     def setUp(self):
+        self.python_api = paddle.assign
         self.op_type = "assign"
         x = np.random.random(size=(100, 10)).astype('float16')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
     def test_forward(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
@@ -171,6 +173,8 @@ class TestAssignOApi(unittest.TestCase):
 
     def test_clone(self):
+        paddle.disable_static()
+        self.python_api = paddle.clone
         x = paddle.ones([2])
         x.stop_gradient = False
         clone_x = paddle.clone(x)
......
@@ -167,6 +167,16 @@
     func : asinh
   backward : asinh_grad
 
+# assign
+- api : assign
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : assign_raw
+  backward : assign_grad
+
 # atan
 - api : atan
   args : (Tensor x)
......
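The yaml entry above is consumed by the final-state code generator: `func : assign_raw` binds the api to the newly registered kernel, `UnchangedInferMeta` propagates the meta of `x` to the output, and the generated eager op surfaces as `_C_ops.final_state_assign`, the symbol used in this commit's Python changes. Roughly (assuming a build with this commit):

    import paddle
    from paddle import _C_ops

    x = paddle.rand([4, 4])
    out = _C_ops.final_state_assign(x)  # generated from the `- api : assign` entry
    print(out.shape)                    # [4, 4], per UnchangedInferMeta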
@@ -89,6 +89,16 @@
   kernel :
     func : asinh_grad
 
+- backward_api : assign_grad
+  forward : assign (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : assign_raw
+
 - backward_api : atan2_grad
   forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
......
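Because assign is an identity copy, its gradient is also an identity copy: `assign_grad` takes only `out_grad` and pushes it through the same `assign_raw` kernel to produce `x_grad`. A quick numerical check of that identity-gradient behavior (assuming eager mode):

    import paddle

    x = paddle.rand([3])
    x.stop_gradient = False

    y = paddle.assign(x)
    (2.0 * y).sum().backward()

    print(x.grad)   # all 2.0: out_grad is copied unchanged into x_grad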