未验证 提交 88584396 编写于 作者: Y Yulong Ao 提交者: GitHub

[Phi] Add yaml for assign_value (#44596)

* [Phi] Add yaml for assign_value

* [Phi] Fix the bug of the assign api and modify the unittest

* [Phi] Fix the bug when the tensor does not have the backend info

* [Phi] Replace the functional-style cast init by the brace-init

* [Phi] Cast the data explicitly
上级 856f741a
......@@ -56,6 +56,7 @@ yaml_types_mapping = {
'Scalar(int64_t)' : 'paddle::experimental::Scalar',
'Scalar(float)' : 'paddle::experimental::Scalar',
'Scalar(double)' : 'paddle::experimental::Scalar',
'Scalar[]' : 'std::vector<phi::Scalar>',
'IntArray' : 'paddle::experimental::IntArray'
}
......
......@@ -45,6 +45,7 @@ atype_to_parsing_function = {
"std::vector<double>": "CastPyArg2Float64s",
"std::vector<std::string>": "CastPyArg2Strings",
"paddle::experimental::Scalar": "CastPyArg2Scalar",
"std::vector<phi::Scalar>": "CastPyArg2ScalarArray",
"paddle::experimental::IntArray": "CastPyArg2IntArray",
"paddle::Place": "CastPyArg2Place",
"paddle::experimental::DataType": "CastPyArg2DataType",
......@@ -87,6 +88,7 @@ no_amp_list = [
'rmsprop',
'sgd_',
'sgd',
'assign_value_',
'sparse_momentum_',
'sparse_momentum',
]
......
......@@ -1253,6 +1253,54 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
return paddle::experimental::Scalar(1.0);
}
// Convert a Python list of numbers into std::vector<phi::Scalar>.
// The element kind is chosen by probing the FIRST element only: if it is
// float-like, every element is read as double; else if int-like, every
// element is read as int64_t. Throws InvalidArgument for None or for a
// non-list object. A non-empty list whose first element is neither kind
// falls through to a fake one-element result (legacy behavior, mirrors
// CastPyArg2Scalar).
std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos) {
  if (obj == Py_None) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
        "a list of int, float, or bool, but got %s",
        op_type,
        arg_pos + 1,
        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
  }

  PyTypeObject* type = obj->ob_type;
  auto type_name = std::string(type->tp_name);
  VLOG(1) << "type_name: " << type_name;
  if (PyList_Check(obj)) {
    Py_ssize_t len = PyList_Size(obj);
    // Fix: PyList_GetItem(obj, 0) on an empty list returns NULL and sets
    // a Python IndexError; the NULL would then be passed to
    // PyObject_CheckFloatOrToFloat. Map an empty list to an empty vector.
    if (len == 0) {
      return std::vector<phi::Scalar>();
    }
    PyObject* item = nullptr;
    item = PyList_GetItem(obj, 0);
    if (PyObject_CheckFloatOrToFloat(&item)) {
      std::vector<phi::Scalar> value;
      value.reserve(static_cast<size_t>(len));
      for (Py_ssize_t i = 0; i < len; i++) {
        item = PyList_GetItem(obj, i);
        value.emplace_back(phi::Scalar{PyFloat_AsDouble(item)});
      }
      return value;
    } else if (PyObject_CheckLongOrToLong(&item)) {
      std::vector<phi::Scalar> value;
      value.reserve(static_cast<size_t>(len));
      for (Py_ssize_t i = 0; i < len; i++) {
        item = PyList_GetItem(obj, i);
        value.emplace_back(
            phi::Scalar{static_cast<int64_t>(PyLong_AsLong(item))});
      }
      return value;
    }
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
        "a list of int, float, or bool, but got %s",
        op_type,
        arg_pos + 1,
        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
  }
  // Fake a ScalarArray — legacy fallback when the first list element is
  // neither float-like nor int-like; kept for backward compatibility.
  return std::vector<phi::Scalar>({phi::Scalar(1.0)});
}
paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos) {
......
......@@ -191,6 +191,10 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);
std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);
paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);
......
......@@ -53,7 +53,7 @@ bool HasAllocation(const phi::TensorBase& t) {
}
BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
if (HasAllocation(t)) {
if (HasAllocation(t) && t.place().GetType() != AllocationType::UNDEFINED) {
BackendSet backend_set(phi::TransToPhiBackend(t.place()));
switch (t.layout()) {
case DataLayout::MKLDNN:
......
......@@ -125,6 +125,7 @@ class BaseAPI(object):
'Scalar(int64_t)': 'const Scalar&',
'Scalar(float)': 'const Scalar&',
'Scalar(dobule)': 'const Scalar&',
'Scalar[]': 'const std::vector<phi::Scalar>&',
'int': 'int',
'int32_t': 'int32_t',
'int64_t': 'int64_t',
......@@ -648,6 +649,10 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
if 'IntArray' in self.attrs['attr_info'][param][0]:
kernel_args_type_list.append('const phi::IntArray&')
param = 'phi::IntArray(' + param + ')'
elif 'vector<phi::Scalar>' in self.attrs['attr_info'][param][0]:
kernel_args_type_list.append(
'const std::vector<phi::Scalar>&')
param = param
elif 'Scalar' in self.attrs['attr_info'][param][0]:
kernel_args_type_list.append('const phi::Scalar&')
param = 'phi::Scalar(' + param + ')'
......
......@@ -31,6 +31,7 @@ attr_types_map = {
'Scalar(int)': 'const Scalar&',
'Scalar(int64_t)': 'const Scalar&',
'Scalar(float)': 'const Scalar&',
'Scalar[]': 'const std::vector<Scalar>&',
'Place': 'Place',
'DataLayout': 'DataLayout',
'DataType': 'DataType',
......@@ -58,6 +59,7 @@ opmaker_attr_types_map = {
'Scalar(int)': 'int',
'Scalar(int64_t)': 'int64_t',
'Scalar(float)': 'float',
'Scalar[]': 'std::vector<Scalar>',
'Place': 'int',
'DataLayout': 'int',
'DataType': 'int',
......@@ -83,7 +85,8 @@ output_type_map = {'Tensor': 'Tensor', 'Tensor[]': 'std::vector<Tensor>'}
# phi-namespaced attribute-type mapping: reuse the plain attr map but
# qualify Scalar/IntArray with phi:: and pass them by const reference,
# matching the other mapping tables in this file.
phi_attr_types_map = attr_types_map.copy()
phi_attr_types_map.update({
    'IntArray': 'const phi::IntArray&',
    'Scalar': 'const phi::Scalar&',
    # Fix: was 'std::vector<phi::Scalar>&' (non-const reference), which is
    # inconsistent with attr_types_map's 'const std::vector<Scalar>&' and
    # cannot bind temporaries in the generated signatures.
    'Scalar[]': 'const std::vector<phi::Scalar>&'
})
#--------------------------- phi dense tensor ---------------------------
......
......@@ -246,6 +246,20 @@
inplace : (output -> out)
backward : assign_out__grad
# assign_value
- api : assign_value_
args : (Tensor output, int[] shape, DataType dtype, Scalar[] values, Place place = {})
output : Tensor(out)
inplace: (output -> out)
infer_meta :
func : AssignValueInferMeta
param : [shape, dtype]
kernel :
func : assign_value
param : [shape, dtype, values]
data_type : dtype
backend : place > output
# atan
- api : atan
args : (Tensor x)
......
......@@ -209,6 +209,8 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
std::vector<double>);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(
std::vector<std::string>);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(
std::vector<Scalar>);
template <typename... Tail>
struct InferMetaFnCallHelper<MetaTensor*, Tail...> {
......
......@@ -690,12 +690,20 @@ def assign(input, output=None):
if input.size > 1024 * 1024:
raise ValueError("The size of input is too big. Please consider "
"saving it to file and 'load_op' to load it")
if output is None:
output = helper.create_variable_for_type_inference(dtype=dtype)
if _non_static_mode():
if in_dygraph_mode():
if output is None:
output = zeros(list(input.shape), dtype)
_C_ops.final_state_assign_value_(output, list(input.shape), dtype,
values, _current_expected_place())
elif _in_legacy_dygraph():
if output is None:
output = core.VarBase()
_C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
dtype, value_name, values)
else:
if output is None:
output = helper.create_variable_for_type_inference(
dtype=input.dtype)
helper.append_op(type='assign_value',
outputs={'Out': [output]},
attrs={
......
......@@ -23,6 +23,7 @@ from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework
class TestAssignOp(op_test.OpTest):
......@@ -35,14 +36,20 @@ class TestAssignOp(op_test.OpTest):
self.outputs = {'Out': x}
def test_forward(self):
    # OpTest's output check runs against the static graph program.
    paddle.enable_static()
    # NOTE(review): flag toggled only around the check — presumably needed
    # by the eager-mode path of check_output; confirm against OpTest.
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    # check_eager=True also exercises the new final-state (eager) kernel.
    self.check_output(check_eager=True)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
    # Restore dynamic mode so later tests start from the default state.
    paddle.disable_static()
    framework._disable_legacy_dygraph()
def test_backward(self):
    # Gradient check runs against the static graph program.
    paddle.enable_static()
    # NOTE(review): flag toggled only around the check — presumably needed
    # by the eager-mode path of check_grad; confirm against OpTest.
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    # check_eager=True also exercises the new final-state (eager) kernel.
    self.check_grad(['X'], 'Out', check_eager=True)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
    # Restore dynamic mode so later tests start from the default state.
    paddle.disable_static()
    framework._disable_legacy_dygraph()
class TestAssignFP16Op(op_test.OpTest):
......@@ -55,19 +62,26 @@ class TestAssignFP16Op(op_test.OpTest):
self.outputs = {'Out': x}
def test_forward(self):
    # FP16 variant: same flow as TestAssignOp.test_forward.
    paddle.enable_static()
    # NOTE(review): flag toggled only around the check — presumably needed
    # by the eager-mode path of check_output; confirm against OpTest.
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    # check_eager=True also exercises the new final-state (eager) kernel.
    self.check_output(check_eager=True)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
    # Restore dynamic mode so later tests start from the default state.
    paddle.disable_static()
    framework._disable_legacy_dygraph()
def test_backward(self):
    # FP16 variant: same flow as TestAssignOp.test_backward.
    paddle.enable_static()
    # NOTE(review): flag toggled only around the check — presumably needed
    # by the eager-mode path of check_grad; confirm against OpTest.
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    # check_eager=True also exercises the new final-state (eager) kernel.
    self.check_grad(['X'], 'Out', check_eager=True)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
    # Restore dynamic mode so later tests start from the default state.
    paddle.disable_static()
    framework._disable_legacy_dygraph()
class TestAssignOpWithLoDTensorArray(unittest.TestCase):
def test_assign_LoDTensorArray(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
main_program = Program()
startup_program = Program()
......@@ -97,11 +111,13 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
fetch_list=[sums.name, x.grad_name])
self.assertTrue(np.allclose(res[0], feed_add))
self.assertTrue(np.allclose(res[1], ones / 1000.0))
paddle.disable_static()
class TestAssignOpError(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
# The type of input must be Variable or numpy.ndarray.
x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
......@@ -110,11 +126,13 @@ class TestAssignOpError(unittest.TestCase):
# When the type of input is numpy.ndarray, the dtype of input must be float32, int32.
x2 = np.array([[2.5, 2.5]], dtype='uint8')
self.assertRaises(TypeError, fluid.layers.assign, x2)
paddle.disable_static()
class TestAssignOApi(unittest.TestCase):
def test_assign_LoDTensorArray(self):
paddle.enable_static()
main_program = Program()
startup_program = Program()
with program_guard(main_program):
......@@ -142,6 +160,7 @@ class TestAssignOApi(unittest.TestCase):
fetch_list=[sums.name, x.grad_name])
self.assertTrue(np.allclose(res[0], feed_add))
self.assertTrue(np.allclose(res[1], ones / 1000.0))
paddle.disable_static()
def test_assign_NumpyArray(self):
with fluid.dygraph.guard():
......@@ -172,24 +191,19 @@ class TestAssignOApi(unittest.TestCase):
self.assertTrue(np.allclose(result1.numpy(), array))
def test_assign_List(self):
    # Run the list-input API check in dynamic (imperative) mode.
    paddle.disable_static()
    l = [1, 2, 3]
    result = paddle.assign(l)
    # assign() on a plain Python list should yield an equal tensor.
    self.assertTrue(np.allclose(result.numpy(), np.array(l)))
    # Switch back to static mode for subsequent tests.
    paddle.enable_static()
def test_assign_BasicTypes(self):
    # Run the scalar-input API checks in dynamic (imperative) mode.
    paddle.disable_static()
    result1 = paddle.assign(2)
    result2 = paddle.assign(3.0)
    result3 = paddle.assign(True)
    # int/float/bool scalars become 1-element tensors (bool stored as 1).
    self.assertTrue(np.allclose(result1.numpy(), np.array([2])))
    self.assertTrue(np.allclose(result2.numpy(), np.array([3.0])))
    self.assertTrue(np.allclose(result3.numpy(), np.array([1])))
    # Switch back to static mode for subsequent tests.
    paddle.enable_static()
def test_clone(self):
paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
self.python_api = paddle.clone
......@@ -216,11 +230,13 @@ class TestAssignOApi(unittest.TestCase):
fetch_list=[clone_x])[0]
self.assertTrue(np.array_equal(y_np, x_np), True)
paddle.disable_static()
class TestAssignOpErrorApi(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with program_guard(Program(), Program()):
# The type of input must be Variable or numpy.ndarray.
......@@ -231,6 +247,7 @@ class TestAssignOpErrorApi(unittest.TestCase):
x2 = np.array([[2.5, 2.5]], dtype='uint8')
self.assertRaises(TypeError, paddle.assign, x2)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
paddle.disable_static()
def test_type_error(self):
paddle.enable_static()
......@@ -238,8 +255,8 @@ class TestAssignOpErrorApi(unittest.TestCase):
x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
# not support to assign list(var)
self.assertRaises(TypeError, paddle.assign, x)
paddle.disable_static()
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -18,10 +18,13 @@ import unittest
import numpy
import op_test
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
paddle.enable_static()
class TestAssignValueOp(op_test.OpTest):
......
......@@ -1569,13 +1569,20 @@ def assign(x, output=None):
if input.size > 1024 * 1024:
raise ValueError("The size of input is too big. Please consider "
"saving it to file and 'load_op' to load it")
if output is None:
output = helper.create_variable_for_type_inference(
dtype=input.dtype)
if _non_static_mode():
if in_dygraph_mode():
if output is None:
output = zeros(list(input.shape), dtype)
_C_ops.final_state_assign_value_(output, list(input.shape), dtype,
values, _current_expected_place())
elif _in_legacy_dygraph():
if output is None:
output = core.VarBase()
_C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
dtype, value_name, values)
else:
if output is None:
output = helper.create_variable_for_type_inference(
dtype=input.dtype)
helper.append_op(type='assign_value',
outputs={'Out': [output]},
attrs={
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册