Unverified commit 88584396 authored by Yulong Ao, committed by GitHub

[Phi] Add yaml for assign_value (#44596)

* [Phi] Add yaml for assign_value

* [Phi] Fix a bug in the assign API and update the unit test

* [Phi] Fix a bug when the tensor does not have backend info

* [Phi] Replace functional-style cast initialization with brace initialization

* [Phi] Cast the data explicitly
Parent 856f741a
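In user-facing terms, this commit routes `paddle.assign` through the new final-state (Phi) eager API instead of the legacy `assign_value` op when eager mode is on. A minimal usage sketch of the path being migrated (printed values shown for illustration):

    import numpy as np
    import paddle

    # In eager (dygraph) mode this now dispatches through the generated
    # _C_ops.final_state_assign_value_ API added by this commit.
    data = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
    out = paddle.assign(data)
    print(out.numpy())  # [[1. 2.], [3. 4.]]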
@@ -56,6 +56,7 @@ yaml_types_mapping = {
     'Scalar(int64_t)' : 'paddle::experimental::Scalar',
     'Scalar(float)' : 'paddle::experimental::Scalar',
     'Scalar(double)' : 'paddle::experimental::Scalar',
+    'Scalar[]' : 'std::vector<phi::Scalar>',
     'IntArray' : 'paddle::experimental::IntArray'
 }
...
@@ -45,6 +45,7 @@ atype_to_parsing_function = {
     "std::vector<double>": "CastPyArg2Float64s",
     "std::vector<std::string>": "CastPyArg2Strings",
     "paddle::experimental::Scalar": "CastPyArg2Scalar",
+    "std::vector<phi::Scalar>": "CastPyArg2ScalarArray",
     "paddle::experimental::IntArray": "CastPyArg2IntArray",
     "paddle::Place": "CastPyArg2Place",
     "paddle::experimental::DataType": "CastPyArg2DataType",
@@ -87,6 +88,7 @@ no_amp_list = [
     'rmsprop',
     'sgd_',
     'sgd',
+    'assign_value_',
     'sparse_momentum_',
     'sparse_momentum',
 ]
...
@@ -1253,6 +1253,54 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
   return paddle::experimental::Scalar(1.0);
 }

+std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
+                                               const std::string& op_type,
+                                               ssize_t arg_pos) {
+  if (obj == Py_None) {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "a list of int, float, or bool, but got %s",
+        op_type,
+        arg_pos + 1,
+        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
+  }
+
+  PyTypeObject* type = obj->ob_type;
+  auto type_name = std::string(type->tp_name);
+  VLOG(1) << "type_name: " << type_name;
+  if (PyList_Check(obj)) {
+    Py_ssize_t len = PyList_Size(obj);
+    PyObject* item = nullptr;
+    item = PyList_GetItem(obj, 0);
+    if (PyObject_CheckFloatOrToFloat(&item)) {
+      std::vector<phi::Scalar> value;
+      for (Py_ssize_t i = 0; i < len; i++) {
+        item = PyList_GetItem(obj, i);
+        value.emplace_back(phi::Scalar{PyFloat_AsDouble(item)});
+      }
+      return value;
+    } else if (PyObject_CheckLongOrToLong(&item)) {
+      std::vector<phi::Scalar> value;
+      for (Py_ssize_t i = 0; i < len; i++) {
+        item = PyList_GetItem(obj, i);
+        value.emplace_back(
+            phi::Scalar{static_cast<int64_t>(PyLong_AsLong(item))});
+      }
+      return value;
+    }
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "a list of int, float, or bool, but got %s",
+        op_type,
+        arg_pos + 1,
+        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
+  }
+
+  // Fake a ScalarArray
+  return std::vector<phi::Scalar>({phi::Scalar(1.0)});
+}
+
 paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
                                                   const std::string& op_type,
                                                   ssize_t arg_pos) {
...
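The conversion rule implemented above is subtle: the type of the first list element decides how every element is cast. A rough Python model of `CastPyArg2ScalarArray`, for intuition only (not the actual binding):

    def cast_py_arg_to_scalar_array(obj):
        # Model of the C++ logic above: dispatch on the first element's type.
        if not isinstance(obj, list):
            raise TypeError("expected a list of int, float, or bool")
        first = obj[0]  # like the C++ code, this assumes a non-empty list
        if isinstance(first, float):
            return [float(v) for v in obj]   # cast as float64 Scalars
        if isinstance(first, (bool, int)):
            return [int(v) for v in obj]     # cast as int64 Scalars
        return [1.0]  # mirrors the "fake" Scalar(1.0) fallback above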
@@ -191,6 +191,10 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos);

+std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
+                                               const std::string& op_type,
+                                               ssize_t arg_pos);
+
 paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
                                                   const std::string& op_type,
                                                   ssize_t arg_pos);
...
@@ -53,7 +53,7 @@ bool HasAllocation(const phi::TensorBase& t) {
 }

 BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
-  if (HasAllocation(t)) {
+  if (HasAllocation(t) && t.place().GetType() != AllocationType::UNDEFINED) {
     BackendSet backend_set(phi::TransToPhiBackend(t.place()));
     switch (t.layout()) {
       case DataLayout::MKLDNN:
...
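This guard implements the third bullet of the commit message: backend selection now skips tensors whose place is still `AllocationType::UNDEFINED`. Since `assign_value_` is inplace, the `output` tensor is itself a kernel input and can reach dispatch before a real backend has been attached to it. A hedged illustration of the path this protects:

    import paddle

    # assign() on a plain list goes through the new inplace eager API with a
    # pre-created output tensor; dispatch must tolerate inputs that do not
    # carry backend info yet.
    out = paddle.assign([1.0, 2.0])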
@@ -125,6 +125,7 @@ class BaseAPI(object):
             'Scalar(int64_t)': 'const Scalar&',
             'Scalar(float)': 'const Scalar&',
             'Scalar(dobule)': 'const Scalar&',
+            'Scalar[]': 'const std::vector<phi::Scalar>&',
             'int': 'int',
             'int32_t': 'int32_t',
             'int64_t': 'int64_t',
@@ -648,6 +649,10 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
                 if 'IntArray' in self.attrs['attr_info'][param][0]:
                     kernel_args_type_list.append('const phi::IntArray&')
                     param = 'phi::IntArray(' + param + ')'
+                elif 'vector<phi::Scalar>' in self.attrs['attr_info'][param][0]:
+                    kernel_args_type_list.append(
+                        'const std::vector<phi::Scalar>&')
+                    param = param
                 elif 'Scalar' in self.attrs['attr_info'][param][0]:
                     kernel_args_type_list.append('const phi::Scalar&')
                     param = 'phi::Scalar(' + param + ')'
...
@@ -31,6 +31,7 @@ attr_types_map = {
     'Scalar(int)': 'const Scalar&',
     'Scalar(int64_t)': 'const Scalar&',
     'Scalar(float)': 'const Scalar&',
+    'Scalar[]': 'const std::vector<Scalar>&',
     'Place': 'Place',
     'DataLayout': 'DataLayout',
     'DataType': 'DataType',
@@ -58,6 +59,7 @@ opmaker_attr_types_map = {
     'Scalar(int)': 'int',
     'Scalar(int64_t)': 'int64_t',
     'Scalar(float)': 'float',
+    'Scalar[]': 'std::vector<Scalar>',
     'Place': 'int',
     'DataLayout': 'int',
     'DataType': 'int',
@@ -83,7 +85,8 @@ output_type_map = {'Tensor': 'Tensor', 'Tensor[]': 'std::vector<Tensor>'}
 phi_attr_types_map = attr_types_map.copy()
 phi_attr_types_map.update({
     'IntArray': 'const phi::IntArray&',
-    'Scalar': 'const phi::Scalar&'
+    'Scalar': 'const phi::Scalar&',
+    'Scalar[]': 'std::vector<phi::Scalar>&'
 })

 #--------------------------- phi dense tensor ---------------------------
...
@@ -246,6 +246,20 @@
   inplace : (output -> out)
   backward : assign_out__grad

+# assign_value
+- api : assign_value_
+  args : (Tensor output, int[] shape, DataType dtype, Scalar[] values, Place place = {})
+  output : Tensor(out)
+  inplace: (output -> out)
+  infer_meta :
+    func : AssignValueInferMeta
+    param : [shape, dtype]
+  kernel :
+    func : assign_value
+    param : [shape, dtype, values]
+    data_type : dtype
+    backend : place > output
+
 # atan
 - api : atan
   args : (Tensor x)
...
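With the codegen changes above, this yaml entry yields an eager API that can also be invoked directly; the updated `assign()` implementations below do exactly that. A sketch, assuming `paddle.float32` is accepted where the signature expects `DataType`:

    import paddle
    from paddle import _C_ops
    from paddle.fluid.framework import _current_expected_place

    out = paddle.zeros([3], dtype='float32')
    _C_ops.final_state_assign_value_(out, [3], paddle.float32,
                                     [1.0, 2.0, 3.0],
                                     _current_expected_place())
    # 'out' is overwritten in place; 'backend : place > output' means the
    # kernel backend is taken from 'place', falling back to 'output'.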
@@ -209,6 +209,8 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
       std::vector<double>);
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(
       std::vector<std::string>);
+  PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(
+      std::vector<Scalar>);

   template <typename... Tail>
   struct InferMetaFnCallHelper<MetaTensor*, Tail...> {
...
@@ -690,12 +690,20 @@ def assign(input, output=None):
         if input.size > 1024 * 1024:
             raise ValueError("The size of input is too big. Please consider "
                              "saving it to file and 'load_op' to load it")
-        if output is None:
-            output = helper.create_variable_for_type_inference(dtype=dtype)
-        if _non_static_mode():
+        if in_dygraph_mode():
+            if output is None:
+                output = zeros(list(input.shape), dtype)
+            _C_ops.final_state_assign_value_(output, list(input.shape), dtype,
+                                             values, _current_expected_place())
+        elif _in_legacy_dygraph():
+            if output is None:
+                output = core.VarBase()
             _C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
                                 dtype, value_name, values)
         else:
+            if output is None:
+                output = helper.create_variable_for_type_inference(
+                    dtype=input.dtype)
             helper.append_op(type='assign_value',
                              outputs={'Out': [output]},
                              attrs={
...
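The rewritten dispatch has three distinct behaviors: in new eager mode the output buffer is pre-created with `zeros` and filled in place; in legacy dygraph an empty `core.VarBase()` suffices because the old op allocates it; in static graph the op is appended to the program as before. A quick check of the eager path (sketch):

    import numpy as np
    import paddle

    x = np.array([1, 2, 3], dtype=np.int32)
    out = paddle.assign(x)  # eager: final_state_assign_value_ under the hood
    assert np.array_equal(out.numpy(), x)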
@@ -23,6 +23,7 @@ from paddle.fluid.op import Operator
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 from paddle.fluid.backward import append_backward
+import paddle.fluid.framework as framework


 class TestAssignOp(op_test.OpTest):
@@ -35,14 +36,20 @@ class TestAssignOp(op_test.OpTest):
         self.outputs = {'Out': x}

     def test_forward(self):
+        paddle.enable_static()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+        paddle.disable_static()
+        framework._disable_legacy_dygraph()

     def test_backward(self):
+        paddle.enable_static()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+        paddle.disable_static()
+        framework._disable_legacy_dygraph()


 class TestAssignFP16Op(op_test.OpTest):
@@ -55,19 +62,26 @@ class TestAssignFP16Op(op_test.OpTest):
         self.outputs = {'Out': x}

     def test_forward(self):
+        paddle.enable_static()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+        paddle.disable_static()
+        framework._disable_legacy_dygraph()

     def test_backward(self):
+        paddle.enable_static()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+        paddle.disable_static()
+        framework._disable_legacy_dygraph()


 class TestAssignOpWithLoDTensorArray(unittest.TestCase):

     def test_assign_LoDTensorArray(self):
+        paddle.enable_static()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         main_program = Program()
         startup_program = Program()
@@ -97,11 +111,13 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
                        fetch_list=[sums.name, x.grad_name])
         self.assertTrue(np.allclose(res[0], feed_add))
         self.assertTrue(np.allclose(res[1], ones / 1000.0))
+        paddle.disable_static()

 class TestAssignOpError(unittest.TestCase):

     def test_errors(self):
+        paddle.enable_static()
         with program_guard(Program(), Program()):
             # The type of input must be Variable or numpy.ndarray.
             x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
@@ -110,11 +126,13 @@ class TestAssignOpError(unittest.TestCase):
             # When the type of input is numpy.ndarray, the dtype of input must be float32, int32.
             x2 = np.array([[2.5, 2.5]], dtype='uint8')
             self.assertRaises(TypeError, fluid.layers.assign, x2)
+        paddle.disable_static()

 class TestAssignOApi(unittest.TestCase):

     def test_assign_LoDTensorArray(self):
+        paddle.enable_static()
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program):
@@ -142,6 +160,7 @@ class TestAssignOApi(unittest.TestCase):
                        fetch_list=[sums.name, x.grad_name])
         self.assertTrue(np.allclose(res[0], feed_add))
         self.assertTrue(np.allclose(res[1], ones / 1000.0))
+        paddle.disable_static()

     def test_assign_NumpyArray(self):
         with fluid.dygraph.guard():
@@ -172,24 +191,19 @@ class TestAssignOApi(unittest.TestCase):
         self.assertTrue(np.allclose(result1.numpy(), array))

     def test_assign_List(self):
-        paddle.disable_static()
         l = [1, 2, 3]
         result = paddle.assign(l)
         self.assertTrue(np.allclose(result.numpy(), np.array(l)))
-        paddle.enable_static()

     def test_assign_BasicTypes(self):
-        paddle.disable_static()
         result1 = paddle.assign(2)
         result2 = paddle.assign(3.0)
         result3 = paddle.assign(True)
         self.assertTrue(np.allclose(result1.numpy(), np.array([2])))
         self.assertTrue(np.allclose(result2.numpy(), np.array([3.0])))
         self.assertTrue(np.allclose(result3.numpy(), np.array([1])))
-        paddle.enable_static()

     def test_clone(self):
-        paddle.disable_static()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.python_api = paddle.clone
@@ -216,11 +230,13 @@ class TestAssignOApi(unittest.TestCase):
                            fetch_list=[clone_x])[0]

         self.assertTrue(np.array_equal(y_np, x_np), True)
+        paddle.disable_static()


 class TestAssignOpErrorApi(unittest.TestCase):

     def test_errors(self):
+        paddle.enable_static()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         with program_guard(Program(), Program()):
             # The type of input must be Variable or numpy.ndarray.
@@ -231,6 +247,7 @@ class TestAssignOpErrorApi(unittest.TestCase):
             x2 = np.array([[2.5, 2.5]], dtype='uint8')
             self.assertRaises(TypeError, paddle.assign, x2)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+        paddle.disable_static()

     def test_type_error(self):
         paddle.enable_static()
@@ -238,8 +255,8 @@ class TestAssignOpErrorApi(unittest.TestCase):
             x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
             # not support to assign list(var)
             self.assertRaises(TypeError, paddle.assign, x)
+        paddle.disable_static()


 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
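The recurring edit in the test file above is a bracketing pattern: switch to static mode for the OpTest checks, then restore dygraph and clear legacy-dygraph state so later tests exercise the final-state path. In isolation (using only helpers that appear in this diff):

    import paddle
    import paddle.fluid.framework as framework

    paddle.enable_static()                # OpTest checks run on the static graph
    # ... e.g. self.check_output(check_eager=True) ...
    paddle.disable_static()               # back to dygraph for subsequent tests
    framework._disable_legacy_dygraph()   # make sure new eager mode is active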
@@ -18,10 +18,13 @@ import unittest

 import numpy
 import op_test
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
 import paddle.fluid.layers as layers

+paddle.enable_static()
+

 class TestAssignValueOp(op_test.OpTest):
...
@@ -1569,13 +1569,20 @@ def assign(x, output=None):
         if input.size > 1024 * 1024:
             raise ValueError("The size of input is too big. Please consider "
                              "saving it to file and 'load_op' to load it")
-        if output is None:
-            output = helper.create_variable_for_type_inference(
-                dtype=input.dtype)
-        if _non_static_mode():
+        if in_dygraph_mode():
+            if output is None:
+                output = zeros(list(input.shape), dtype)
+            _C_ops.final_state_assign_value_(output, list(input.shape), dtype,
+                                             values, _current_expected_place())
+        elif _in_legacy_dygraph():
+            if output is None:
+                output = core.VarBase()
             _C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
                                 dtype, value_name, values)
         else:
+            if output is None:
+                output = helper.create_variable_for_type_inference(
+                    dtype=input.dtype)
             helper.append_op(type='assign_value',
                              outputs={'Out': [output]},
                              attrs={
...