diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 0bf5822425c3ad452fd1a6178c2ddeeff159a0fd..3c57efc50b06fdee0e597ec3aafc71c1ba763a99 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -184,7 +184,7 @@ def gelu(x, approximate=False, name=None):
         return _C_ops.gelu(x, approximate)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'gelu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'gelu'
         )
         helper = LayerHelper("gelu", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index fef9b399db2ecacf8d6ee1b66656b9713be2b095..01c323d6596fad4bfb6e6ef5ccc0c348835865f3 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -1150,7 +1150,7 @@ def dropout(
     else:
         helper = LayerHelper('dropout', **locals())
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'dropout'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'dropout'
         )
 
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -1191,7 +1191,7 @@ def dropout(
     else:  # sometimes called dropout_nd  #TODO: optimize with c++
         if not in_dynamic_mode():
             check_variable_and_dtype(
-                x, 'x', ['float16', 'float32', 'float64'], 'dropout'
+                x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'dropout'
             )
         dtype = x.dtype
         keep_prob = 1 - p
@@ -1407,7 +1407,7 @@ def alpha_dropout(x, p=0.5, training=True, name=None):
 
     if not in_dynamic_mode():
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'alpha_dropout'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'alpha_dropout'
         )
 
     if training:
diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py
index c0e86267f34846ef972d7a0eab6d0844d698116f..b9d226374ae541a024434eff2ece69a8d8a8ff93 100644
--- a/python/paddle/nn/functional/extension.py
+++ b/python/paddle/nn/functional/extension.py
@@ -409,7 +409,10 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
     else:
         helper = LayerHelper("temporal_shift", **locals())
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'temporal_shift'
+            x,
+            'x',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'temporal_shift',
         )
         check_type(seg_num, 'seg_num', int, 'temporal_shift')
         check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index 133bdd87530b30aa54f07ce7a84e675ee528f235..aff98a97b11f0ba3e44b51ec69f7be5b03d4a235 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -397,7 +397,9 @@ def avg_pool2d(
     else:
         op_type = 'pool2d'
         helper = LayerHelper(op_type, **locals())
-        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'avg_pool2d'
+        )
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
 
@@ -1259,7 +1261,7 @@ def max_pool2d(
         op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
         helper = LayerHelper(op_type, **locals())
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'max_pool2d'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'max_pool2d'
         )
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
@@ -1419,7 +1421,9 @@ def max_pool3d(
     else:
         op_type = "max_pool3d_with_index" if return_mask else "pool3d"
         helper = LayerHelper(op_type, **locals())
-        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'max_pool3d'
+        )
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
         mask = helper.create_variable_for_type_inference('int32')
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 43e6b9c6d860638553d20c1eb08489ff61af6049..5c722dc3507d001ee0c8400711c1c6a45f0934e1 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -333,7 +333,7 @@ def linspace(start, stop, num, dtype=None, name=None):
         check_dtype(
             start.dtype,
             'start',
-            ['float32', 'float64', 'int32', 'int64', 'float16', 'bfloat16'],
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
             'linspace',
         )
     else:
@@ -343,7 +343,7 @@ def linspace(start, stop, num, dtype=None, name=None):
         check_dtype(
             stop.dtype,
             'stop',
-            ['float32', 'float64', 'int32', 'int64', 'float16', 'bfloat16'],
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
             'linspace',
         )
     else:
@@ -353,7 +353,7 @@ def linspace(start, stop, num, dtype=None, name=None):
     check_dtype(
         dtype,
         'dtype',
-        ['int32', 'int64', 'float32', 'float64', 'float16', 'bfloat16'],
+        ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
         'linspace',
     )
     if (
@@ -1787,7 +1787,7 @@ def diag(x, offset=0, padding_value=0, name=None):
         check_dtype(
             x.dtype,
             'x',
-            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
             'diag_v2',
         )
         check_type(offset, 'offset', (int), 'diag_v2')
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index df80c61e3abe4ad7479d1401be97a835b5067adb..9d29517730de470ad89600f88d137c3bdf952ee1 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -1112,10 +1112,16 @@ def dot(x, y, name=None):
         assert y is not None, f'y cannot be None in {op_type}'
 
         check_variable_and_dtype(
-            x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type
+            x,
+            'x',
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
+            op_type,
         )
         check_variable_and_dtype(
-            y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type
+            y,
+            'y',
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
+            op_type,
         )
 
         helper = LayerHelper(op_type, **locals())
@@ -1374,13 +1380,13 @@ def cross(x, y, axis=9, name=None):
         check_variable_and_dtype(
             x,
             'x',
-            ['float16', 'float32', 'float64', "int32", "int64"],
+            ['float16', 'uint16', 'float32', 'float64', "int32", "int64"],
             'cross',
         )
         check_variable_and_dtype(
             y,
             'y',
-            ['float16', 'float32', 'float64', "int32", "int64"],
+            ['float16', 'uint16', 'float32', 'float64', "int32", "int64"],
             'cross',
         )
         helper = LayerHelper("cross", **locals())
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 6ce6587d44e777647a3b03db6850f08e24142c49..e2b91197f2d09346814cd8fb35c455b5d72da74c 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -1971,12 +1971,12 @@ def split(x, num_or_sections, axis=0, name=None):
             [
                 'bool',
                 'float16',
+                'uint16',
                 'float32',
                 'float64',
                 'int32',
                 'int64',
                 'uint8',
-                'uint16',
                 'int8',
             ],
             'split',
@@ -2804,7 +2804,15 @@ def unbind(input, axis=0):
         check_dtype(
             dtype,
             'unbind',
-            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+            [
+                'bool',
+                'float16',
+                'uint16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+            ],
             'unbind',
         )
         outs = [
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 65d94c11385626f74825fcb1b20d1afcfcfe78c8..c88cf84ec5086484ba96ee2da6a44785bcb5bde0 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -513,7 +513,15 @@ def _elementwise_op(helper):
             'complex128',
         ]
     else:
-        data_type = ['float16', 'float32', 'float64', 'int32', 'int64', 'bool']
+        data_type = [
+            'float16',
+            'uint16',
+            'float32',
+            'float64',
+            'int32',
+            'int64',
+            'bool',
+        ]
     check_variable_and_dtype(
         x,
         'x',
@@ -3118,7 +3126,15 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
         check_dtype(
             x.dtype,
             'Input',
-            ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
+            [
+                'bool',
+                'int32',
+                'int64',
+                'float16',
+                'uint16',
+                'float32',
+                'float64',
+            ],
             'diagonal',
         )
 
@@ -3277,7 +3293,7 @@ def cumsum(x, axis=None, dtype=None, name=None):
         check_variable_and_dtype(
             x,
             'x',
-            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
             'cumsum',
         )
         check_type(x, 'x', (Variable), 'cumsum')
@@ -3425,7 +3441,16 @@ def cumprod(x, dim=None, dtype=None, name=None):
         check_variable_and_dtype(
             x,
             "x",
-            ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
+            [
+                'complex64',
+                'complex128',
+                'float16',
+                'uint16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+            ],
             'cumprod',
         )
         check_type(dim, 'dim', int, 'cumprod')
@@ -3995,6 +4020,7 @@ def conj(x, name=None):
             'complex64',
             'complex128',
             'float16',
+            'uint16',
             'float32',
             'float64',
             'int32',
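
Note on the dtype strings above: Paddle stores bfloat16 tensor data in a uint16 container, so check_variable_and_dtype / check_dtype report a bfloat16 input as 'uint16'. Whitelisting 'uint16' in each allowed-dtype list is what lets these ops build in static-graph mode with bfloat16 inputs (e.g. under bf16 AMP). The snippet below is a minimal illustrative sketch, not part of the patch: it assumes a Paddle build with this change applied, uses gelu as a stand-in for any of the ops touched here, and the variable name and shape are made up.

import paddle

paddle.enable_static()

main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    # 'uint16' is Paddle's carrier dtype for bfloat16, so this declares a
    # bf16 input variable in the static graph.
    x = paddle.static.data(name='x', shape=[4, 8], dtype='uint16')
    # Before this patch, check_variable_and_dtype raised TypeError here
    # because 'uint16' was missing from gelu's allowed dtypes; with the
    # patch applied, the op is added to the program normally.
    out = paddle.nn.functional.gelu(x)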