diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py
index 8e31f377bba5e7142d7ed2c5faed5d44c637f724..97566042720f7d50f8447b0ccf5b683aec592daa 100644
--- a/python/paddle/fluid/tests/unittests/test_unique.py
+++ b/python/paddle/fluid/tests/unittests/test_unique.py
@@ -86,7 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
 
         def test_dtype():
             data = paddle.static.data(
-                shape=[10], dtype="float16", name="input"
+                shape=[10], dtype="int16", name="input"
             )
             paddle.unique(data)
 
@@ -424,7 +424,7 @@ class TestUniqueError(unittest.TestCase):
                 paddle.static.Program(), paddle.static.Program()
             ):
                 x = paddle.static.data(
-                    name='x', shape=[10, 10], dtype='float16'
+                    name='x', shape=[10, 10], dtype='int16'
                 )
                 result = paddle.unique(x)
 
diff --git a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
index fe56cb09297c142c9618485f93a012e351a84be7..2690d6180247682b4abf24db58f9d20a3c60c745 100644
--- a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
+++ b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
@@ -85,7 +85,7 @@ class TestUniqueWithCountsRaiseError(unittest.TestCase):
 
         def test_dtype():
             data = paddle.static.data(
-                shape=[10], dtype="float16", name="input"
+                shape=[10], dtype="int16", name="input"
             )
             paddle.unique(data)
 
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index 9372d7e0f63f10d41ac6a42aec0d9b1c2201fff7..430b82c07ae0cf8c25989a181b378cac3050a62f 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -817,7 +817,7 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
 
     helper = LayerHelper("accuracy", **locals())
     check_variable_and_dtype(
-        input, 'input', ['float16', 'float32', 'float64'], 'accuracy'
+        input, 'input', ['float16', 'uint16', 'float32', 'float64'], 'accuracy'
     )
     topk_out, topk_indices = paddle.topk(input, k=k)
     acc_out = helper.create_variable_for_type_inference(dtype="float32")
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 3c57efc50b06fdee0e597ec3aafc71c1ba763a99..d89ce9cb4537a07763254b012327bb6629b0bb81 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -62,7 +62,7 @@ def celu(x, alpha=1.0, name=None):
         return _C_ops.celu(x, alpha)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'celu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'celu'
         )
         helper = LayerHelper("celu", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -114,7 +114,7 @@ def elu(x, alpha=1.0, name=None):
 
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'elu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'elu'
         )
         helper = LayerHelper("elu", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -234,7 +234,7 @@ def hardshrink(x, threshold=0.5, name=None):
         return _C_ops.hardshrink(x, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'hardshrink'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardshrink'
         )
         helper = LayerHelper('hardshrink', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -339,7 +339,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
         return _C_ops.hardsigmoid(x, slope, offset)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'hardsigmoid'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardsigmoid'
         )
 
         helper = LayerHelper('hardsigmoid', **locals())
@@ -390,7 +390,7 @@ def hardswish(x, name=None):
         return _C_ops.hardswish(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'hardswish'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardswish'
         )
 
         helper = LayerHelper('hardswish', **locals())
@@ -439,7 +439,7 @@ def leaky_relu(x, negative_slope=0.01, name=None):
         return _C_ops.leaky_relu(x, negative_slope)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'leaky_relu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'leaky_relu'
         )
         helper = LayerHelper('leaky_relu', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -661,7 +661,7 @@ def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None):
         return _C_ops.rrelu(x, lower, upper, is_test)
     else:
         check_variable_and_dtype(
-            x, 'X', ['float16', 'float32', 'float64'], 'rrelu'
+            x, 'X', ['float16', 'uint16', 'float32', 'float64'], 'rrelu'
         )
         helper = LayerHelper('rrelu', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -709,7 +709,7 @@ def relu(x, name=None):
         return _C_ops.relu(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'relu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu'
         )
         helper = LayerHelper('relu', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -874,7 +874,9 @@ def relu6(x, name=None):
     if in_dynamic_mode():
         return _legacy_C_ops.relu6(x, 'threshold', threshold)
 
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu6'
+    )
     helper = LayerHelper('relu6', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
@@ -983,7 +985,7 @@ def silu(x, name=None):
         return _C_ops.silu(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'silu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'silu'
         )
         helper = LayerHelper("silu", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1197,7 +1199,7 @@ def softplus(x, beta=1, threshold=20, name=None):
         return _C_ops.softplus(x, beta, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'softplus'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softplus'
         )
         helper = LayerHelper('softplus', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1256,7 +1258,7 @@ def softshrink(x, threshold=0.5, name=None):
         return _C_ops.softshrink(x, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'softshrink'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softshrink'
         )
         helper = LayerHelper('softshrink', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1302,7 +1304,7 @@ def softsign(x, name=None):
         return _legacy_C_ops.softsign(x)
 
     check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'softsign'
+        x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softsign'
     )
     helper = LayerHelper('softsign', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
@@ -1341,7 +1343,7 @@ def swish(x, name=None):
         return _C_ops.swish(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'swish'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'swish'
         )
         helper = LayerHelper('swish', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1387,7 +1389,7 @@ def mish(x, name=None):
         return _C_ops.mish(x, 20)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'mish'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'mish'
         )
         helper = LayerHelper('mish', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1426,7 +1428,7 @@ def tanhshrink(x, name=None):
         return _C_ops.tanh_shrink(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'tanhshrink'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'tanhshrink'
         )
         helper = LayerHelper('tanh_shrink', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1476,7 +1478,10 @@ def thresholded_relu(x, threshold=1.0, name=None):
         return _C_ops.thresholded_relu(x, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'thresholded_relu'
+            x,
+            'x',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'thresholded_relu',
         )
         helper = LayerHelper('thresholded_relu', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1554,7 +1559,10 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
     else:
         if dtype is None:
             check_variable_and_dtype(
-                x, 'x', ['float16', 'float32', 'float64'], 'log_softmax'
+                x,
+                'x',
+                ['float16', 'uint16', 'float32', 'float64'],
+                'log_softmax',
             )
         else:
             check_dtype(
diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py
index 1bab6176a1dbe5bd2233ae0773feb2c0180cb6aa..bd3100a644c0d09dd378158b9179cd9ebdb957dc 100644
--- a/python/paddle/nn/functional/conv.py
+++ b/python/paddle/nn/functional/conv.py
@@ -237,7 +237,7 @@ def _conv_nd(
             "data_format": data_format,
         }
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], op_type
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], op_type
         )
         helper = LayerHelper(op_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
@@ -1344,7 +1344,10 @@ def conv2d_transpose(
             'data_format': data_format,
         }
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'conv2d_transpose'
+            x,
+            'x',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'conv2d_transpose',
         )
         helper = LayerHelper(op_type, **locals())
         pre_bias = helper.create_variable_for_type_inference(x.dtype)
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index 3c81335c44433d59a01ed1695e1eb5390cc4f45b..9567b8244acf028b14604f0b5ca12f4c4f5630a2 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -213,7 +213,7 @@ def batch_norm(
 
     else:
         check_variable_and_dtype(
-            x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
+            x, 'input', ['float16', 'uint16', 'float32', 'float64'], 'BatchNorm'
         )
 
         # for static need dict
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 3421fe4f9b00c9c84f967b7acc6ff37fd3c4701c..9facd8e917273b3f420404a5687b9714e3319408 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -1561,7 +1561,10 @@ class SyncBatchNorm(_BatchNormBase):
             return sync_batch_norm_out
 
         check_variable_and_dtype(
-            x, 'input', ['float16', 'float32', 'float64'], 'SyncBatchNorm'
+            x,
+            'input',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'SyncBatchNorm',
         )
 
         attrs = {
diff --git a/python/paddle/static/nn/metric.py b/python/paddle/static/nn/metric.py
index 54377d824dae6cdcff1b71a612bb5a1d72dd62be..7b86afb5c1608d8dfd231d125ebe8f8c2fcc3dbf 100644
--- a/python/paddle/static/nn/metric.py
+++ b/python/paddle/static/nn/metric.py
@@ -89,7 +89,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
 
     helper = LayerHelper("accuracy", **locals())
     check_variable_and_dtype(
-        input, 'input', ['float16', 'float32', 'float64'], 'accuracy'
+        input, 'input', ['float16', 'uint16', 'float32', 'float64'], 'accuracy'
     )
     topk_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     topk_indices = helper.create_variable_for_type_inference(dtype="int64")
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 5c722dc3507d001ee0c8400711c1c6a45f0934e1..3248aa30103f7d1ffae9eb586b1e825f91280c0e 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -1354,7 +1354,7 @@ def _tril_triu_op(helper):
     check_variable_and_dtype(
         x,
         'x',
-        ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
+        ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64', 'bool'],
         op_type,
     )
     if len(x.shape) < 2:
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 9d29517730de470ad89600f88d137c3bdf952ee1..3dcbc7c6ac63b26d6d63f09e13a2dff24f29bfe5 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -415,7 +415,10 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         if axis is not None:
             check_type(axis, 'axis', (int), 'p_norm')
         check_variable_and_dtype(
-            input, 'input', ['float32', 'float64'], 'p_norm'
+            input,
+            'input',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'p_norm',
         )
 
         attrs = {
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index e2b91197f2d09346814cd8fb35c455b5d72da74c..7d2726e1f401e5160f154cb8505cf108ef6e5efa 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -2201,6 +2201,7 @@ def squeeze(x, axis=None, name=None):
         'input',
         [
             'float16',
+            'uint16',
             'float32',
             'float64',
             'bool',
@@ -2477,7 +2478,10 @@ def unique(
         return tuple(outs)
     else:
         check_variable_and_dtype(
-            x, "input", ['float32', 'float64', 'int32', 'int64'], 'unique'
+            x,
+            "input",
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
+            'unique',
         )
         check_type(return_index, 'return_index', bool, 'unique')
         check_type(return_inverse, 'return_inverse', bool, 'unique')
@@ -2597,6 +2601,7 @@ def unsqueeze(x, axis, name=None):
         'input',
         [
             'float16',
+            'uint16',
             'float32',
             'float64',
             'bool',
@@ -3867,7 +3872,15 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
         check_variable_and_dtype(
             x,
             'x',
-            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+            [
+                'bool',
+                'float16',
+                'uint16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+            ],
             'strided_slice',
         )
         check_type(axes, 'axes', (list, tuple), 'strided_slice')
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index c88cf84ec5086484ba96ee2da6a44785bcb5bde0..ebbcbad581afef04c9445876dc828ba32127b926 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -288,7 +288,7 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
         return _C_ops.stanh(x, scale_a, scale_b)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'stanh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'stanh'
         )
 
         helper = LayerHelper('stanh', **locals())
@@ -2717,7 +2717,7 @@ def log1p(x, name=None):
         return _C_ops.log1p(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], "log1p"
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log1p"
         )
         inputs = {'X': [x]}
         helper = LayerHelper('log1p', **locals())
@@ -2769,7 +2769,7 @@ def log2(x, name=None):
         return _C_ops.log2(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], "log2"
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log2"
         )
         inputs = {'X': [x]}
         helper = LayerHelper('log2', **locals())
@@ -2821,7 +2821,7 @@ def log10(x, name=None):
         return _C_ops.log10(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], "log10"
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log10"
         )
         inputs = {'X': [x]}
         helper = LayerHelper('log10', **locals())
@@ -4252,7 +4252,7 @@ def logit(x, eps=None, name=None):
         return _C_ops.logit(x, eps)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'logit'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'logit'
         )
         helper = LayerHelper("logit", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py
index c8cdb4e4b4e305847574aef6a1b2c8209d20f497..5fd9372c9cd9ca93881e4eef473b0196acacddcb 100644
--- a/python/paddle/tensor/ops.py
+++ b/python/paddle/tensor/ops.py
@@ -221,7 +221,7 @@ def acos(x, name=None):
         return _C_ops.acos(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'acos'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acos'
         )
         helper = LayerHelper('acos', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -258,7 +258,7 @@ def acosh(x, name=None):
         return _C_ops.acosh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'acosh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acosh'
         )
         helper = LayerHelper('acosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -295,7 +295,7 @@ def asin(x, name=None):
         return _C_ops.asin(x)
    else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'asin'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asin'
         )
         helper = LayerHelper('asin', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -332,7 +332,7 @@ def asinh(x, name=None):
         return _C_ops.asinh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'asinh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asinh'
         )
         helper = LayerHelper('asinh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -369,7 +369,7 @@ def atan(x, name=None):
         return _C_ops.atan(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'atan'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atan'
         )
         helper = LayerHelper('atan', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -406,7 +406,7 @@ def atanh(x, name=None):
         return _C_ops.atanh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'atanh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atanh'
         )
         helper = LayerHelper('atanh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -444,7 +444,7 @@ def ceil(x, name=None):
         return _C_ops.ceil(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'ceil'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'ceil'
         )
         helper = LayerHelper('ceil', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -522,7 +522,7 @@ def cosh(x, name=None):
         return _C_ops.cosh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'cosh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'cosh'
         )
         helper = LayerHelper('cosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -611,7 +611,7 @@ def expm1(x, name=None):
         return _C_ops.expm1(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'expm1'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'expm1'
         )
         helper = LayerHelper('expm1', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -649,7 +649,7 @@ def floor(x, name=None):
         return _C_ops.floor(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'floor'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'floor'
         )
         helper = LayerHelper('floor', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -687,7 +687,7 @@ def reciprocal(x, name=None):
         return _C_ops.reciprocal(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'reciprocal'
         )
         helper = LayerHelper('reciprocal', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -734,7 +734,7 @@ def round(x, name=None):
         return _C_ops.round(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'round'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'round'
         )
         helper = LayerHelper('round', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -773,7 +773,7 @@ def rsqrt(x, name=None):
         return _C_ops.rsqrt(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'rsqrt'
         )
         helper = LayerHelper('rsqrt', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -848,7 +848,7 @@ def sin(x, name=None):
         return _C_ops.sin(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sin'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sin'
         )
         helper = LayerHelper('sin', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -885,7 +885,7 @@ def sinh(x, name=None):
         return _C_ops.sinh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sinh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sinh'
         )
         helper = LayerHelper('sinh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -1010,7 +1010,7 @@ def tan(x, name=None):
         return _C_ops.tan(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'tan'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'tan'
         )
         helper = LayerHelper('tan', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)