From bd66f2d9b714941ec225fafbfe5e08f7d07d0178 Mon Sep 17 00:00:00 2001
From: Zhang Zheng <32410583+ZzSean@users.noreply.github.com>
Date: Fri, 21 Apr 2023 15:16:00 +0800
Subject: [PATCH] [Cherry-Pick] Add check_dtype for some API (part 2) (#53137)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[Part II] Extend check_dtype in the static-graph branches of these APIs to also cover float16 and bfloat16.
---
 .../fluid/tests/unittests/test_unique.py     |  4 +-
 .../unittests/test_unique_with_counts.py     |  2 +-
 python/paddle/metric/metrics.py              |  2 +-
 python/paddle/nn/functional/activation.py    | 44 +++++++++++--------
 python/paddle/nn/functional/conv.py          |  7 ++-
 python/paddle/nn/functional/norm.py          |  2 +-
 python/paddle/nn/layer/norm.py               |  5 ++-
 python/paddle/static/nn/metric.py            |  2 +-
 python/paddle/tensor/creation.py             |  2 +-
 python/paddle/tensor/linalg.py               |  5 ++-
 python/paddle/tensor/manipulation.py         | 17 ++++++-
 python/paddle/tensor/math.py                 | 10 ++---
 python/paddle/tensor/ops.py                  | 32 +++++++-------
 13 files changed, 82 insertions(+), 52 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py
index 8e31f377bba..97566042720 100644
--- a/python/paddle/fluid/tests/unittests/test_unique.py
+++ b/python/paddle/fluid/tests/unittests/test_unique.py
@@ -86,7 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
         def test_dtype():
             data = paddle.static.data(
-                shape=[10], dtype="float16", name="input"
+                shape=[10], dtype="int16", name="input"
             )
             paddle.unique(data)
@@ -424,7 +424,7 @@ class TestUniqueError(unittest.TestCase):
             paddle.static.Program(), paddle.static.Program()
         ):
             x = paddle.static.data(
-                name='x', shape=[10, 10], dtype='float16'
+                name='x', shape=[10, 10], dtype='int16'
             )
             result = paddle.unique(x)
diff --git a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
index fe56cb09297..2690d618024 100644
--- a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
+++ b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
@@ -85,7 +85,7 @@ class TestUniqueWithCountsRaiseError(unittest.TestCase):
         def test_dtype():
             data = paddle.static.data(
-                shape=[10], dtype="float16", name="input"
+                shape=[10], dtype="int16", name="input"
             )
             paddle.unique(data)
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index 13a916abec5..87bdc1aaad0 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -817,7 +817,7 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
     helper = LayerHelper("accuracy", **locals())
     check_variable_and_dtype(
-        input, 'input', ['float16', 'float32', 'float64'], 'accuracy'
+        input, 'input', ['float16', 'uint16', 'float32', 'float64'], 'accuracy'
     )
     topk_out, topk_indices = paddle.topk(input, k=k)
     acc_out = helper.create_variable_for_type_inference(dtype="float32")
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index c146ac7a348..8fd603f4379 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -62,7 +62,7 @@ def celu(x, alpha=1.0, name=None):
         return _C_ops.celu(x, alpha)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'celu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'celu'
         )
         helper = LayerHelper("celu", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -114,7 +114,7 @@ def elu(x, alpha=1.0, name=None):
     else:
        check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'elu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'elu'
         )
         helper = LayerHelper("elu", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -234,7 +234,7 @@ def hardshrink(x, threshold=0.5, name=None):
         return _C_ops.hardshrink(x, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'hardshrink'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardshrink'
         )
         helper = LayerHelper('hardshrink', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -339,7 +339,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
         return _C_ops.hardsigmoid(x, slope, offset)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'hardsigmoid'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardsigmoid'
         )
         helper = LayerHelper('hardsigmoid', **locals())
@@ -390,7 +390,7 @@ def hardswish(x, name=None):
         return _C_ops.hardswish(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'hardswish'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardswish'
         )
         helper = LayerHelper('hardswish', **locals())
@@ -439,7 +439,7 @@ def leaky_relu(x, negative_slope=0.01, name=None):
         return _C_ops.leaky_relu(x, negative_slope)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'leaky_relu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'leaky_relu'
         )
         helper = LayerHelper('leaky_relu', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -658,7 +658,7 @@ def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None):
         return _C_ops.rrelu(x, lower, upper, is_test)
     else:
         check_variable_and_dtype(
-            x, 'X', ['float16', 'float32', 'float64'], 'rrelu'
+            x, 'X', ['float16', 'uint16', 'float32', 'float64'], 'rrelu'
         )
         helper = LayerHelper('rrelu', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -706,7 +706,7 @@ def relu(x, name=None):
         return _C_ops.relu(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'relu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu'
         )
         helper = LayerHelper('relu', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -871,7 +871,9 @@ def relu6(x, name=None):
     if in_dynamic_mode():
         return _legacy_C_ops.relu6(x, 'threshold', threshold)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu6'
+    )
     helper = LayerHelper('relu6', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
@@ -980,7 +982,7 @@ def silu(x, name=None):
         return _C_ops.silu(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'silu'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'silu'
         )
         helper = LayerHelper("silu", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1194,7 +1196,7 @@ def softplus(x, beta=1, threshold=20, name=None):
         return _C_ops.softplus(x, beta, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'softplus'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softplus'
         )
         helper = LayerHelper('softplus', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1253,7 +1255,7 @@ def softshrink(x, threshold=0.5, name=None):
         return _C_ops.softshrink(x, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'softshrink'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softshrink'
         )
         helper = LayerHelper('softshrink', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1299,7 +1301,7 @@ def softsign(x, name=None):
         return _legacy_C_ops.softsign(x)
     check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'softsign'
+        x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softsign'
     )
     helper = LayerHelper('softsign', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
@@ -1338,7 +1340,7 @@ def swish(x, name=None):
         return _C_ops.swish(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'swish'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'swish'
         )
         helper = LayerHelper('swish', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1384,7 +1386,7 @@ def mish(x, name=None):
         return _C_ops.mish(x, 20)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'mish'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'mish'
         )
         helper = LayerHelper('mish', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1423,7 +1425,7 @@ def tanhshrink(x, name=None):
         return _C_ops.tanh_shrink(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'tanhshrink'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'tanhshrink'
         )
         helper = LayerHelper('tanh_shrink', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1473,7 +1475,10 @@ def thresholded_relu(x, threshold=1.0, name=None):
         return _C_ops.thresholded_relu(x, threshold)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'thresholded_relu'
+            x,
+            'x',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'thresholded_relu',
         )
         helper = LayerHelper('thresholded_relu', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1551,7 +1556,10 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
     else:
         if dtype is None:
             check_variable_and_dtype(
-                x, 'x', ['float16', 'float32', 'float64'], 'log_softmax'
+                x,
+                'x',
+                ['float16', 'uint16', 'float32', 'float64'],
+                'log_softmax',
             )
         else:
             check_dtype(
diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py
index 1bab6176a1d..bd3100a644c 100644
--- a/python/paddle/nn/functional/conv.py
+++ b/python/paddle/nn/functional/conv.py
@@ -237,7 +237,7 @@ def _conv_nd(
             "data_format": data_format,
         }
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], op_type
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], op_type
         )
         helper = LayerHelper(op_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
@@ -1344,7 +1344,10 @@ def conv2d_transpose(
             'data_format': data_format,
         }
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'conv2d_transpose'
+            x,
+            'x',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'conv2d_transpose',
         )
         helper = LayerHelper(op_type, **locals())
         pre_bias = helper.create_variable_for_type_inference(x.dtype)
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index 3c81335c444..9567b8244ac 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -213,7 +213,7 @@ def batch_norm(
     else:
         check_variable_and_dtype(
-            x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
+            x, 'input', ['float16', 'uint16', 'float32', 'float64'], 'BatchNorm'
         )
         # for static need dict
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 3421fe4f9b0..9facd8e9172 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -1561,7 +1561,10 @@ class SyncBatchNorm(_BatchNormBase):
             return sync_batch_norm_out
         check_variable_and_dtype(
-            x, 'input', ['float16', 'float32', 'float64'], 'SyncBatchNorm'
+            x,
+            'input',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'SyncBatchNorm',
         )
         attrs = {
diff --git a/python/paddle/static/nn/metric.py b/python/paddle/static/nn/metric.py
index a9a81a2f1cf..c31b5be5fa8 100644
--- a/python/paddle/static/nn/metric.py
+++ b/python/paddle/static/nn/metric.py
@@ -89,7 +89,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
     helper = LayerHelper("accuracy", **locals())
     check_variable_and_dtype(
-        input, 'input', ['float16', 'float32', 'float64'], 'accuracy'
+        input, 'input', ['float16', 'uint16', 'float32', 'float64'], 'accuracy'
     )
     topk_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     topk_indices = helper.create_variable_for_type_inference(dtype="int64")
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 5c722dc3507..3248aa30103 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -1354,7 +1354,7 @@ def _tril_triu_op(helper):
     check_variable_and_dtype(
         x,
         'x',
-        ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
+        ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64', 'bool'],
         op_type,
     )
     if len(x.shape) < 2:
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 418e30a5beb..dacbeb7f308 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -415,7 +415,10 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         if axis is not None:
             check_type(axis, 'axis', (int), 'p_norm')
         check_variable_and_dtype(
-            input, 'input', ['float32', 'float64'], 'p_norm'
+            input,
+            'input',
+            ['float16', 'uint16', 'float32', 'float64'],
+            'p_norm',
         )
         attrs = {
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index b3f615cf635..32e05851c95 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -2201,6 +2201,7 @@ def squeeze(x, axis=None, name=None):
             'input',
             [
                 'float16',
+                'uint16',
                 'float32',
                 'float64',
                 'bool',
@@ -2477,7 +2478,10 @@ def unique(
         return tuple(outs)
     else:
         check_variable_and_dtype(
-            x, "input", ['float32', 'float64', 'int32', 'int64'], 'unique'
+            x,
+            "input",
+            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
+            'unique',
         )
         check_type(return_index, 'return_index', bool, 'unique')
         check_type(return_inverse, 'return_inverse', bool, 'unique')
@@ -2597,6 +2601,7 @@ def unsqueeze(x, axis, name=None):
             'input',
             [
                 'float16',
+                'uint16',
                 'float32',
                 'float64',
                 'bool',
@@ -3867,7 +3872,15 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
         check_variable_and_dtype(
             x,
             'x',
-            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+            [
+                'bool',
+                'float16',
+                'uint16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+            ],
             'strided_slice',
         )
         check_type(axes, 'axes', (list, tuple), 'strided_slice')
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index c88cf84ec50..ebbcbad581a 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -288,7 +288,7 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
         return _C_ops.stanh(x, scale_a, scale_b)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'stanh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'stanh'
         )
         helper = LayerHelper('stanh', **locals())
@@ -2717,7 +2717,7 @@ def log1p(x, name=None):
         return _C_ops.log1p(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], "log1p"
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log1p"
         )
         inputs = {'X': [x]}
         helper = LayerHelper('log1p', **locals())
@@ -2769,7 +2769,7 @@ def log2(x, name=None):
         return _C_ops.log2(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], "log2"
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log2"
        )
         inputs = {'X': [x]}
         helper = LayerHelper('log2', **locals())
@@ -2821,7 +2821,7 @@ def log10(x, name=None):
         return _C_ops.log10(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], "log10"
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log10"
         )
         inputs = {'X': [x]}
         helper = LayerHelper('log10', **locals())
@@ -4252,7 +4252,7 @@ def logit(x, eps=None, name=None):
         return _C_ops.logit(x, eps)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'logit'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'logit'
         )
         helper = LayerHelper("logit", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py
index c8cdb4e4b4e..5fd9372c9cd 100644
--- a/python/paddle/tensor/ops.py
+++ b/python/paddle/tensor/ops.py
@@ -221,7 +221,7 @@ def acos(x, name=None):
         return _C_ops.acos(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'acos'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acos'
         )
         helper = LayerHelper('acos', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -258,7 +258,7 @@ def acosh(x, name=None):
         return _C_ops.acosh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'acosh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acosh'
         )
         helper = LayerHelper('acosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -295,7 +295,7 @@ def asin(x, name=None):
         return _C_ops.asin(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'asin'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asin'
         )
         helper = LayerHelper('asin', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -332,7 +332,7 @@ def asinh(x, name=None):
         return _C_ops.asinh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'asinh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asinh'
        )
         helper = LayerHelper('asinh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -369,7 +369,7 @@ def atan(x, name=None):
         return _C_ops.atan(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'atan'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atan'
         )
         helper = LayerHelper('atan', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -406,7 +406,7 @@ def atanh(x, name=None):
         return _C_ops.atanh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'atanh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atanh'
         )
         helper = LayerHelper('atanh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -444,7 +444,7 @@ def ceil(x, name=None):
         return _C_ops.ceil(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'ceil'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'ceil'
         )
         helper = LayerHelper('ceil', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -522,7 +522,7 @@ def cosh(x, name=None):
         return _C_ops.cosh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'cosh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'cosh'
         )
         helper = LayerHelper('cosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -611,7 +611,7 @@ def expm1(x, name=None):
         return _C_ops.expm1(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'expm1'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'expm1'
         )
         helper = LayerHelper('expm1', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -649,7 +649,7 @@ def floor(x, name=None):
         return _C_ops.floor(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'floor'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'floor'
         )
         helper = LayerHelper('floor', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -687,7 +687,7 @@ def reciprocal(x, name=None):
         return _C_ops.reciprocal(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'reciprocal'
         )
         helper = LayerHelper('reciprocal', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -734,7 +734,7 @@ def round(x, name=None):
         return _C_ops.round(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'round'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'round'
         )
         helper = LayerHelper('round', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -773,7 +773,7 @@ def rsqrt(x, name=None):
         return _C_ops.rsqrt(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'rsqrt'
         )
         helper = LayerHelper('rsqrt', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -848,7 +848,7 @@ def sin(x, name=None):
         return _C_ops.sin(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sin'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sin'
         )
         helper = LayerHelper('sin', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -885,7 +885,7 @@ def sinh(x, name=None):
         return _C_ops.sinh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sinh'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sinh'
         )
         helper = LayerHelper('sinh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -1010,7 +1010,7 @@ def tan(x, name=None):
         return _C_ops.tan(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'tan'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'tan'
         )
         helper = LayerHelper('tan', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
-- 
GitLab
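
A minimal usage sketch of the behavior these relaxed checks enable (editorial illustration, not part of the patch): in a static-graph program, paddle.unique now accepts float16 inputs ('uint16' in these dtype lists is how the checks spell bfloat16, per the commit message), while an unsupported integer dtype such as int16 is still rejected with a TypeError, mirroring the updated unit tests. Only APIs that already appear in the patch are used; variable names are illustrative.

    import paddle

    paddle.enable_static()

    with paddle.static.program_guard(
        paddle.static.Program(), paddle.static.Program()
    ):
        # float16 now passes the static-graph dtype check for paddle.unique.
        x_fp16 = paddle.static.data(name='x_fp16', shape=[10], dtype='float16')
        out = paddle.unique(x_fp16)

        # int16 is still not in the allowed list, so check_variable_and_dtype
        # raises TypeError while the program is being built.
        try:
            x_i16 = paddle.static.data(name='x_i16', shape=[10], dtype='int16')
            paddle.unique(x_i16)
        except TypeError as e:
            print("int16 rejected as expected:", e)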