Unverified commit 4efca9fb authored by Zhang Zheng, committed by GitHub

[AMP OP&Test] Add check_dtype for some API (part 2) (#53136)

* [AMP OP&Test] Add check_dtype for some API (part 2)

* fix ci

* fix ci
Parent 0f99debd
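Every library-side hunk below follows the same pattern: the dtype whitelist passed to check_variable_and_dtype in the static-graph branch gains 'uint16', which is the storage dtype Paddle uses for bfloat16 tensors, so AMP bfloat16 inputs pass the check instead of raising a TypeError. A minimal sketch of that pattern, assuming the import path Paddle used at the time of this commit; the helper name _check_amp_dtype is hypothetical and only illustrates the widened whitelist:

    # Sketch only: check_variable_and_dtype lived in paddle.fluid.data_feeder
    # at the time of this commit; _check_amp_dtype is a hypothetical helper.
    from paddle.fluid.data_feeder import check_variable_and_dtype

    def _check_amp_dtype(x, op_name):
        # 'uint16' is Paddle's storage dtype for bfloat16, so adding it lets
        # bfloat16 (AMP) inputs through the static-graph dtype check.
        check_variable_and_dtype(
            x, 'x', ['float16', 'uint16', 'float32', 'float64'], op_name
        )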
@@ -86,7 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
def test_dtype():
data = paddle.static.data(
shape=[10], dtype="float16", name="input"
shape=[10], dtype="int16", name="input"
)
paddle.unique(data)
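The test hunks above and below swap the negative-test dtype from float16 to int16: with float16 now accepted by these APIs, a test that expects the dtype check to fail needs a dtype that is still rejected. A hedged sketch of the updated check, reusing the class name from the hunk above; the enable_static call and assertRaises wrapper are assumed from the surrounding test file:

    import unittest
    import paddle

    class TestUniqueRaiseError(unittest.TestCase):
        def test_errors(self):
            paddle.enable_static()

            def test_dtype():
                # int16 is still outside paddle.unique's supported dtypes, so
                # the static-graph check raises TypeError; float16 no longer does.
                data = paddle.static.data(shape=[10], dtype="int16", name="input")
                paddle.unique(data)

            self.assertRaises(TypeError, test_dtype)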
@@ -424,7 +424,7 @@ class TestUniqueError(unittest.TestCase):
paddle.static.Program(), paddle.static.Program()
):
x = paddle.static.data(
name='x', shape=[10, 10], dtype='float16'
name='x', shape=[10, 10], dtype='int16'
)
result = paddle.unique(x)
@@ -85,7 +85,7 @@ class TestUniqueWithCountsRaiseError(unittest.TestCase):
def test_dtype():
data = paddle.static.data(
shape=[10], dtype="float16", name="input"
shape=[10], dtype="int16", name="input"
)
paddle.unique(data)
@@ -817,7 +817,7 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
helper = LayerHelper("accuracy", **locals())
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], 'accuracy'
input, 'input', ['float16', 'uint16', 'float32', 'float64'], 'accuracy'
)
topk_out, topk_indices = paddle.topk(input, k=k)
acc_out = helper.create_variable_for_type_inference(dtype="float32")
@@ -62,7 +62,7 @@ def celu(x, alpha=1.0, name=None):
return _C_ops.celu(x, alpha)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'celu'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'celu'
)
helper = LayerHelper("celu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -114,7 +114,7 @@ def elu(x, alpha=1.0, name=None):
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'elu'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'elu'
)
helper = LayerHelper("elu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -234,7 +234,7 @@ def hardshrink(x, threshold=0.5, name=None):
return _C_ops.hardshrink(x, threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hardshrink'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardshrink'
)
helper = LayerHelper('hardshrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -339,7 +339,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
return _C_ops.hardsigmoid(x, slope, offset)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hardsigmoid'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardsigmoid'
)
helper = LayerHelper('hardsigmoid', **locals())
@@ -390,7 +390,7 @@ def hardswish(x, name=None):
return _C_ops.hardswish(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hardswish'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'hardswish'
)
helper = LayerHelper('hardswish', **locals())
@@ -439,7 +439,7 @@ def leaky_relu(x, negative_slope=0.01, name=None):
return _C_ops.leaky_relu(x, negative_slope)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'leaky_relu'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'leaky_relu'
)
helper = LayerHelper('leaky_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -661,7 +661,7 @@ def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None):
return _C_ops.rrelu(x, lower, upper, is_test)
else:
check_variable_and_dtype(
x, 'X', ['float16', 'float32', 'float64'], 'rrelu'
x, 'X', ['float16', 'uint16', 'float32', 'float64'], 'rrelu'
)
helper = LayerHelper('rrelu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -709,7 +709,7 @@ def relu(x, name=None):
return _C_ops.relu(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'relu'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu'
)
helper = LayerHelper('relu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -874,7 +874,9 @@ def relu6(x, name=None):
if in_dynamic_mode():
return _legacy_C_ops.relu6(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
check_variable_and_dtype(
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu6'
)
helper = LayerHelper('relu6', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
@@ -983,7 +985,7 @@ def silu(x, name=None):
return _C_ops.silu(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'silu'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'silu'
)
helper = LayerHelper("silu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1197,7 +1199,7 @@ def softplus(x, beta=1, threshold=20, name=None):
return _C_ops.softplus(x, beta, threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'softplus'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softplus'
)
helper = LayerHelper('softplus', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1256,7 +1258,7 @@ def softshrink(x, threshold=0.5, name=None):
return _C_ops.softshrink(x, threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'softshrink'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softshrink'
)
helper = LayerHelper('softshrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1302,7 +1304,7 @@ def softsign(x, name=None):
return _legacy_C_ops.softsign(x)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'softsign'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softsign'
)
helper = LayerHelper('softsign', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1341,7 +1343,7 @@ def swish(x, name=None):
return _C_ops.swish(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'swish'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'swish'
)
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1387,7 +1389,7 @@ def mish(x, name=None):
return _C_ops.mish(x, 20)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'mish'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'mish'
)
helper = LayerHelper('mish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1426,7 +1428,7 @@ def tanhshrink(x, name=None):
return _C_ops.tanh_shrink(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'tanhshrink'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'tanhshrink'
)
helper = LayerHelper('tanh_shrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1476,7 +1478,10 @@ def thresholded_relu(x, threshold=1.0, name=None):
return _C_ops.thresholded_relu(x, threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'thresholded_relu'
x,
'x',
['float16', 'uint16', 'float32', 'float64'],
'thresholded_relu',
)
helper = LayerHelper('thresholded_relu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -1554,7 +1559,10 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
else:
if dtype is None:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'log_softmax'
x,
'x',
['float16', 'uint16', 'float32', 'float64'],
'log_softmax',
)
else:
check_dtype(
@@ -237,7 +237,7 @@ def _conv_nd(
"data_format": data_format,
}
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], op_type
x, 'x', ['float16', 'uint16', 'float32', 'float64'], op_type
)
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
@@ -1344,7 +1344,10 @@ def conv2d_transpose(
'data_format': data_format,
}
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'conv2d_transpose'
x,
'x',
['float16', 'uint16', 'float32', 'float64'],
'conv2d_transpose',
)
helper = LayerHelper(op_type, **locals())
pre_bias = helper.create_variable_for_type_inference(x.dtype)
@@ -213,7 +213,7 @@ def batch_norm(
else:
check_variable_and_dtype(
x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
x, 'input', ['float16', 'uint16', 'float32', 'float64'], 'BatchNorm'
)
# for static need dict
@@ -1561,7 +1561,10 @@ class SyncBatchNorm(_BatchNormBase):
return sync_batch_norm_out
check_variable_and_dtype(
x, 'input', ['float16', 'float32', 'float64'], 'SyncBatchNorm'
x,
'input',
['float16', 'uint16', 'float32', 'float64'],
'SyncBatchNorm',
)
attrs = {
@@ -89,7 +89,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
helper = LayerHelper("accuracy", **locals())
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], 'accuracy'
input, 'input', ['float16', 'uint16', 'float32', 'float64'], 'accuracy'
)
topk_out = helper.create_variable_for_type_inference(dtype=input.dtype)
topk_indices = helper.create_variable_for_type_inference(dtype="int64")
@@ -1354,7 +1354,7 @@ def _tril_triu_op(helper):
check_variable_and_dtype(
x,
'x',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
['float16', 'uint16', 'float32', 'float64', 'int32', 'int64', 'bool'],
op_type,
)
if len(x.shape) < 2:
@@ -415,7 +415,10 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
if axis is not None:
check_type(axis, 'axis', (int), 'p_norm')
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'p_norm'
input,
'input',
['float16', 'uint16', 'float32', 'float64'],
'p_norm',
)
attrs = {
@@ -2201,6 +2201,7 @@ def squeeze(x, axis=None, name=None):
'input',
[
'float16',
'uint16',
'float32',
'float64',
'bool',
@@ -2477,7 +2478,10 @@ def unique(
return tuple(outs)
else:
check_variable_and_dtype(
x, "input", ['float32', 'float64', 'int32', 'int64'], 'unique'
x,
"input",
['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
'unique',
)
check_type(return_index, 'return_index', bool, 'unique')
check_type(return_inverse, 'return_inverse', bool, 'unique')
@@ -2597,6 +2601,7 @@ def unsqueeze(x, axis, name=None):
'input',
[
'float16',
'uint16',
'float32',
'float64',
'bool',
@@ -3867,7 +3872,15 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
check_variable_and_dtype(
x,
'x',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
[
'bool',
'float16',
'uint16',
'float32',
'float64',
'int32',
'int64',
],
'strided_slice',
)
check_type(axes, 'axes', (list, tuple), 'strided_slice')
@@ -288,7 +288,7 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
return _C_ops.stanh(x, scale_a, scale_b)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'stanh'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'stanh'
)
helper = LayerHelper('stanh', **locals())
@@ -2717,7 +2717,7 @@ def log1p(x, name=None):
return _C_ops.log1p(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], "log1p"
x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log1p"
)
inputs = {'X': [x]}
helper = LayerHelper('log1p', **locals())
@@ -2769,7 +2769,7 @@ def log2(x, name=None):
return _C_ops.log2(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], "log2"
x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log2"
)
inputs = {'X': [x]}
helper = LayerHelper('log2', **locals())
@@ -2821,7 +2821,7 @@ def log10(x, name=None):
return _C_ops.log10(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], "log10"
x, 'x', ['float16', 'uint16', 'float32', 'float64'], "log10"
)
inputs = {'X': [x]}
helper = LayerHelper('log10', **locals())
@@ -4252,7 +4252,7 @@ def logit(x, eps=None, name=None):
return _C_ops.logit(x, eps)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'logit'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'logit'
)
helper = LayerHelper("logit", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
@@ -221,7 +221,7 @@ def acos(x, name=None):
return _C_ops.acos(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'acos'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acos'
)
helper = LayerHelper('acos', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -258,7 +258,7 @@ def acosh(x, name=None):
return _C_ops.acosh(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'acosh'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'acosh'
)
helper = LayerHelper('acosh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -295,7 +295,7 @@ def asin(x, name=None):
return _C_ops.asin(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'asin'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asin'
)
helper = LayerHelper('asin', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -332,7 +332,7 @@ def asinh(x, name=None):
return _C_ops.asinh(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'asinh'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'asinh'
)
helper = LayerHelper('asinh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -369,7 +369,7 @@ def atan(x, name=None):
return _C_ops.atan(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'atan'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atan'
)
helper = LayerHelper('atan', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -406,7 +406,7 @@ def atanh(x, name=None):
return _C_ops.atanh(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'atanh'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'atanh'
)
helper = LayerHelper('atanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -444,7 +444,7 @@ def ceil(x, name=None):
return _C_ops.ceil(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'ceil'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'ceil'
)
helper = LayerHelper('ceil', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -522,7 +522,7 @@ def cosh(x, name=None):
return _C_ops.cosh(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'cosh'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'cosh'
)
helper = LayerHelper('cosh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -611,7 +611,7 @@ def expm1(x, name=None):
return _C_ops.expm1(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'expm1'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'expm1'
)
helper = LayerHelper('expm1', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -649,7 +649,7 @@ def floor(x, name=None):
return _C_ops.floor(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'floor'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'floor'
)
helper = LayerHelper('floor', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -687,7 +687,7 @@ def reciprocal(x, name=None):
return _C_ops.reciprocal(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'reciprocal'
)
helper = LayerHelper('reciprocal', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -734,7 +734,7 @@ def round(x, name=None):
return _C_ops.round(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'round'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'round'
)
helper = LayerHelper('round', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -773,7 +773,7 @@ def rsqrt(x, name=None):
return _C_ops.rsqrt(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'rsqrt'
)
helper = LayerHelper('rsqrt', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -848,7 +848,7 @@ def sin(x, name=None):
return _C_ops.sin(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'sin'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sin'
)
helper = LayerHelper('sin', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -885,7 +885,7 @@ def sinh(x, name=None):
return _C_ops.sinh(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'sinh'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sinh'
)
helper = LayerHelper('sinh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -1010,7 +1010,7 @@ def tan(x, name=None):
return _C_ops.tan(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'tan'
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'tan'
)
helper = LayerHelper('tan', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)