Unverified commit 6d231b02, authored by 骑马小猫, committed by GitHub

[Function optimization] support uint16 python op in d2s (#52809)

* support uint16 python op in d2s

* convert uint16 -> bfloat16 in docstring
Parent 25bd5ed8
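The changes below widen the dtype whitelists used on the static-graph (dynamic-to-static, d2s) path: Paddle carries bfloat16 variables under the VarType spelling 'uint16', so the checks accept 'uint16' while the user-facing docstrings advertise bfloat16. A minimal usage sketch, not part of the commit, assuming a build whose kernels support bfloat16 and that paddle.cast accepts the 'bfloat16' string:

import paddle
import paddle.nn.functional as F

# Dynamic-to-static conversion: inside the converted program the bfloat16
# input is reported as dtype 'uint16', which the updated checks now allow.
@paddle.jit.to_static
def run_softmax(x):
    return F.softmax(x, axis=-1)

x = paddle.cast(paddle.rand([2, 4]), 'bfloat16')  # stored as uint16 internally
out = run_softmax(x)                              # expected: a bfloat16 tensor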
@@ -1065,12 +1065,12 @@ def softmax(x, axis=-1, dtype=None, name=None):
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Parameters:
-x (Tensor): The input Tensor with data type float32, float64.
+x (Tensor): The input Tensor with data type bfloat16, float16, float32, float64.
axis (int, optional): The axis along which to perform softmax
calculations. It should be in range [-D, D), where D is the
rank of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
-dtype (str, optional): The data type of the output tensor, can be float32, float64.
+dtype (str, optional): The data type of the output tensor, can be bfloat16, float16, float32, float64.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
@@ -1110,15 +1110,15 @@ def softmax(x, axis=-1, dtype=None, name=None):
use_cudnn = True
if dtype is None:
check_variable_and_dtype(
-x, 'x', ['float16', 'bfloat16', 'float32', 'float64'], 'softmax'
+x, 'x', ['uint16', 'float16', 'float32', 'float64'], 'softmax'
)
else:
check_dtype(
dtype,
'dtype',
-['float16', 'bfloat16', 'float32', 'float64'],
+['uint16', 'float16', 'float32', 'float64'],
'softmax',
-'If dtype is not None, it only support float16, bfloat16, float32 or float64.',
+'If dtype is not None, it only support uint16, float16, float32 or float64.',
)
helper = LayerHelper("softmax", **locals())
......
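For the explicit dtype argument of softmax, a static-graph sketch of the path exercised by the updated check_dtype whitelist (illustrative only; the program and tensor names are made up):

import paddle
import paddle.nn.functional as F

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data(name='x', shape=[2, 4], dtype='float16')
    # Passing dtype triggers check_dtype(); casting the result up to
    # float32 is a common choice for a more stable softmax reduction.
    y = F.softmax(x, axis=-1, dtype='float32')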
@@ -1823,7 +1823,7 @@ def linear(x, weight, bias=None, name=None):
:math:`[out\_features]` and will be added to the output.
Parameters:
-x (Tensor): Input tensor. The data type should be float16, float32 or float64.
+x (Tensor): Input tensor. The data type should be bfloat16, float16, float32 or float64.
weight (Tensor): Weight tensor. The data type should be float16, float32 or float64.
bias (Tensor, optional): Bias tensor. The data type should be float16, float32 or float64.
If it is set to None, no bias will be added to the output units.
@@ -1861,9 +1861,14 @@ def linear(x, weight, bias=None, name=None):
dtype = x.dtype
check_variable_and_dtype(
-x, 'x', ['float16', 'float32', 'float64'], 'linear'
+x, 'x', ["uint16", 'float16', 'float32', 'float64'], 'linear'
)
-check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')
+check_dtype(
+dtype,
+'dtype',
+["uint16", 'float16', 'float32', 'float64'],
+'linear',
+)
inputs = {'X': [x], 'Y': [weight]}
attrs = {'trans_x': False, 'trans_y': False}
......
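A hedged sketch of F.linear under d2s with bfloat16 operands (assumes matmul and bias-add kernels for bfloat16 on the target device):

import paddle
import paddle.nn.functional as F

@paddle.jit.to_static
def run_linear(x, w, b):
    # In the converted program x, w and b appear as 'uint16' (bfloat16) variables.
    return F.linear(x, w, b)

x = paddle.cast(paddle.rand([4, 8]), 'bfloat16')
w = paddle.cast(paddle.rand([8, 16]), 'bfloat16')
b = paddle.cast(paddle.zeros([16]), 'bfloat16')
out = run_linear(x, w, b)  # shape [4, 16]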
@@ -280,7 +280,7 @@ def layer_norm(
For more information, please refer to :ref:`api_paddle_nn_LayerNorm` .
Parameters:
-x(Tensor): Input Tensor. It's data type should be float32, float64.
+x(Tensor): Input Tensor. It's data type should be bfloat16, float16, float32, float64.
normalized_shape(int|list|tuple): Input shape from an expected input of
size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
If it is a single integer, this module will normalize over the last dimension
......
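The layer_norm change follows the same pattern; a minimal d2s sketch (assumes a bfloat16 layer_norm kernel):

import paddle
import paddle.nn.functional as F

@paddle.jit.to_static
def run_ln(x):
    # Normalize over the trailing dimension (size 8 here).
    return F.layer_norm(x, normalized_shape=8)

out = run_ln(paddle.cast(paddle.rand([2, 8]), 'bfloat16'))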
@@ -78,7 +78,7 @@ def shape(input):
input.shape = [3, 2]
Args:
-input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64.
+input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, bfloat16, float16, float32, float64, int32, int64.
If input variable is type of SelectedRows, returns the shape of it's inner tensor.
Returns:
@@ -113,6 +113,7 @@ def shape(input):
'input',
[
'bool',
+'uint16',
'float16',
'float32',
'float64',
......
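shape only reads metadata, but its static-graph check still rejects unlisted dtypes, hence the added 'uint16' entry; a quick sketch through the public paddle.shape wrapper:

import paddle

@paddle.jit.to_static
def get_shape(x):
    return paddle.shape(x)  # an int32 tensor holding the runtime shape

print(get_shape(paddle.cast(paddle.rand([3, 2]), 'bfloat16')))  # expected [3, 2]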
@@ -246,6 +246,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
val,
name,
[
+'uint16',
'float16',
'float32',
'float64',
......
@@ -3698,7 +3698,7 @@ def tanh(x, name=None):
out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
Args:
-x (Tensor): Input of Tanh operator, an N-D Tensor, with data type float32, float64 or float16.
+x (Tensor): Input of Tanh operator, an N-D Tensor, with data type bfloat16, float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
@@ -3719,7 +3719,7 @@ def tanh(x, name=None):
return _C_ops.tanh(x)
else:
check_variable_and_dtype(
-x, 'x', ['float16', 'float32', 'float64'], 'tanh'
+x, 'x', ['uint16', 'float16', 'float32', 'float64'], 'tanh'
)
check_type(x, 'x', (Variable), 'tanh')
helper = LayerHelper('tanh', **locals())
......
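tanh gets the same treatment; a tiny sketch of the static path:

import paddle

@paddle.jit.to_static
def run_tanh(x):
    return paddle.tanh(x)  # the 'uint16' entry lets bfloat16 inputs through

out = run_tanh(paddle.cast(paddle.rand([5]), 'bfloat16'))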
@@ -616,8 +616,8 @@ def where(condition, x=None, y=None, name=None):
Args:
condition (Tensor): The condition to choose x or y. When True (nonzero), yield x, otherwise yield y.
-x (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is True with data type of float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
-y (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is False with data type of float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
+x (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is True with data type of bfloat16, float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
+y (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is False with data type of bfloat16, float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
@@ -681,10 +681,16 @@ def where(condition, x=None, y=None, name=None):
else:
check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
check_variable_and_dtype(
-x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'where'
+x,
+'x',
+['uint16', 'float16', 'float32', 'float64', 'int32', 'int64'],
+'where',
)
check_variable_and_dtype(
-y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'where'
+y,
+'y',
+['uint16', 'float16', 'float32', 'float64', 'int32', 'int64'],
+'where',
)
helper = LayerHelper("where", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
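paddle.where with bfloat16 branches, as a hedged d2s sketch (x and y must share a dtype; the bool mask is built from a float32 copy to sidestep any gaps in bfloat16 comparison support):

import paddle

@paddle.jit.to_static
def masked_relu(x):
    cond = paddle.cast(x, 'float32') > 0  # bool mask
    zeros = paddle.zeros_like(x)          # same bfloat16 ('uint16') dtype as x
    return paddle.where(cond, x, zeros)

out = masked_relu(paddle.cast(paddle.randn([2, 3]), 'bfloat16'))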
@@ -727,7 +733,7 @@ def index_sample(x, index):
Args:
x (Tensor): The source input tensor with 2-D shape. Supported data type is
-int32, int64, float16, float32, float64.
+int32, int64, bfloat16, float16, float32, float64.
index (Tensor): The index input tensor with 2-D shape, first dimension should be same with X.
Data type is int32 or int64.
@@ -782,7 +788,7 @@ def index_sample(x, index):
check_variable_and_dtype(
x,
'x',
-['float16', 'float32', 'float64', 'int32', 'int64'],
+['uint16', 'float16', 'float32', 'float64', 'int32', 'int64'],
'paddle.tensor.search.index_sample',
)
check_variable_and_dtype(
......
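Finally, index_sample gathers per-row elements by index; a short sketch assuming the kernel accepts bfloat16 inputs:

import paddle

@paddle.jit.to_static
def gather_cols(x, idx):
    # out[i, j] = x[i, idx[i, j]]
    return paddle.index_sample(x, idx)

x = paddle.cast(paddle.rand([2, 5]), 'bfloat16')
idx = paddle.to_tensor([[0, 2], [1, 4]], dtype='int64')
out = gather_cols(x, idx)  # shape [2, 2]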