Unverified commit 6d231b02, authored by 骑马小猫, committed by GitHub

[Function optimization] support uint16 python op in d2s (#52809)

* support uint16 python op in d2s

* convert uint16 -> bfloat16 in docstring
Parent 25bd5ed8
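
For context, a minimal usage sketch (not part of this commit) of what the change enables: calling one of the patched ops on a bfloat16 tensor under dynamic-to-static (d2s) via paddle.jit.to_static. The names below are only illustrative, and the sketch assumes a Paddle build and device with bfloat16 kernels and support for the 'bfloat16' dtype string in cast.

import paddle
import paddle.nn.functional as F


@paddle.jit.to_static
def bf16_softmax(x):
    # Under d2s this call takes the static-graph branch patched below, so the
    # dtype whitelist must contain 'uint16' for a bfloat16 input to pass
    # check_variable_and_dtype.
    return F.softmax(x, axis=-1)


x = paddle.rand([2, 4]).cast('bfloat16')  # assumes bf16 cast support on this build
out = bf16_softmax(x)
print(out.dtype)  # expected: paddle.bfloat16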
@@ -1065,12 +1065,12 @@ def softmax(x, axis=-1, dtype=None, name=None):
               [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
     Parameters:
-        x (Tensor): The input Tensor with data type float32, float64.
+        x (Tensor): The input Tensor with data type bfloat16, float16, float32, float64.
         axis (int, optional): The axis along which to perform softmax
             calculations. It should be in range [-D, D), where D is the
             rank of ``x`` . If ``axis`` < 0, it works the same way as
             :math:`axis + D` . Default is -1.
-        dtype (str, optional): The data type of the output tensor, can be float32, float64.
+        dtype (str, optional): The data type of the output tensor, can be bfloat16, float16, float32, float64.
         name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
     Returns:
@@ -1110,15 +1110,15 @@ def softmax(x, axis=-1, dtype=None, name=None):
         use_cudnn = True
         if dtype is None:
             check_variable_and_dtype(
-                x, 'x', ['float16', 'bfloat16', 'float32', 'float64'], 'softmax'
+                x, 'x', ['uint16', 'float16', 'float32', 'float64'], 'softmax'
             )
         else:
             check_dtype(
                 dtype,
                 'dtype',
-                ['float16', 'bfloat16', 'float32', 'float64'],
+                ['uint16', 'float16', 'float32', 'float64'],
                 'softmax',
-                'If dtype is not None, it only support float16, bfloat16, float32 or float64.',
+                'If dtype is not None, it only support uint16, float16, float32 or float64.',
             )
         helper = LayerHelper("softmax", **locals())
......
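
A hedged note on the naming (inferred, not stated in the commit): the docstrings advertise bfloat16 while the whitelists list 'uint16' because Paddle's Python-side dtype checks compare against numpy-style dtype names, and bfloat16 is carried under the uint16 alias there. A small static-graph sketch, assuming this build accepts the 'bfloat16' dtype string for paddle.static.data:

import paddle
import paddle.nn.functional as F

paddle.enable_static()

# Assumption: 'bfloat16' is accepted as a dtype string on this build.
x = paddle.static.data(name='x', shape=[2, 4], dtype='bfloat16')

# The variable's dtype is BF16; the Python-level check sees it as 'uint16',
# which is why 'uint16' (not 'bfloat16') appears in the whitelist above.
out = F.softmax(x)
print(out.dtype)  # expected: paddle.bfloat16 / VarType.BF16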
@@ -1823,7 +1823,7 @@ def linear(x, weight, bias=None, name=None):
     :math:`[out\_features]` and will be added to the output.
     Parameters:
-        x (Tensor): Input tensor. The data type should be float16, float32 or float64.
+        x (Tensor): Input tensor. The data type should be bfloat16, float16, float32 or float64.
         weight (Tensor): Weight tensor. The data type should be float16, float32 or float64.
         bias (Tensor, optional): Bias tensor. The data type should be float16, float32 or float64.
             If it is set to None, no bias will be added to the output units.
@@ -1861,9 +1861,14 @@ def linear(x, weight, bias=None, name=None):
         dtype = x.dtype
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'linear'
+            x, 'x', ["uint16", 'float16', 'float32', 'float64'], 'linear'
+        )
+        check_dtype(
+            dtype,
+            'dtype',
+            ["uint16", 'float16', 'float32', 'float64'],
+            'linear',
         )
-        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')
         inputs = {'X': [x], 'Y': [weight]}
         attrs = {'trans_x': False, 'trans_y': False}
......
@@ -280,7 +280,7 @@ def layer_norm(
     For more information, please refer to :ref:`api_paddle_nn_LayerNorm` .
     Parameters:
-        x(Tensor): Input Tensor. It's data type should be float32, float64.
+        x(Tensor): Input Tensor. It's data type should be bfloat16, float16, float32, float64.
         normalized_shape(int|list|tuple): Input shape from an expected input of
             size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
             If it is a single integer, this module will normalize over the last dimension
......
@@ -78,7 +78,7 @@ def shape(input):
             input.shape = [3, 2]
     Args:
-        input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64.
+        input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, bfloat16, float16, float32, float64, int32, int64.
             If input variable is type of SelectedRows, returns the shape of it's inner tensor.
     Returns:
@@ -113,6 +113,7 @@ def shape(input):
         'input',
         [
             'bool',
+            'uint16',
             'float16',
             'float32',
             'float64',
......
@@ -246,6 +246,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
             val,
             name,
             [
+                'uint16',
                 'float16',
                 'float32',
                 'float64',
......
@@ -3698,7 +3698,7 @@ def tanh(x, name=None):
         out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
     Args:
-        x (Tensor): Input of Tanh operator, an N-D Tensor, with data type float32, float64 or float16.
+        x (Tensor): Input of Tanh operator, an N-D Tensor, with data type bfloat16, float32, float64 or float16.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
     Returns:
@@ -3719,7 +3719,7 @@ def tanh(x, name=None):
         return _C_ops.tanh(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'tanh'
+            x, 'x', ['uint16', 'float16', 'float32', 'float64'], 'tanh'
         )
         check_type(x, 'x', (Variable), 'tanh')
         helper = LayerHelper('tanh', **locals())
......
@@ -616,8 +616,8 @@ def where(condition, x=None, y=None, name=None):
     Args:
         condition (Tensor): The condition to choose x or y. When True (nonzero), yield x, otherwise yield y.
-        x (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is True with data type of float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
-        y (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is False with data type of float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
+        x (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is True with data type of bfloat16, float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
+        y (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is False with data type of bfloat16, float16, float32, float64, int32 or int64. Either both or neither of x and y should be given.
         name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
     Returns:
@@ -681,10 +681,16 @@ def where(condition, x=None, y=None, name=None):
     else:
         check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'where'
+            x,
+            'x',
+            ['uint16', 'float16', 'float32', 'float64', 'int32', 'int64'],
+            'where',
         )
         check_variable_and_dtype(
-            y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'where'
+            y,
+            'y',
+            ['uint16', 'float16', 'float32', 'float64', 'int32', 'int64'],
+            'where',
         )
         helper = LayerHelper("where", **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -727,7 +733,7 @@ def index_sample(x, index):
     Args:
         x (Tensor): The source input tensor with 2-D shape. Supported data type is
-            int32, int64, float16, float32, float64.
+            int32, int64, bfloat16, float16, float32, float64.
         index (Tensor): The index input tensor with 2-D shape, first dimension should be same with X.
             Data type is int32 or int64.
@@ -782,7 +788,7 @@ def index_sample(x, index):
     check_variable_and_dtype(
         x,
         'x',
-        ['float16', 'float32', 'float64', 'int32', 'int64'],
+        ['uint16', 'float16', 'float32', 'float64', 'int32', 'int64'],
         'paddle.tensor.search.index_sample',
     )
     check_variable_and_dtype(
......