Unverified commit f8ac6c2c, authored by W wangchaochaohu, committed by GitHub

fix fp16 support of assign Op and squeeze Op test=develop (#24862)

Parent 8abab77d
@@ -6175,7 +6175,7 @@ def squeeze(input, axes, name=None):
Out.shape = [1,3,5]
Args:
input (Variable): The input Tensor. Support data type: float32, float64, int8, int32, int64.
input (Variable): The input Tensor. Support data type: float16, float32, float64, int8, int32, int64.
axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
Axes range is :math:`[-rank(input), rank(input))`.
If axes is negative, :math:`axes=axes+rank(input)`.
@@ -6195,9 +6195,9 @@ def squeeze(input, axes, name=None):
"""
helper = LayerHelper("squeeze", **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int8', 'int32', 'int64'],
'squeeze')
check_variable_and_dtype(
input, 'input',
['float16', 'float32', 'float64', 'int8', 'int32', 'int64'], 'squeeze')
check_type(axes, 'axes', list, 'squeeze')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
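For context, here is a minimal static-graph sketch (the placeholder name, shape, and `fluid.data` setup are assumed for illustration, not taken from this diff) of what the relaxed check allows: `fluid.layers.squeeze` can now be built on a float16 Variable without raising a dtype error.

```python
# Illustrative sketch only: exercises the updated dtype check at graph-build time.
import paddle.fluid as fluid

# A float16 placeholder; before this change, check_variable_and_dtype would
# reject it during graph construction because 'float16' was not in the list.
x = fluid.data(name="x", shape=[1, 3, 1, 5], dtype="float16")
y = fluid.layers.squeeze(input=x, axes=[2])

print(y.shape)  # [1, 3, 5]; actually running the op still needs a device with fp16 kernels
```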
@@ -75,7 +75,7 @@ def create_parameter(shape,
is_bias=False,
default_initializer=None):
"""
:api_attr: Static Graph
:api_attr: Static Graph
This function creates a parameter. The parameter is a learnable variable, which can have
gradient, and can be optimized.
@@ -197,9 +197,9 @@ def create_global_var(shape,
def cast(x, dtype):
"""
:alias_main: paddle.cast
:alias: paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast
:old_api: paddle.fluid.layers.cast
:alias_main: paddle.cast
:alias: paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast
:old_api: paddle.fluid.layers.cast
This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
to the output with :attr:`dtype`. It's meaningless if the output dtype
@@ -263,9 +263,9 @@ def cast(x, dtype):
def concat(input, axis=0, name=None):
"""
:alias_main: paddle.concat
:alias: paddle.concat,paddle.tensor.concat,paddle.tensor.manipulation.concat
:old_api: paddle.fluid.layers.concat
:alias_main: paddle.concat
:alias: paddle.concat,paddle.tensor.concat,paddle.tensor.manipulation.concat
:old_api: paddle.fluid.layers.concat
**Concat**
@@ -545,15 +545,15 @@ def sums(input, out=None):
def assign(input, output=None):
"""
:alias_main: paddle.nn.functional.assign
:alias: paddle.nn.functional.assign,paddle.nn.functional.common.assign
:old_api: paddle.fluid.layers.assign
:alias_main: paddle.nn.functional.assign
:alias: paddle.nn.functional.assign,paddle.nn.functional.common.assign
:old_api: paddle.fluid.layers.assign
The OP copies the :attr:`input` to the :attr:`output`.
Parameters:
input (Variable|numpy.ndarray): A tensor or numpy ndarray, its data type supports
float32, float64, int32 and int64.
float16, float32, float64, int32 and int64.
output (Variable, optional): A tensor. If :attr:`output` is None, a new tensor will
be created as :attr:`output`. Default: None.
@@ -574,9 +574,10 @@ def assign(input, output=None):
helper = LayerHelper('assign', **locals())
check_type(input, 'input', (Variable, numpy.ndarray), 'assign')
if isinstance(input, Variable):
check_dtype(input.dtype, 'input',
['float32', 'float64', 'int32', 'int64', 'bool'], 'assign',
'(When the type of input in assign is Variable.)')
check_dtype(
input.dtype, 'input',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'assign', '(When the type of input in assign is Variable.)')
if output is None:
output = helper.create_variable_for_type_inference(
dtype=input.dtype)
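Similarly, a small sketch (setup assumed, not part of the diff) of `fluid.layers.assign` with a float16 Variable, which the widened `check_dtype` list now accepts; the output Variable is created with the input's dtype.

```python
# Illustrative sketch only: assign now passes the dtype check for float16 inputs.
import paddle.fluid as fluid

src = fluid.data(name="src", shape=[2, 3], dtype="float16")
dst = fluid.layers.assign(src)  # a new float16 Variable is created as the output
```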
@@ -621,9 +622,9 @@ def assign(input, output=None):
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
"""
:alias_main: paddle.fill_constant
:alias: paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant
:old_api: paddle.fluid.layers.fill_constant
:alias_main: paddle.fill_constant
:alias: paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant
:old_api: paddle.fluid.layers.fill_constant
This OP creates a Tensor with specified `shape` and `dtype`, and
initializes it with a constant specified by `value`.
@@ -805,9 +806,9 @@ def fill_constant_batch_size_like(input,
def argmin(x, axis=0):
"""
:alias_main: paddle.argmin
:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
:old_api: paddle.fluid.layers.argmin
:alias_main: paddle.argmin
:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
:old_api: paddle.fluid.layers.argmin
**argmin**
@@ -935,9 +936,9 @@ def argmax(x, axis=0):
def argsort(input, axis=-1, descending=False, name=None):
"""
:alias_main: paddle.argsort
:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
:old_api: paddle.fluid.layers.argsort
:alias_main: paddle.argsort
:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
:old_api: paddle.fluid.layers.argsort
This OP sorts the input along the given axis, and returns sorted output
data Variable and its corresponding index Variable with the same shape as
@@ -1087,9 +1088,9 @@ def zeros(shape, dtype, force_cpu=False):
def reverse(x, axis):
"""
:alias_main: paddle.reverse
:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
:old_api: paddle.fluid.layers.reverse
:alias_main: paddle.reverse
:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
:old_api: paddle.fluid.layers.reverse
The OP reverses the tensor :attr:`x` along the given :attr:`axis`.
@@ -1201,9 +1202,9 @@ def load_combine(out, file_path):
def has_inf(x):
"""
:alias_main: paddle.has_inf
:alias: paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf
:old_api: paddle.fluid.layers.has_inf
:alias_main: paddle.has_inf
:alias: paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf
:old_api: paddle.fluid.layers.has_inf
Test if any of x contains an infinity number
@@ -1230,9 +1231,9 @@ def has_inf(x):
def has_nan(x):
"""
:alias_main: paddle.has_nan
:alias: paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan
:old_api: paddle.fluid.layers.has_nan
:alias_main: paddle.has_nan
:alias: paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan
:old_api: paddle.fluid.layers.has_nan
Test if any of x contains a NAN
@@ -1259,9 +1260,9 @@ def has_nan(x):
def isfinite(x):
"""
:alias_main: paddle.isfinite
:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
:old_api: paddle.fluid.layers.isfinite
:alias_main: paddle.isfinite
:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
:old_api: paddle.fluid.layers.isfinite
Test if any of x contains an infinity/NAN number. If all the elements are finite,
returns true, else false.
@@ -1460,9 +1461,9 @@ def zeros_like(x, out=None):
def diag(diagonal):
"""
:alias_main: paddle.diag
:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
:old_api: paddle.fluid.layers.diag
:alias_main: paddle.diag
:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
:old_api: paddle.fluid.layers.diag
This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.
@@ -1507,9 +1508,9 @@ def diag(diagonal):
def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
"""
:alias_main: paddle.eye
:alias: paddle.eye,paddle.tensor.eye,paddle.tensor.creation.eye
:old_api: paddle.fluid.layers.eye
:alias_main: paddle.eye
:alias: paddle.eye,paddle.tensor.eye,paddle.tensor.creation.eye
:old_api: paddle.fluid.layers.eye
**eye**