Unverified commit 9c395d38, authored by H heyanru, committed by GitHub

[Fluid Clean] remove unfold, deformable_roi_pooling, shard_index, hard_swish, mish, uniform_random, unbind (#48451)
Parent f88713e1
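The removed entry points map onto existing Paddle 2.x APIs (deformable_roi_pooling is removed together with its op tests and has no drop-in replacement named in this diff, and the new functional activations drop the old threshold/scale/offset keyword extras, as the TensorRT test changes below show). A minimal migration sketch, assuming Paddle 2.x with dygraph enabled:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([2, 3, 8, 8])
    cols = F.unfold(x, [3, 3], 1, 1, 1)           # was fluid.layers.unfold
    hs = F.hardswish(x)                           # was fluid.layers.hard_swish
    m = F.mish(x)                                 # was fluid.layers.mish
    u = paddle.uniform([2, 3], min=0.0, max=1.0)  # was fluid.layers.uniform_random
    parts = paddle.unbind(x, axis=0)              # was fluid.layers.unbind
    label = paddle.to_tensor([[16], [1]], "int64")
    shard = paddle.shard_index(label, index_num=20, nshards=2,
                               shard_id=0)        # was fluid.layers.shard_index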
......@@ -23,7 +23,7 @@ from paddle.fluid.framework import (
_non_static_mode,
in_dygraph_mode,
)
from paddle.fluid.layers import nn, tensor
from paddle.fluid.layers import tensor
from paddle.tensor import random
......@@ -187,7 +187,7 @@ class Uniform(distribution.Distribution):
return output
else:
output_shape = shape + batch_shape
output = nn.uniform_random(
output = paddle.uniform(
output_shape, dtype=self.dtype, min=0.0, max=1.0, seed=seed
) * (
tensor.zeros(output_shape, dtype=self.dtype)
......
......@@ -90,13 +90,6 @@ __all__ = [
'mul',
'merge_selected_rows',
'get_tensor_from_selected_rows',
'unfold',
'deformable_roi_pooling',
'shard_index',
'hard_swish',
'mish',
'uniform_random',
'unbind',
]
OP_NAMEMAPPING = {
......@@ -3564,667 +3557,3 @@ def get_tensor_from_selected_rows(x, name=None):
attrs={},
)
return out
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
r"""
This op returns a col buffer of sliding local blocks of input x, also known
as im2col for batched 2D image tensors. For each block under the convolution filter,
all elements are rearranged into a column. As the convolution filter slides over
the input feature map, a series of such columns is formed.
For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
can be calculated as follows.
.. math::
dkernel[0] &= dilations[0] \times (kernel\_sizes[0] - 1) + 1
dkernel[1] &= dilations[1] \times (kernel\_sizes[1] - 1) + 1
hout &= \frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1
wout &= \frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1
Cout &= C \times kernel\_sizes[0] \times kernel\_sizes[1]
Lout &= hout \times wout
Parameters:
x(Tensor): 4-D Tensor, input tensor of format [N, C, H, W],
data type can be float32 or float64
kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
or an integer k treated as [k, k].
strides(int|list): The strides, should be [stride_h, stride_w]
or an integer stride treated as [stride, stride].
By default, strides is [1, 1].
paddings(int|list): The paddings of each dimension, should be
[padding_top, padding_left, padding_bottom, padding_right]
or [padding_h, padding_w] or an integer padding.
If [padding_h, padding_w] is given, it will be expanded to
[padding_h, padding_w, padding_h, padding_w]. If an integer
padding is given, [padding, padding, padding, padding] will
be used. By default, paddings is [0, 0, 0, 0].
dilations(int|list): The dilations of the convolution kernel, should be
[dilation_h, dilation_w], or an integer dilation treated as
[dilation, dilation]. By default, it is [1, 1].
name(str, optional): The default value is None.
Normally there is no need for the user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The tensor corresponding to the sliding local blocks.
The output shape is [N, Cout, Lout] as described above.
Cout is the total number of values within each block,
and Lout is the total number of such blocks.
The data type of the output is the same as the input :math:`x`.
Return Type:
Tensor
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.randn((100,3,224,224))
y = F.unfold(x, [3, 3], 1, 1, 1)
"""
return paddle.nn.functional.unfold(
x, kernel_sizes, strides, paddings, dilations, name
)
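Plugging the docstring example (x of shape [100, 3, 224, 224], kernel 3, stride 1, padding 1, dilation 1) into the shape formulas above, as a plain-Python check:

.. code-block:: python

    H = W = 224
    C = 3
    k, s, p, d = 3, 1, 1, 1
    dkernel = d * (k - 1) + 1              # effective (dilated) kernel size: 3
    hout = (H + 2 * p - dkernel) // s + 1  # 224
    wout = (W + 2 * p - dkernel) // s + 1  # 224
    Cout = C * k * k                       # 27
    Lout = hout * wout                     # 50176
    print([100, Cout, Lout])               # [100, 27, 50176], the shape of y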
def deformable_roi_pooling(
input,
rois,
trans,
no_trans=False,
spatial_scale=1.0,
group_size=[1, 1],
pooled_height=1,
pooled_width=1,
part_size=None,
sample_per_part=1,
trans_std=0.1,
position_sensitive=False,
name=None,
):
r"""
Deformable ROI Pooling Layer
Performs deformable region-of-interest pooling on inputs. As described
in `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_, it computes an offset for each bin after
ROI pooling so that pooling happens at the correct region. The batch size becomes the number of region bounding boxes after deformable_roi_pooling.
The operation has three steps:
1. Divide each region proposal into equal-sized sections using pooled_width and pooled_height.
2. Add an offset to each pixel in the ROI to get a new location, and compute the new value directly through
bilinear interpolation over the four nearest pixels.
3. Sample several points in each bin and average them as the output.
Args:
input (Variable): The input of deformable ROI pooling, a tensor with value type float32. The shape of input is
[N, C, H, W], where N is the batch size, C is the number of input channels,
H is the height of the feature, and W is the width of the feature.
rois (Variable): ROIs (Regions of Interest) with type float32 to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4) with lod level 1,
given as [[x1, y1, x2, y2], ...], where (x1, y1) is
the top-left coordinate and (x2, y2) is the bottom-right
coordinate.
trans (Variable): Offset of features on ROIs while pooling, with value type float32. The format is [N, C, H, W], where
N is the number of ROIs, C is the number of channels (indicating the offset distance
in the x and y directions), H is the pooled height, and W is the pooled width.
no_trans (bool): Whether to skip adding the offset while ROI pooling.
If True, no offset is added. Default: False.
spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width), with value type float32.
Equals the reciprocal of the total stride in convolutional layers. Default: 1.0.
group_size (list|tuple): The number of groups into which the input channels are divided, given as a list or tuple of int32. (E.g. if the number of input channels
is k1 * k2 * (C + 1), then k1 and k2 are the group width and height and C + 1 is the number of output
channels.) E.g. (4, 6) means a group height of 4 and a group width of 6. Default: [1, 1].
pooled_height (int): The pooled output height, with value type int32. Default: 1.
pooled_width (int): The pooled output width, with value type int32. Default: 1.
part_size (list|tuple): The height and width of the offset, given as a list or tuple of int32, e.g. (4, 6) for a height of 4 and a width of 6; the values always equal pooled_height
and pooled_width. Default: if None, [pooled_height, pooled_width] is used.
sample_per_part (int): The number of samples in each bin, with value type int32. A larger value costs more performance. Default: 1.
trans_std (float): Coefficient of the offset, with value type float32. It controls the weight of the offset. Default: 0.1.
position_sensitive (bool): Whether to use the deformable PSROI pooling mode. If False, the input dimension equals the output dimension.
If True, the input dimension should be output dimension * pooled_height * pooled_width. Default: False.
name (str|None): Name of the layer. Default: None.
Returns:
Variable: Output of deformable ROI pooling. If position_sensitive is False, the output dimension equals the input dimension. If position_sensitive is True,
the output dimension is the input dimension divided by pooled_height * pooled_width.
Examples:
.. code-block:: python
# position_sensitive=True
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=True)
# position_sensitive=False
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=False)
"""
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'deformable_roi_pooling'
)
check_variable_and_dtype(
rois, 'rois', ['float32', 'float64'], 'deformable_roi_pooling'
)
check_variable_and_dtype(
trans, 'trans', ['float32', 'float64'], 'deformable_roi_pooling'
)
check_type(
group_size, 'group_size', (list, tuple), 'deformable_roi_pooling'
)
if part_size is not None:
check_type(
part_size, 'part_size', (list, tuple), 'deformable_roi_pooling'
)
input_channels = input.shape[1]
if position_sensitive is False:
output_channels = input_channels
else:
output_channels = input_channels / pooled_height / pooled_width
if part_size is None:
part_height = pooled_height
part_width = pooled_width
part_size = [part_height, part_width]
part_size = utils.convert_to_list(part_size, 2, 'part_size')
group_size = utils.convert_to_list(group_size, 2, 'group_size')
helper = LayerHelper('deformable_psroi_pooling', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
top_count = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="deformable_psroi_pooling",
inputs={"Input": input, "ROIs": rois, "Trans": trans},
outputs={"Output": output, "TopCount": top_count},
attrs={
"no_trans": no_trans,
"spatial_scale": spatial_scale,
"output_dim": output_channels,
"group_size": group_size,
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"part_size": part_size,
"sample_per_part": sample_per_part,
"trans_std": trans_std,
},
)
return output
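A quick arithmetic check of the channel bookkeeping in the position_sensitive branch above, using the docstring example's shapes (192 input channels, 8x8 pooling):

.. code-block:: python

    input_channels = 192
    pooled_height = pooled_width = 8
    # position_sensitive=True: output_dim = C_in / (pooled_height * pooled_width)
    output_channels = input_channels // (pooled_height * pooled_width)
    print(output_channels)  # 3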
@deprecated(since="2.0.0", update_to="paddle.shard_index")
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
"""
Reset the values of `input` according to the shard they belong to.
Every value in `input` must be a non-negative integer, and
the parameter `index_num` is an exclusive upper bound on the
values of `input`. Thus, all values in `input` must be in the range
[0, index_num) and each value can be regarded as the offset to the beginning
of the range. The range is further split into multiple shards. Specifically,
we first compute the `shard_size` according to the following formula,
which represents the number of integers each shard can hold. So for the
i'th shard, it can hold values in the range [i*shard_size, (i+1)*shard_size).
::
shard_size = (index_num + nshards - 1) // nshards
For each value `v` in `input`, we reset it to a new value according to the
following formula:
::
v = v - shard_id * shard_size if shard_id * shard_size <= v < (shard_id+1) * shard_size else ignore_value
That is, the value `v` is set to the new offset within the range represented by shard `shard_id`
if it is in the range. Otherwise, it is reset to `ignore_value`.
Args:
input (Tensor): Input tensor with data type int64 or int32. Its last dimension must be 1.
index_num (int): An integer greater than the maximum value of `input` (an exclusive upper bound on the index range).
nshards (int): The number of shards.
shard_id (int): The index of the current shard.
ignore_value (int): An integer value outside the sharded index range.
Returns:
Tensor.
Examples:
.. code-block:: python
import paddle
label = paddle.to_tensor([[16], [1]], "int64")
shard_label = paddle.shard_index(input=label,
index_num=20,
nshards=2,
shard_id=0)
print(shard_label)
# [[-1], [1]]
"""
if in_dygraph_mode():
return _C_ops.shard_index(
input, index_num, nshards, shard_id, ignore_value
)
check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index')
op_type = 'shard_index'
helper = LayerHelper(op_type, **locals())
if shard_id < 0 or shard_id >= nshards:
raise ValueError(
'The shard_id(%d) should be in [0, %d)' % (shard_id, nshards)
)
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'X': [input]},
outputs={'Out': out},
attrs={
'index_num': index_num,
'nshards': nshards,
'shard_id': shard_id,
'ignore_value': ignore_value,
},
stop_gradient=True,
)
return out
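The sharding formula from the docstring, restated as a small pure-Python sketch:

.. code-block:: python

    def shard_value(v, index_num, nshards, shard_id, ignore_value=-1):
        # shard_size is the number of integers each shard can hold
        shard_size = (index_num + nshards - 1) // nshards
        if shard_id * shard_size <= v < (shard_id + 1) * shard_size:
            return v - shard_id * shard_size
        return ignore_value

    print([shard_value(v, index_num=20, nshards=2, shard_id=0) for v in (16, 1)])
    # [-1, 1], matching the docstring example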
@templatedoc()
def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
r"""
This operator implements the hard_swish activation function.
Hard_swish was proposed in MobileNetV3 and is more numerically stable and efficient than the swish function.
For more details please refer to: https://arxiv.org/pdf/1905.02244.pdf
The formula is as follows:
.. math::
out = \\frac{x * (min(max(0, x+offset), threshold))}{scale}
In the above equation:
``threshold`` and ``scale`` should be positive, ``offset`` can be positive or negative. It is recommended to use default parameters.
Args:
x (Variable): Input feature, multi-dimensional Tensor. The data type should be float32 or float64.
threshold (float, optional): The clipping threshold of the activation. Default: 6.0
scale (float, optional): The scale factor. Default: 6.0
offset (float, optional): The offset factor. Default: 3.0
name (str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output tensor with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy as np
paddle.enable_static()
DATATYPE='float32'
x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
y = fluid.layers.hard_swish(x)
place = fluid.CPUPlace()
#place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
print(out) # [[0.66666667, 1.66666667, 3., 4.]]
"""
if _non_static_mode():
return _legacy_C_ops.hard_swish(
x, 'threshold', threshold, 'scale', scale, 'offset', offset
)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hard_swish'
)
helper = LayerHelper('hard_swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='hard_swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold, 'scale': scale, 'offset': offset},
)
return out
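A NumPy restatement of the hard_swish formula above (a reference sketch, not the fused op):

.. code-block:: python

    import numpy as np

    def ref_hard_swish(x, threshold=6.0, scale=6.0, offset=3.0):
        return x * np.minimum(np.maximum(0.0, x + offset), threshold) / scale

    print(ref_hard_swish(np.array([1.0, 2.0, 3.0, 4.0])))
    # [0.66666667 1.66666667 3.         4.        ], matching the docstring example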
@templatedoc()
def mish(x, threshold=20, name=None):
r"""
This operator implements the mish activation function.
Refer to `Mish: A Self Regularized Non-Monotonic Neural
Activation Function <https://arxiv.org/abs/1908.08681>`_
The formula is as follows if :attr:`threshold` is :code:`None` or negative:
.. math::
out = x * \\tanh(\\ln(1 + e^{x}))
The formula is as follows if :attr:`threshold` is set as positive value:
.. math::
out = \\begin{cases}
x \\ast \\tanh(x), \\text{if } x > \\text{threshold} \\\\
x \\ast \\tanh(e^{x}), \\text{if } x < -\\text{threshold} \\\\
x \\ast \\tanh(\\ln(1 + e^{x})), \\text{otherwise}
\\end{cases}
Args:
x (Variable): Input feature, multi-dimensional Tensor. The data type
should be float16, float32 or float64.
threshold (float|None): Threshold for softplus in the mish operator.
An approximate value of softplus is used when the absolute value
of the input is greater than :attr:`threshold` and :attr:`threshold`
is positive. For a None or negative threshold, the
approximation is not used. Default: 20.
name (str, optional): The default value is None. Normally there is no
need for the user to set this property. For more information, please
refer to :ref:`api_guide_Name`
Returns:
Variable: The output tensor with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
DATATYPE='float32'
x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
y = fluid.layers.mish(x)
place = fluid.CPUPlace()
# place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
print(out) # [[0.66666667, 1.66666667, 3., 4.]]
"""
if in_dygraph_mode():
return _C_ops.mish(x, threshold)
if _in_legacy_dygraph():
return _legacy_C_ops.mish(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish')
check_type(threshold, 'threshold', (float, int), 'mish')
assert (
threshold > 0
), "threshold of mish should be greater than 0, " "but got {}".format(
threshold
)
helper = LayerHelper('mish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='mish',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold},
)
return out
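A NumPy restatement of the thresholded mish formula above (a reference sketch):

.. code-block:: python

    import numpy as np

    def ref_mish(x, threshold=20.0):
        # approximate softplus outside [-threshold, threshold], exact inside
        sp = np.where(x > threshold, x,
                      np.where(x < -threshold, np.exp(x), np.log1p(np.exp(x))))
        return x * np.tanh(sp)

    print(ref_mish(np.array([-1.0, 0.0, 1.0])))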
@deprecated(since="2.0.0", update_to="paddle.uniform")
@templatedoc()
def uniform_random(
shape, dtype='float32', min=-1.0, max=1.0, seed=0, name=None
):
"""
This OP returns a Tensor filled with random values sampled from a uniform
distribution in the range [``min``, ``max``), with ``shape`` and ``dtype``.
Examples:
::
Input:
shape = [1, 2]
Output:
result=[[0.8505902, 0.8397286]]
Args:
shape(list|tuple|Tensor): The shape of the output Tensor. If ``shape``
is a list or tuple, the elements of it should be integers or Tensors
(with the shape [1], and the data type int32 or int64). If ``shape``
is a Tensor, it should be a 1-D Tensor(with the data type int32 or
int64).
dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of
the output Tensor. Supported data types: float32, float64.
Default is float32.
min(float|int, optional): The lower bound on the range of random values
to generate, ``min`` is included in the range. Default is -1.0.
max(float|int, optional): The upper bound on the range of random values
to generate, ``max`` is excluded in the range. Default is 1.0.
seed(int, optional): Random seed used for generating samples. 0 means
use a seed generated by the system. Note that if seed is not 0,
this operator will always generate the same random numbers every
time. Default is 0.
name(str, optional): The default value is None. Normally there is no
need for the user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor filled with random values sampled from a uniform
distribution in the range [``min``, ``max``), with ``shape`` and ``dtype``.
Raises:
TypeError: If ``shape`` is not list, tuple, Tensor.
TypeError: If ``dtype`` is not float32, float64.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
# example 1:
# attr shape is a list which doesn't contain Tensor.
result_1 = fluid.layers.uniform_random(shape=[3, 4])
# [[ 0.84524226, 0.6921872, 0.56528175, 0.71690357],
# [-0.34646994, -0.45116323, -0.09902662, -0.11397249],
# [ 0.433519, 0.39483607, -0.8660099, 0.83664286]]
# example 2:
# attr shape is a list which contains Tensor.
dim_1 = fluid.layers.fill_constant([1], "int64", 2)
dim_2 = fluid.layers.fill_constant([1], "int32", 3)
result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2])
# [[-0.9951253, 0.30757582, 0.9899647 ],
# [ 0.5864527, 0.6607096, -0.8886161 ]]
# example 3:
# attr shape is a Tensor, the data type must be int64 or int32.
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = fluid.layers.uniform_random(var_shape)
# if var_shape's value is [2, 3]
# result_3 is:
# [[-0.8517412, -0.4006908, 0.2551912 ],
# [ 0.3364414, 0.36278176, -0.16085452]]
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape)
return _C_ops.uniform(
shape,
dtype,
float(min),
float(max),
seed,
_current_expected_place(),
)
elif _in_legacy_dygraph():
shape = utils.convert_shape_to_list(shape)
return _legacy_C_ops.uniform_random(
'shape',
shape,
'min',
float(min),
'max',
float(max),
'seed',
seed,
'dtype',
dtype,
)
check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
check_dtype(
dtype, 'dtype', ('float32', 'float64', 'uint16'), 'uniform_random/rand'
)
check_type(min, 'min', (float, int, Variable), 'uniform_random/rand')
check_type(max, 'max', (float, int, Variable), 'uniform_random/rand')
inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand'
)
helper = LayerHelper("uniform_random", **locals())
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out}
)
utils.try_set_static_shape_tensor(out, shape)
return out
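Seed semantics carry over to the replacement API: a nonzero seed repeats the stream. A sketch mirroring TestUniformRandomOpAPISeed further down in this diff, assuming Paddle 2.x dygraph:

.. code-block:: python

    import paddle

    a = paddle.uniform([2, 3, 2], min=5.0, max=10.0, seed=10)
    b = paddle.uniform([2, 3, 2], min=5.0, max=10.0, seed=10)
    print(bool(paddle.equal(a, b).all()))  # True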
def unbind(input, axis=0):
"""
Removes a tensor dimension, then splits the input tensor into multiple sub-Tensors.
Args:
input (Variable): The input variable, an N-D Tensor with data type float32, float64, int32 or int64.
axis (int32|int64, optional): A scalar with type ``int32`` or ``int64`` and shape [1]. The dimension along which to unbind. If :math:`axis < 0`, the
dimension to unbind along is :math:`rank(input) + axis`. Default is 0.
Returns:
list(Variable): The list of segmented Tensor variables.
Example:
.. code-block:: python
import paddle
# input is a variable whose shape is [3, 4, 5]
input = paddle.fluid.data(
name="input", shape=[3, 4, 5], dtype="float32")
[x0, x1, x2] = paddle.tensor.unbind(input, axis=0)
# x0.shape [4, 5]
# x1.shape [4, 5]
# x2.shape [4, 5]
[x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1)
# x0.shape [3, 5]
# x1.shape [3, 5]
# x2.shape [3, 5]
# x3.shape [3, 5]
"""
helper = LayerHelper("unbind", **locals())
check_type(input, 'input', (Variable), 'unbind')
dtype = helper.input_dtype()
check_dtype(
dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], 'unbind'
)
if not isinstance(axis, (int, np.integer)):
raise TypeError(
"The type of 'axis' must be int, but received %s." % (type(axis))
)
if isinstance(axis, np.generic):
axis = axis.item()  # np.asscalar was removed in NumPy 1.23
input_shape = input.shape
axis_ = axis if axis >= 0 else len(input_shape) + axis
num = input_shape[axis_]
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type="unbind",
inputs={"X": input},
outputs={"Out": outs},
attrs={"axis": axis},
)
return outs
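The static-graph example above has a direct dygraph equivalent via the replacement API (a minimal sketch):

.. code-block:: python

    import paddle

    x = paddle.randn([3, 4, 5])
    x0, x1, x2 = paddle.unbind(x, axis=0)  # each sub-Tensor has shape [4, 5]
    print(x0.shape, x1.shape, x2.shape)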
......@@ -1097,7 +1097,6 @@ set_tests_properties(test_bilinear_interp_v2_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_svd_op PROPERTIES TIMEOUT 80)
set_tests_properties(test_einsum_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_qr_op PROPERTIES TIMEOUT 60)
set_tests_properties(test_deformable_psroi_pooling PROPERTIES TIMEOUT 120)
set_tests_properties(test_trilinear_interp_v2_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_imperative_static_runner_mnist PROPERTIES TIMEOUT 120)
set_tests_properties(test_masked_select_op PROPERTIES TIMEOUT 120)
......
......@@ -102,7 +102,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_HardSwish(
):
def set_params(self):
self.operand = paddle.add
self.act = fluid.layers.hard_swish
self.act = paddle.nn.functional.hardswish
class ElementwiseActivationMkldnnFusePassTest_Add_SQRT(
......@@ -202,7 +202,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_HardSwish(
):
def set_params(self):
self.operand = paddle.subtract
self.act = fluid.layers.hard_swish
self.act = paddle.nn.functional.hardswish
class ElementwiseActivationMkldnnFusePassTest_Sub_ABS(
......@@ -294,7 +294,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_HardSwish(
):
def set_params(self):
self.operand = paddle.multiply
self.act = fluid.layers.hard_swish
self.act = paddle.nn.functional.hardswish
class ElementwiseActivationMkldnnFusePassTest_Mul_SQRT(
......
......@@ -88,7 +88,7 @@ class TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest):
class TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.hard_swish(x)
return paddle.nn.functional.hardswish(x)
class TensorRTSubgraphPassHardSigmoidTest(TensorRTSubgraphPassActivationTest):
......@@ -100,7 +100,7 @@ class TensorRTSubgraphPassHardSwishPluginTest(
TensorRTSubgraphPassActivationTest
):
def append_act(self, x):
return fluid.layers.hard_swish(x, threshold=4.0, scale=8.0)
return paddle.nn.functional.hardswish(x)
class TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest):
......@@ -166,7 +166,7 @@ class TensorRTSubgraphPassMishTest(TensorRTSubgraphPassActivationTest):
)
def append_act(self, x):
return fluid.layers.mish(x)
return paddle.nn.functional.mish(x)
class TensorRTSubgraphPassMishFp16SerializeTest(
......@@ -179,7 +179,7 @@ class TensorRTSubgraphPassMishFp16SerializeTest(
)
def append_act(self, x):
return fluid.layers.mish(x)
return paddle.nn.functional.mish(x)
class TensorRTSubgraphPassDynamicMishFp16SerializeTest(
......@@ -200,7 +200,7 @@ class TensorRTSubgraphPassDynamicMishFp16SerializeTest(
)
def append_act(self, x):
return fluid.layers.mish(x)
return paddle.nn.functional.mish(x)
class TensorRTSubgraphPassPreluAllTest(TensorRTSubgraphPassActivationTest):
......
......@@ -2073,7 +2073,7 @@ class TestHardswishAPI(unittest.TestCase):
def test_fluid_api(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.hard_swish(x)
out = paddle.nn.functional.hardswish(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_hardswish(self.x_np)
......@@ -2081,7 +2081,7 @@ class TestHardswishAPI(unittest.TestCase):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.fluid.layers.hard_swish(x)
out = paddle.nn.functional.hardswish(x)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
......@@ -3414,7 +3414,7 @@ def ref_mish(x, threshold=20.0):
class TestMish(TestActivation):
def setUp(self):
self.op_type = "mish"
self.python_api = paddle.fluid.layers.nn.mish
self.python_api = paddle.nn.functional.mish
self.init_dtype()
self.init_shape()
......@@ -3480,7 +3480,7 @@ class TestMishAPI(unittest.TestCase):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.mish(x)
out = paddle.nn.functional.mish(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_mish(self.x_np)
......
......@@ -41,15 +41,9 @@ class TestGeneratorSeed(unittest.TestCase):
gen.manual_seed(111111111)
st = paddle.get_cuda_rng_state()
x = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x_again = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x_third = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
x_again = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
x_third = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
print("x: {}".format(x.numpy()))
print("x_again: {}".format(x_again.numpy()))
x = x + x_again + x_third
......@@ -57,15 +51,9 @@ class TestGeneratorSeed(unittest.TestCase):
paddle.set_cuda_rng_state(st)
x1 = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x1_again = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x1_third = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x1 = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
x1_again = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
x1_third = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
x1 = x1 + x1_again + x1_third
y1 = fluid.layers.dropout(x1, 0.5)
y_np = y.numpy()
......@@ -128,7 +116,7 @@ class TestGeneratorSeed(unittest.TestCase):
with fluid.program_guard(train_program, startup_program):
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
x = fluid.layers.uniform_random(shape=[2, 10])
x = paddle.uniform(shape=[2, 10])
result_1 = fluid.layers.fc(
input=x,
size=10,
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
def set_input(input, rois, trans):
inputs = {'Input': input, "ROIs": rois, "Trans": trans}
return inputs
def set_attrs(
no_trans,
spatial_scale,
output_channels,
group_size,
pooled_height,
pooled_width,
part_size,
sample_per_part,
trans_std,
):
attrs = {
'no_trans': no_trans,
'spatial_scale': spatial_scale,
'output_dim': output_channels,
'group_size': group_size,
'pooled_height': pooled_height,
'pooled_width': pooled_width,
'part_size': part_size,
'sample_per_part': sample_per_part,
'trans_std': trans_std,
}
return attrs
def set_outputs(output, top_count):
outputs = {
'Output': output.astype('float32'),
'TopCount': top_count.astype('float32'),
}
return outputs
class TestDeformablePSROIPoolOp(OpTest):
def set_data(self):
self.start_test1()
self.start_test2()
self.start_test3()
self.start_test4()
def start_test1(self):
self.init_test_case1()
self.make_rois()
self.calc_deformable_psroi_pooling()
inputs = self.input
rois = (self.rois[:, 1:5], self.rois_lod)
trans = self.trans
self.inputs = set_input(inputs, rois, trans)
no_trans = self.no_trans
spatial_scale = self.spatial_scale
output_channels = self.output_channels
group_size = self.group_size
pooled_height = self.pooled_height
pooled_width = self.pooled_width
part_size = self.part_size
sample_per_part = self.sample_per_part
trans_std = self.trans_std
self.attrs = set_attrs(
no_trans,
spatial_scale,
output_channels,
group_size,
pooled_height,
pooled_width,
part_size,
sample_per_part,
trans_std,
)
output = self.out.astype('float32')
top_count = self.top_count.astype('float32')
self.outputs = set_outputs(output, top_count)
def start_test2(self):
self.init_test_case2()
self.make_rois()
self.calc_deformable_psroi_pooling()
inputs = self.input
rois = (self.rois[:, 1:5], self.rois_lod)
trans = self.trans
self.inputs = set_input(inputs, rois, trans)
no_trans = self.no_trans
spatial_scale = self.spatial_scale
output_channels = self.output_channels
group_size = self.group_size
pooled_height = self.pooled_height
pooled_width = self.pooled_width
part_size = self.part_size
sample_per_part = self.sample_per_part
trans_std = self.trans_std
self.attrs = set_attrs(
no_trans,
spatial_scale,
output_channels,
group_size,
pooled_height,
pooled_width,
part_size,
sample_per_part,
trans_std,
)
output = self.out.astype('float32')
top_count = self.top_count.astype('float32')
self.outputs = set_outputs(output, top_count)
def start_test3(self):
self.init_test_case3()
self.make_rois()
self.calc_deformable_psroi_pooling()
inputs = self.input
rois = (self.rois[:, 1:5], self.rois_lod)
trans = self.trans
self.inputs = set_input(inputs, rois, trans)
no_trans = self.no_trans
spatial_scale = self.spatial_scale
output_channels = self.output_channels
group_size = self.group_size
pooled_height = self.pooled_height
pooled_width = self.pooled_width
part_size = self.part_size
sample_per_part = self.sample_per_part
trans_std = self.trans_std
self.attrs = set_attrs(
no_trans,
spatial_scale,
output_channels,
group_size,
pooled_height,
pooled_width,
part_size,
sample_per_part,
trans_std,
)
output = self.out.astype('float32')
top_count = self.top_count.astype('float32')
self.outputs = set_outputs(output, top_count)
def start_test4(self):
self.init_test_case4()
self.make_rois()
self.calc_deformable_psroi_pooling()
inputs = self.input
rois = (self.rois[:, 1:5], self.rois_lod)
trans = self.trans
self.inputs = set_input(inputs, rois, trans)
no_trans = self.no_trans
spatial_scale = self.spatial_scale
output_channels = self.output_channels
group_size = self.group_size
pooled_height = self.pooled_height
pooled_width = self.pooled_width
part_size = self.part_size
sample_per_part = self.sample_per_part
trans_std = self.trans_std
self.attrs = set_attrs(
no_trans,
spatial_scale,
output_channels,
group_size,
pooled_height,
pooled_width,
part_size,
sample_per_part,
trans_std,
)
output = self.out.astype('float32')
top_count = self.top_count.astype('float32')
self.outputs = set_outputs(output, top_count)
def init_test_case1(self):
self.batch_size = 3
self.channels = 3 * 2 * 2
self.height = 12
self.width = 12
self.input_dim = [
self.batch_size,
self.channels,
self.height,
self.width,
]
self.no_trans = False
self.spatial_scale = 1.0 / 4.0
self.output_channels = 12
self.group_size = [1, 1]
self.pooled_height = 4
self.pooled_width = 4
self.part_size = [4, 4]
self.sample_per_part = 2
self.trans_std = 0.1
self.input = np.random.random(self.input_dim).astype('float32')
def init_test_case2(self):
self.batch_size = 2
self.channels = 3 * 2 * 2
self.height = 12
self.width = 12
self.input_dim = [
self.batch_size,
self.channels,
self.height,
self.width,
]
self.no_trans = True
self.spatial_scale = 1.0 / 2.0
self.output_channels = 12
self.group_size = [1, 1]
self.pooled_height = 7
self.pooled_width = 7
self.part_size = [7, 7]
self.sample_per_part = 4
self.trans_std = 0.1
self.input = np.random.random(self.input_dim).astype('float32')
def init_test_case3(self):
self.batch_size = 2
self.channels = 3 * 2 * 2
self.height = 12
self.width = 12
self.input_dim = [
self.batch_size,
self.channels,
self.height,
self.width,
]
self.no_trans = False
self.spatial_scale = 1.0 / 4.0
self.output_channels = 12
self.group_size = [1, 1]
self.pooled_height = 3
self.pooled_width = 3
self.part_size = [3, 3]
self.sample_per_part = 3
self.trans_std = 0.2
self.input = np.random.random(self.input_dim).astype('float32')
def init_test_case4(self):
self.batch_size = 2
self.channels = 3 * 2 * 2
self.height = 12
self.width = 12
self.input_dim = [
self.batch_size,
self.channels,
self.height,
self.width,
]
self.no_trans = True
self.spatial_scale = 1.0 / 2.0
self.output_channels = 12
self.group_size = [1, 1]
self.pooled_height = 6
self.pooled_width = 2
self.part_size = [6, 6]
self.sample_per_part = 6
self.trans_std = 0.4
self.input = np.random.random(self.input_dim).astype('float32')
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x_1 = np.random.randint(
0, self.width // self.spatial_scale - self.pooled_width
)
y_1 = np.random.randint(
0, self.height // self.spatial_scale - self.pooled_height
)
x_2 = np.random.randint(
x_1 + self.pooled_width, self.width // self.spatial_scale
)
y_2 = np.random.randint(
y_1 + self.pooled_height, self.height // self.spatial_scale
)
roi = [bno, x_1, y_1, x_2, y_2]
rois.append(roi)
self.rois_num = len(rois)
self.rois = np.array(rois).astype("float32")
def dmc_bilinear(self, data_im, p_h, p_w):
h_low = int(np.floor(p_h))
w_low = int(np.floor(p_w))
h_high = h_low + 1
w_high = w_low + 1
l_h = p_h - h_low
l_w = p_w - w_low
h_h = 1 - l_h
h_w = 1 - l_w
v_1 = 0
if h_low >= 0 and w_low >= 0:
v_1 = data_im[h_low, w_low]
v_2 = 0
if h_low >= 0 and w_high <= self.width - 1:
v_2 = data_im[h_low, w_high]
v_3 = 0
if h_high <= self.height - 1 and w_low >= 0:
v_3 = data_im[h_high, w_low]
v_4 = 0
if h_high <= self.height - 1 and w_high <= self.width - 1:
v_4 = data_im[h_high, w_high]
w_1, w_2, w_3, w_4 = h_h * h_w, h_h * l_w, l_h * h_w, l_h * l_w
val = w_1 * v_1 + w_2 * v_2 + w_3 * v_3 + w_4 * v_4
return val
def calc_deformable_psroi_pooling(self):
output_shape = (
self.rois_num,
self.output_channels,
self.pooled_height,
self.pooled_width,
)
self.out = np.zeros(output_shape)
self.trans = np.random.rand(
self.rois_num, 2, self.part_size[0], self.part_size[1]
).astype('float32')
self.top_count = np.random.random((output_shape)).astype('float32')
count = (
self.rois_num
* self.output_channels
* self.pooled_height
* self.pooled_width
)
for index in range(count):
p_w = int(index % self.pooled_width)
p_h = int(index / self.pooled_width % self.pooled_height)
ctop = int(
index
/ self.pooled_width
/ self.pooled_height
% self.output_channels
)
n_out = int(
index
/ self.pooled_width
/ self.pooled_height
/ self.output_channels
)
roi = self.rois[n_out]
roi_batch_id = int(roi[0])
roi_start_w = int(np.round(roi[1])) * self.spatial_scale - 0.5
roi_start_h = int(np.round(roi[2])) * self.spatial_scale - 0.5
roi_end_w = int(np.round(roi[3] + 1)) * self.spatial_scale - 0.5
roi_end_h = int(np.round(roi[4] + 1)) * self.spatial_scale - 0.5
roi_width = max(roi_end_w - roi_start_w, 0.1)
roi_height = max(roi_end_h - roi_start_h, 0.1)
bin_size_h = float(roi_height) / float(self.pooled_height)
bin_size_w = float(roi_width) / float(self.pooled_width)
sub_bin_size_h = bin_size_h / self.sample_per_part
sub_bin_size_w = bin_size_w / self.sample_per_part
part_h = int(np.floor(p_h) / self.pooled_height * self.part_size[0])
part_w = int(np.floor(p_w) / self.pooled_width * self.part_size[1])
if self.no_trans:
trans_x = 0
trans_y = 0
else:
trans_x = self.trans[n_out][0][part_h][part_w] * self.trans_std
trans_y = self.trans[n_out][1][part_h][part_w] * self.trans_std
wstart = p_w * bin_size_w + roi_start_w
wstart = wstart + trans_x * roi_width
hstart = p_h * bin_size_h + roi_start_h
hstart = hstart + trans_y * roi_height
sum = 0
num_sample = 0
g_w = np.floor(p_w * self.group_size[0] / self.pooled_height)
g_h = np.floor(p_h * self.group_size[1] / self.pooled_width)
g_w = min(max(g_w, 0), self.group_size[0] - 1)
g_h = min(max(g_h, 0), self.group_size[1] - 1)
input_i = self.input[roi_batch_id]
for i_w in range(self.sample_per_part):
for i_h in range(self.sample_per_part):
w_sample = wstart + i_w * sub_bin_size_w
h_sample = hstart + i_h * sub_bin_size_h
if (
w_sample < -0.5
or w_sample > self.width - 0.5
or h_sample < -0.5
or h_sample > self.height - 0.5
):
continue
w_sample = min(max(w_sample, 0.0), self.width - 1.0)
h_sample = min(max(h_sample, 0.0), self.height - 1.0)
c_sample = int(
(ctop * self.group_size[0] + g_h) * self.group_size[1]
+ g_w
)
val = self.dmc_bilinear(
input_i[c_sample], h_sample, w_sample
)
sum = sum + val
num_sample = num_sample + 1
if num_sample == 0:
self.out[n_out][ctop][p_h][p_w] = 0
else:
self.out[n_out][ctop][p_h][p_w] = sum / num_sample
self.top_count[n_out][ctop][p_h][p_w] = num_sample
def setUp(self):
self.op_type = "deformable_psroi_pooling"
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['Input'], 'Output')
class TestDeformablePSROIPoolOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
input1 = fluid.data(
name="input1", shape=[2, 192, 64, 64], dtype='float32'
)
rois1 = fluid.data(
name="rois1", shape=[-1, 4], dtype='float32', lod_level=1
)
trans1 = fluid.data(
name="trans1", shape=[2, 384, 64, 64], dtype='float32'
)
# The `input` must be Variable and the data type of `input` Tensor must be one of float32 and float64.
def test_input_type():
fluid.layers.deformable_roi_pooling(
input=[3, 4],
rois=rois1,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_input_type)
def test_input_tensor_dtype():
input2 = fluid.data(
name="input2", shape=[2, 192, 64, 64], dtype='int32'
)
fluid.layers.deformable_roi_pooling(
input=input2,
rois=rois1,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_input_tensor_dtype)
# The `rois` must be Variable and the data type of `rois` Tensor must be one of float32 and float64.
def test_rois_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=2,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_rois_type)
def test_rois_tensor_dtype():
rois2 = fluid.data(
name="rois2", shape=[-1, 4], dtype='int32', lod_level=1
)
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois2,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_rois_tensor_dtype)
# The `trans` must be Variable and the data type of `trans` Tensor must be one of float32 and float64.
def test_trans_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=[2],
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_trans_type)
def test_trans_tensor_dtype():
trans2 = fluid.data(
name="trans2", shape=[2, 384, 64, 64], dtype='int32'
)
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=trans2,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_trans_tensor_dtype)
# The `group_size` must be one of list and tuple.
# Each element must be int.
def test_group_size_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=trans1,
group_size=1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_group_size_type)
# The `part_size` must be one of list, tuple and None.
# Each element must be int.
def test_part_size_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=8,
sample_per_part=4,
position_sensitive=True,
)
self.assertRaises(TypeError, test_part_size_type)
if __name__ == '__main__':
unittest.main()
......@@ -408,9 +408,7 @@ class TestDygraphGradientClip(unittest.TestCase):
def test_gradient_clip(self):
with fluid.dygraph.guard():
linear = paddle.nn.Linear(5, 5)
inputs = fluid.layers.uniform_random(
[16, 5], min=-10, max=10
).astype('float32')
inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
out = linear(fluid.dygraph.to_variable(inputs))
loss = paddle.mean(out)
loss.backward()
......@@ -552,9 +550,9 @@ class TestDygraphGradientClipFP16(unittest.TestCase):
models=model, optimizers=sgd_optimizer, level='O2'
)
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
inputs = fluid.layers.uniform_random(
[1, 5], min=-10, max=10
).astype('float32')
inputs = paddle.uniform([1, 5], min=-10, max=10).astype(
'float32'
)
with paddle.amp.auto_cast(level='O2'):
out = model(fluid.dygraph.to_variable(inputs))
loss = paddle.mean(out)
......@@ -600,9 +598,7 @@ class TestDygraphGradientClipFP16(unittest.TestCase):
class TestDygraphGradientClipFP64(unittest.TestCase):
def test_gradient_clip(self):
with fluid.dygraph.guard():
inputs = fluid.layers.uniform_random(
[16, 5], min=-10, max=10
).astype('float32')
inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
linear = paddle.nn.Linear(5, 5)
out = linear(fluid.dygraph.to_variable(inputs))
loss = paddle.mean(out)
......
......@@ -1843,7 +1843,7 @@ class TestLayer(LayerTest):
def test_shard_index(self):
with self.static_graph():
x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
shard_label = fluid.layers.shard_index(
shard_label = paddle.shard_index(
input=x, index_num=20, nshards=2, shard_id=0
)
......@@ -2342,7 +2342,7 @@ class TestBook(LayerTest):
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.mish(input, name='mish')
out = paddle.nn.functional.mish(input, name='mish')
return out
def make_cross_entropy(self):
......@@ -2794,7 +2794,7 @@ class TestBook(LayerTest):
def test_unfold(self):
with self.static_graph():
x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
out = layers.unfold(x, [3, 3], 1, 1, 1)
out = paddle.nn.functional.unfold(x, [3, 3], 1, 1, 1)
return out
def test_partial_concat(self):
......@@ -2809,40 +2809,6 @@ class TestBook(LayerTest):
)
return concat1, concat2
def test_deform_roi_pooling(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = layers.data(
name='input',
shape=[2, 3, 32, 32],
dtype='float32',
append_batch_size=False,
)
rois = layers.data(
name="rois", shape=[4], dtype='float32', lod_level=1
)
trans = layers.data(
name="trans",
shape=[2, 3, 32, 32],
dtype='float32',
append_batch_size=False,
)
out = layers.deformable_roi_pooling(
input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
)
return out
def test_addmm(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
......
......@@ -35,23 +35,17 @@ class TestGeneratorSeed(unittest.TestCase):
fluid.enable_dygraph()
gen = paddle.seed(12312321111)
x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0)
x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
st1 = gen.get_state()
x1 = fluid.layers.uniform_random(
[10], dtype="float32", min=0.0, max=1.0
)
x1 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
gen.set_state(st1)
print(gen.get_state())
x2 = fluid.layers.uniform_random(
[10], dtype="float32", min=0.0, max=1.0
)
x2 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
paddle.seed(12312321111)
x3 = fluid.layers.uniform_random(
[10], dtype="float32", min=0.0, max=1.0
)
x3 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
x_np = x.numpy()
x1_np = x1.numpy()
......@@ -72,8 +66,8 @@ class TestGeneratorSeed(unittest.TestCase):
with fluid.program_guard(train_program, startup_program):
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = fluid.layers.uniform_random(shape=[3, 4])
result_2 = fluid.layers.uniform_random(shape=[3, 4])
result_1 = paddle.uniform(shape=[3, 4])
result_2 = paddle.uniform(shape=[3, 4])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
......@@ -102,15 +96,11 @@ class TestGeneratorSeed(unittest.TestCase):
gen = paddle.seed(111111111)
st = gen.get_state()
# x = np.arange(1,101).reshape(2,50).astype("float32")
x = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
y = fluid.layers.dropout(x, 0.5)
gen.manual_seed(111111111)
# gen.set_state(st)
x1 = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0
)
x1 = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
y1 = fluid.layers.dropout(x1, 0.5)
y_np = y.numpy()
y1_np = y1.numpy()
......@@ -129,7 +119,7 @@ class TestGeneratorSeed(unittest.TestCase):
with fluid.program_guard(train_program, startup_program):
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
x_1 = fluid.layers.uniform_random(shape=[2, 10])
x_1 = paddle.uniform(shape=[2, 10])
y_1 = fluid.layers.dropout(x_1, 0.5)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
......@@ -235,8 +225,8 @@ class TestGeneratorSeed(unittest.TestCase):
with fluid.program_guard(train_program, startup_program):
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = fluid.layers.uniform_random(shape=[3, 4])
result_2 = fluid.layers.uniform_random(shape=[3, 4])
result_1 = paddle.uniform(shape=[3, 4])
result_2 = paddle.uniform(shape=[3, 4])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
......@@ -384,7 +374,7 @@ class TestGeneratorSeed(unittest.TestCase):
with fluid.program_guard(train_program, startup_program):
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
x = fluid.layers.uniform_random(shape=[2, 10])
x = paddle.uniform(shape=[2, 10])
result_1 = fluid.layers.fc(
input=x,
size=10,
......
......@@ -263,7 +263,7 @@ class TestRegularizer(unittest.TestCase):
regularizer=paddle.regularizer.L1Decay()
)
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.layers.uniform_random([2, 2, 3])
x = paddle.uniform([2, 2, 3])
out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
loss = paddle.sum(out)
sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
......
......@@ -173,7 +173,7 @@ class TestRegularizer(unittest.TestCase):
regularizer=paddle.regularizer.L1Decay()
)
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.layers.uniform_random([2, 2, 3])
x = paddle.uniform([2, 2, 3])
out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
loss = paddle.sum(out)
sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
......
......@@ -15,6 +15,7 @@
import unittest
import paddle
from paddle.fluid.layers.utils import try_set_static_shape_tensor
class StaticShapeInferrenceTest(unittest.TestCase):
......@@ -24,7 +25,8 @@ class StaticShapeInferrenceTest(unittest.TestCase):
name="x", shape=[-1, 2], dtype='float32'
)
shape = paddle.shape(data) # shape should be [-1, 2]
x = paddle.fluid.layers.uniform_random(shape)
x = paddle.uniform(shape)
try_set_static_shape_tensor(x, shape)
self.assertEqual(x.shape, data.shape)
paddle.disable_static()
......
......@@ -66,7 +66,7 @@ class TestLayersUnbind(unittest.TestCase):
def test_layers_unbind(self):
x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1')
[out_0, out_1] = fluid.layers.unbind(input=x_1, axis=0)
[out_0, out_1] = paddle.unbind(input=x_1, axis=0)
input_1 = np.random.random([2, 3]).astype("float32")
axis = fluid.data(shape=[1], dtype='int32', name='axis')
exe = fluid.Executor(place=fluid.CPUPlace())
......
......@@ -162,23 +162,6 @@ class TestUniformRandomOpBF16SelectedRowsWithDiagInit(
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
class TestUniformRandomOpBF16AttrTensorAPI(unittest.TestCase):
def test_attr_tensor_API(self):
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
dim_tensor = fluid.layers.fill_constant([1], "int64", 3)
ret = fluid.layers.nn.uniform_random(
[1, dim_tensor, 2], dtype=np.uint16
)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
outs = exe.run(train_program, fetch_list=[ret])
class TestUniformRandomOpAPISeed(unittest.TestCase):
def test_attr_tensor_API(self):
_seed = 10
......@@ -189,12 +172,8 @@ class TestUniformRandomOpAPISeed(unittest.TestCase):
_min = 5
_max = 10
ret = fluid.layers.nn.uniform_random(
[2, 3, 2], min=_min, max=_max, seed=_seed
)
ret_2 = fluid.layers.nn.uniform_random(
[2, 3, 2], min=_min, max=_max, seed=_seed
)
ret = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
ret_2 = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
res = paddle.equal(ret, ret_2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
......
......@@ -199,26 +199,18 @@ class TestUniformRandomOpError(unittest.TestCase):
x1 = fluid.create_lod_tensor(
np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace()
)
fluid.layers.uniform_random(x1)
paddle.uniform(x1)
self.assertRaises(TypeError, test_Variable)
def test_Variable2():
x1 = np.zeros((4, 784))
fluid.layers.uniform_random(x1)
paddle.uniform(x1)
self.assertRaises(TypeError, test_Variable2)
def test_dtype():
x2 = fluid.layers.data(
name='x2', shape=[4, 784], dtype='float32'
)
fluid.layers.uniform_random(x2, 'int32')
self.assertRaises(TypeError, test_dtype)
def test_out_dtype():
out = fluid.layers.uniform_random(shape=[3, 4], dtype='float64')
out = paddle.uniform(shape=[3, 4], dtype='float64')
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
test_out_dtype()
......@@ -323,7 +315,7 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
dim_tensor = fluid.layers.fill_constant([1], "int64", 3)
ret = fluid.layers.nn.uniform_random([1, dim_tensor, 2])
ret = paddle.uniform([1, dim_tensor, 2])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
......@@ -339,7 +331,7 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
with fluid.program_guard(train_program, startup_program):
dim_1 = fluid.layers.fill_constant([1], "int64", 3)
dim_2 = fluid.layers.fill_constant([1], "int32", 2)
ret = fluid.layers.nn.uniform_random([1, dim_1, dim_2])
ret = paddle.uniform([1, dim_1, dim_2])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
......@@ -354,7 +346,7 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
shape = fluid.data(name='shape_tensor', shape=[2], dtype="int32")
ret = fluid.layers.nn.uniform_random(shape)
ret = paddle.uniform(shape)
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
......@@ -377,12 +369,8 @@ class TestUniformRandomOp_API_seed(unittest.TestCase):
_min = 5
_max = 10
ret = fluid.layers.nn.uniform_random(
[2, 3, 2], min=_min, max=_max, seed=_seed
)
ret_2 = fluid.layers.nn.uniform_random(
[2, 3, 2], min=_min, max=_max, seed=_seed
)
ret = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
ret_2 = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
res = paddle.equal(ret, ret_2)
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
......@@ -464,9 +452,7 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase):
class TestUniformRandomDygraphMode(unittest.TestCase):
def test_check_output(self):
with fluid.dygraph.guard():
x = fluid.layers.uniform_random(
[10], dtype="float32", min=0.0, max=1.0
)
x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
x_np = x.numpy()
for i in range(10):
self.assertTrue((x_np[i] > 0 and x_np[i] < 1.0))
......@@ -698,9 +684,7 @@ class TestUniformMinMaxTensor(UnittestBase):
min_v = paddle.to_tensor([0.1])
max_v = paddle.to_tensor([0.9])
y = paddle.uniform([2, 3, 10], min=min_v, max=max_v)
z = paddle.fluid.layers.uniform_random(
[2, 3, 10], min=min_v, max=max_v
)
z = paddle.uniform([2, 3, 10], min=min_v, max=max_v)
out = feat + y + z
......