Unverified commit 8d9fdd8b authored by yunyaoXYY, committed by GitHub

[Clean fluid] Clean maxout, space_to_depth, affine_channel, similarity_focus and add_position_encoding (#48410)

* Clean fluid maxout

* Clean fluid space_to_depth

* Clean fluid affine_channel and related tests

* Clean fluid similarity_focus and related tests

* Clean fluid add_position_encoding and related tests

* Fix code style
Parent d5387de2
@@ -119,14 +119,9 @@ __all__ = [
    'clip_by_norm',
    'mean',
    'mul',
    'maxout',
    'space_to_depth',
    'affine_channel',
    'similarity_focus',
    'hash',
    'grid_sampler',
    'log_loss',
    'add_position_encoding',
    'bilinear_tensor_product',
    'merge_selected_rows',
    'get_tensor_from_selected_rows',
@@ -7606,343 +7601,6 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
    return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.maxout")
@templatedoc()
def maxout(x, groups, name=None, axis=1):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
groups(int): ${groups_comment}
axis(int, optional): ${axis_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually it is not necessary to set this
parameter; the default is None.
Returns:
Variable: ${out_comment}
Raises:
ValueError: If `axis` is not 1, -1 or 3.
ValueError: If the number of input channels can not be divisible by `groups`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
input = fluid.data(
name='data',
shape=[None, 256, 32, 32],
dtype='float32')
out = fluid.layers.maxout(input, groups=2)
"""
return paddle.nn.functional.maxout(**locals())
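# Note: the removed fluid.layers.maxout above simply forwarded to
# paddle.nn.functional.maxout (see the @deprecated decorator), so a minimal
# migration sketch looks like the following; the input shape is illustrative.
import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 4, 8, 8])           # NCHW feature map with 4 channels
out = F.maxout(x, groups=2, axis=1)     # maxout over 2 groups: 4 -> 2 channels
print(out.shape)                        # [2, 2, 8, 8]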
def space_to_depth(x, blocksize, name=None):
r"""
Rearranges the input LoDTensor, with layout [batch, channel, height, width], from space to depth
according to a given blocksize. This op rearranges blocks of spatial data into depth. More specifically, it outputs a copy of \
the input LoDTensor where values from the height and width dimensions are moved to the channel \
dimension.
The attr blocksize indicates the input block size.
space_to_depth reorganizes the elements of the input with shape [batch, channel, height, width] \
according to blocksize to construct an output with shape \
[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]:
- Non-overlapping blocks of size blocksize x blocksize are rearranged into depth at each location.
- The Y, X coordinates within each block of the input become the high-order component of the output channel index.
- channel should be divisible by the square of blocksize.
- height and width should be divisible by blocksize.
This OP is useful for resizing the activations between convolutions \
(while keeping all data).
.. code-block:: text
Given the input x with the shape [1, 1, 4, 4]:
x.data = [[[[1, 2, 5, 6],
[3, 4, 7, 8],
[9, 10, 13, 14],
[11, 12, 15, 16]]]]
blocksize = 2
then get the output with the shape [1, 4, 2, 2]:
out.data = [[[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]],
[[13, 14], [15, 16]]]]
Args:
x (Variable): The input, a 4-D Tensor or LoDTensor with the shape \
[batch, channel, height, width].
blocksize (int): The block size used to rearrange each feature map; it should be >= 2.
name(str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually it is not necessary to set this \
parameter; the default is None.
Returns:
Tensor: The output, a 4-D Tensor or LoDTensor with the shape \
[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
data = fluid.data(
name='data', shape=[1, 4, 2, 2], dtype='float32')
space_to_depthed = fluid.layers.space_to_depth(
x=data, blocksize=2)
exe = fluid.Executor(fluid.CPUPlace())
data_np = np.arange(0,16).reshape((1,4,2,2)).astype('float32')
print(data_np)
#array([[[[ 0., 1.], [ 2., 3.]],
# [[ 4., 5.], [ 6., 7.]],
# [[ 8., 9.], [10., 11.]],
# [[12., 13.], [14., 15.]]]], dtype=float32)
out_main = exe.run(fluid.default_main_program(),
feed={'data': data_np},
fetch_list=[space_to_depthed])
print(out_main)
#[array([[[[ 0.]], [[ 4.]], [[ 1.]], [[ 5.]],
# [[ 8.]], [[12.]], [[ 9.]], [[13.]],
# [[ 2.]], [[ 6.]], [[ 3.]], [[ 7.]],
# [[10.]], [[14.]], [[11.]], [[15.]]]], dtype=float32)]
"""
helper = LayerHelper("space_to_depth", **locals())
if not (isinstance(blocksize, int)):
raise ValueError("blocksize must be a python Int")
check_variable_and_dtype(
x,
'x',
['float16', 'float32', 'float64', 'int32', 'int64'],
'space_to_depth',
)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="space_to_depth",
inputs={"X": x},
attrs={"blocksize": blocksize},
outputs={"Out": out},
)
return out
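# A NumPy rearrangement that reproduces the example output shown in the
# docstring above (the arange(0, 16) input of shape [1, 4, 2, 2] with
# blocksize=2). The reshape/transpose order below was chosen to match that
# example and is only an illustration; the authoritative element ordering is
# defined by the space_to_depth kernel itself.
import numpy as np

def space_to_depth_ref(x, bs):
    n, c, h, w = x.shape
    assert c % (bs * bs) == 0 and h % bs == 0 and w % bs == 0
    # split the channel dim by bs and each spatial dim into (output, in-block offset)
    y = x.reshape(n, c // bs, bs, h // bs, bs, w // bs, bs)
    # move the in-block offsets into the channel dimension
    y = y.transpose(0, 4, 1, 6, 2, 3, 5)
    return y.reshape(n, c * bs * bs, h // bs, w // bs)

x = np.arange(0, 16).reshape((1, 4, 2, 2)).astype('float32')
print(space_to_depth_ref(x, 2).flatten())
# [ 0.  4.  1.  5.  8. 12.  9. 13.  2.  6.  3.  7. 10. 14. 11. 15.]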
def affine_channel(
x, scale=None, bias=None, data_layout='NCHW', name=None, act=None
):
"""
Applies a separate affine transformation to each channel of the input.
Useful for replacing spatial batch norm with its equivalent fixed
transformation. The input can also be a 2D tensor, in which case the affine
transformation is applied along the second dimension.
Args:
x (Variable): Feature map input can be a 4D tensor with order NCHW
or NHWC. It can also be a 2D tensor, in which case the affine transformation
is applied in the second dimension. The data type is float32 or float64.
scale (Variable): 1D input of shape (C), the c-th element is the scale
factor of the affine transformation for the c-th channel of
the input. The data type is float32 or float64.
bias (Variable): 1D input of shape (C), the c-th element is the bias
of the affine transformation for the c-th channel of the input.
The data type is float32 or float64.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore
data_layout.
name (str, default None): The name of this layer. For more information,
please refer to :ref:`api_guide_Name` .
act (str, default None): Activation to be applied to the output of this layer.
Returns:
Variable: A tensor which has the same shape, data layout and data type with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
import paddle
paddle.enable_static()
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
default_initializer=fluid.initializer.Constant(2.0))
input_bias = fluid.layers.create_parameter(shape=[1],dtype="float32",
default_initializer=fluid.initializer.Constant(0.5))
out = fluid.layers.affine_channel(data,scale=input_scale,
bias=input_bias)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_array] = exe.run(test_program,
fetch_list=out,
feed={'data': np.ones([1,1,2,2]).astype('float32')})
# out_array is [[[[2.5, 2.5],
# [2.5, 2.5]]]] with shape: [1, 1, 2, 2]
"""
helper = LayerHelper("affine_channel", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'affine_channel')
check_type(scale, 'scale', (Variable, type(None)), 'affine_channel')
check_type(bias, 'bias', (Variable, type(None)), 'affine_channel')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="affine_channel",
inputs={"X": x, 'Scale': scale, 'Bias': bias},
attrs={"data_layout": data_layout},
outputs={"Out": out},
)
return helper.append_activation(out)
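# A minimal NumPy sketch of the per-channel affine transform described above
# for an NCHW input: out[n, c, h, w] = scale[c] * x[n, c, h, w] + bias[c].
# It reproduces the docstring example (scale=2.0, bias=0.5 on an all-ones map).
import numpy as np

def affine_channel_ref(x, scale, bias):
    # broadcast the 1-D per-channel scale and bias over batch and spatial dims
    return x * scale.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)

x = np.ones([1, 1, 2, 2], dtype='float32')
scale = np.array([2.0], dtype='float32')
bias = np.array([0.5], dtype='float32')
print(affine_channel_ref(x, scale, bias))   # [[[[2.5, 2.5], [2.5, 2.5]]]]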
def similarity_focus(input, axis, indexes, name=None):
r"""
SimilarityFocus Operator
Generate a similarity focus mask with the same shape of input using the following method:
1. Extract the 3-D tensor (here the first dimension is BatchSize) corresponding
to the axis according to the indexes. For example, if axis=1 and indexes=[a],
it will get the matrix T=X[:, a, :, :]. In this case, if the shape of input X
is (BatchSize, A, B, C), the shape of tensor T is (BatchSize, B, C).
2. For each index, repeatedly pick the largest number in tensor T such that each
row and each column contains at most one picked number: once the largest
number has been found in the i-th row and the j-th column, the remaining
numbers in that row and that column are skipped, and the next largest number
is selected from what remains. Obviously, min(B, C) numbers are picked in
total. Mark the corresponding positions of the 3-D similarity focus mask
as 1 and all other positions as 0, then combine the masks of all indexes
with an element-wise OR.
3. Broadcast the 3-D similarity focus mask to the same shape of input X.
Refer to `Similarity Focus Layer <http://www.aclweb.org/anthology/N16-1108>`_
.. code-block:: text
* Example :
Given a 4-D tensor x with the shape (BatchSize, C, A, B), where C is
the number of channels and the shape of feature map is (A, B):
x.shape = (2, 3, 2, 2)
x.data = [[[[0.8, 0.1],
[0.4, 0.5]],
[[0.9, 0.7],
[0.9, 0.9]],
[[0.8, 0.9],
[0.1, 0.2]]],
[[[0.2, 0.5],
[0.3, 0.4]],
[[0.9, 0.7],
[0.8, 0.4]],
[[0.0, 0.2],
[0.4, 0.7]]]]
Given axis: 1 (the axis of the channel)
Given indexes: [0]
then we get a 4-D tensor out with the same shape of input x:
out.shape = (2, 3, 2, 2)
out.data = [[[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]]],
[[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]]]]
Args:
input(Variable): The input tensor. It should be a 4-D tensor with shape
[BatchSize, A, B, C]. The data type is float32 or float64.
axis(int): Indicates the dimension to be selected. It can only be
1, 2 or 3.
indexes(list): Indicates the indexes of the selected dimension.
Returns:
Variable: A tensor variable with the same shape and same type \
as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(
name='data', shape=[-1, 3, 2, 2], dtype='float32')
fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
"""
helper = LayerHelper('similarity_focus', **locals())
# check attrs
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], "similarity_focus"
)
check_type(axis, 'axis', int, "similarity_focus")
check_type(indexes, 'indexes', list, "similarity_focus")
if axis != 1 and axis != 2 and axis != 3:
raise ValueError("axis must be 1, 2 or 3.")
if len(indexes) == 0:
raise ValueError("indexes can not be empty.")
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='similarity_focus',
inputs={'X': input},
outputs={'Out': out},
attrs={"axis": axis, "indexes": indexes},
)
return out
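# A NumPy sketch of the mask construction documented above, for the axis=1
# case used in the docstring example. For every batch item and every index it
# greedily picks the largest value of T = x[:, index, :, :] whose row and
# column are still unused, marks that position with 1, ORs the masks of all
# indexes, and broadcasts the result over the channel dimension.
import numpy as np

def similarity_focus_ref(x, indexes):
    n, c, a, b = x.shape
    mask = np.zeros((n, a, b), dtype=x.dtype)
    for batch in range(n):
        for idx in indexes:
            t = x[batch, idx]
            used_rows, used_cols = set(), set()
            # visit positions from the largest value to the smallest
            for flat in np.argsort(t, axis=None)[::-1]:
                i, j = divmod(int(flat), b)
                if i in used_rows or j in used_cols:
                    continue
                mask[batch, i, j] = 1.0
                used_rows.add(i)
                used_cols.add(j)
                if len(used_rows) == min(a, b):
                    break
    # broadcast the (N, A, B) mask to the full (N, C, A, B) input shape
    return np.broadcast_to(mask[:, None, :, :], x.shape).copy()

x = np.array([[[[0.8, 0.1], [0.4, 0.5]],
               [[0.9, 0.7], [0.9, 0.9]],
               [[0.8, 0.9], [0.1, 0.2]]]], dtype='float32')
print(similarity_focus_ref(x, indexes=[0])[0, 0])   # [[1. 0.] [0. 1.]]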
def hash(input, hash_size, num_hash=1, name=None):
"""
@@ -8156,76 +7814,6 @@ def log_loss(input, label, epsilon=1e-4, name=None):
    return paddle.nn.functional.log_loss(input, label, epsilon, name)
def add_position_encoding(input, alpha, beta, name=None):
r"""
This operator performs weighted sum of input feature at each position
(position in the sequence) and the corresponding position encoding.
For more details of position encoding, please refer to `Attention Is All You
Need <http://arxiv.org/pdf/1706.03762.pdf>`_ .
The formula is as follows:
.. math::

    PE(pos, 2i) &= \sin{(pos / 10000^{2i / P})}  \\
    PE(pos, 2i + 1) &= \cos{(pos / 10000^{2i / P})}  \\
    Out(:, pos, i) &= \alpha * input(:, pos, i) + \beta * PE(pos, i)
Where:
- :math:`PE(pos, 2i)` : the value at even index `2i` for encoding of position `pos`.
- :math:`PE(pos, 2i + 1)` : the value at odd index `2i+1` for encoding of position `pos`.
Args:
input(Variable): A Tensor or LoDTensor (lod level is 1). If it is a
Tensor, the shape should be `[N, M, P]`, where `N` stands for
batch size, `M` for sequence length, `P` for the size of feature
dimension. If it is a LoDTensor, the shape should be `[N, P]`,
where `N` stands for the total sequence lengths in this mini-batch,
`P` for the size of feature. The data type should be float32 or float64.
alpha(float): Indicates the weight coefficient for `input` when performing
the weighted sum.
beta(float): Indicates the weight coefficient for the position encoding when
performing the weighted sum.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually it is not necessary to set this
parameter; the default is None.
Returns:
Variable: A Tensor or LoDTensor. It has the same shape, data type and lod as `input`.
Examples:
.. code-block:: python
import paddle
tensor = paddle.randn([16, 32, 64])
position_tensor = paddle.fluid.layers.add_position_encoding(
input=tensor, alpha=1.0, beta=1.0)
"""
if _non_static_mode():
return _legacy_C_ops.add_position_encoding(
input, "alpha", alpha, "beta", beta
)
helper = LayerHelper('add_position_encoding', **locals())
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], "add_position_encoding"
)
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type="add_position_encoding",
inputs={"X": input},
outputs={"Out": out},
attrs={"alpha": alpha, "beta": beta},
)
return out
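# A NumPy sketch of the weighted sum described by the formula above for a
# dense [N, M, P] input (batch, sequence length, feature size): sin on even
# feature indexes, cos on odd ones, out = alpha * input + beta * PE. It
# follows the docstring formula literally and assumes an even feature size.
import numpy as np

def add_position_encoding_ref(x, alpha=1.0, beta=1.0):
    n, m, p = x.shape
    assert p % 2 == 0, "assumes an even feature size"
    pos = np.arange(m, dtype='float64').reshape(-1, 1)        # [M, 1]
    i = np.arange(p // 2, dtype='float64').reshape(1, -1)     # [1, P//2]
    angle = pos / np.power(10000.0, 2.0 * i / p)              # [M, P//2]
    pe = np.zeros((m, p))
    pe[:, 0::2] = np.sin(angle)   # PE(pos, 2i)
    pe[:, 1::2] = np.cos(angle)   # PE(pos, 2i + 1)
    return (alpha * x + beta * pe[None, :, :]).astype(x.dtype)

x = np.random.randn(16, 32, 64).astype('float32')
print(add_position_encoding_ref(x, alpha=1.0, beta=1.0).shape)   # (16, 32, 64)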
def bilinear_tensor_product(
x, y, size, act=None, name=None, param_attr=None, bias_attr=None
):
...
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return False
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 32, 32])
self.feed_fp32 = {'data': data.astype(np.float32)}
self.feed_fp16 = {'data': data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {}
self.attrs['data_layout'] = 'NCHW'
@IPUOpTest.static_graph
def build_model(self):
data = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
)
input_scale = paddle.fluid.layers.create_parameter(
shape=[self.feed_shape[0][1]], dtype="float32"
)
input_bias = paddle.fluid.layers.create_parameter(
shape=[self.feed_shape[0][1]], dtype="float32"
)
out = paddle.fluid.layers.affine_channel(
data, scale=input_scale, bias=input_bias
)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_data_feed(self):
data = np.random.uniform(size=[2, 4, 64, 64])
self.feed_fp32 = {'data': data.astype(np.float32)}
self.feed_fp16 = {'data': data.astype(np.float16)}
@unittest.skip("Only support NCHW")
class TestNHWC(TestBase):
def set_op_attrs(self):
self.attrs = {}
self.attrs['data_layout'] = 'NHWC'
def set_data_feed(self):
data = np.random.uniform(size=[2, 64, 64, 3])
self.feed_fp32 = {'data': data.astype(np.float32)}
self.feed_fp16 = {'data': data.astype(np.float16)}
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTAffineChannelTest(InferencePassTest):
def setUp(self):
self.bs = 2
self.channel = 8
self.height = 16
self.width = 16
self.data_layout = 'NCHW'
self.precision = AnalysisConfig.Precision.Float32
self.serialize = False
self.enable_trt = True
def build(self):
# set min_graph_size to 2,
# because affine channel doesn't support nhwc format
self.trt_parameters = InferencePassTest.TensorRTParam(
1 << 30, self.bs, 2, self.precision, self.serialize, False
)
with fluid.program_guard(self.main_program, self.startup_program):
if self.data_layout == 'NCHW':
shape = [-1, self.channel, self.height, self.width]
else:
shape = [-1, self.height, self.width, self.channel]
data = fluid.data(name='in', shape=shape, dtype='float32')
# set scale, bias by constant
scale = fluid.layers.create_parameter(
shape=[self.channel],
dtype='float32',
default_initializer=fluid.initializer.Constant(2.0),
)
bias = fluid.layers.create_parameter(
shape=[self.channel],
dtype='float32',
default_initializer=fluid.initializer.Constant(0.5),
)
affine_channel_out = fluid.layers.affine_channel(
data, scale=scale, bias=bias, data_layout=self.data_layout
)
out = fluid.layers.batch_norm(affine_channel_out, is_test=True)
shape[0] = self.bs
self.feeds = {
'in': np.random.random(shape).astype('float32'),
}
self.fetch_list = [out]
def check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
atol = 1e-5
if self.trt_parameters.precision == AnalysisConfig.Precision.Half:
atol = 2e-2
self.check_output_with_option(use_gpu, atol, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
)
def run_test(self):
self.build()
self.check_output()
def run_test_all(self):
precision_opt = [
AnalysisConfig.Precision.Float32,
AnalysisConfig.Precision.Half,
]
serialize_opt = [False, True]
if self.data_layout == 'NCHW':
min_shape = [
self.bs,
self.channel,
self.height // 2,
self.width // 2,
]
max_shape = [self.bs, self.channel, self.height * 2, self.width * 2]
opt_shape = [self.bs, self.channel, self.height, self.width]
if self.data_layout == 'NHWC':
min_shape = [
self.bs,
self.height // 2,
self.width // 2,
self.channel,
]
max_shape = [self.bs, self.height * 2, self.width * 2, self.channel]
opt_shape = [self.bs, self.height, self.width, self.channel]
dynamic_shape_profile = InferencePassTest.DynamicShapeParam(
{'in': min_shape}, {'in': max_shape}, {'in': opt_shape}, False
)
dynamic_shape_opt = [None, dynamic_shape_profile]
for precision, serialize, dynamic_shape in itertools.product(
precision_opt, serialize_opt, dynamic_shape_opt
):
self.precision = precision
self.serialize = serialize
self.dynamic_shape_params = dynamic_shape
self.run_test()
def test_base(self):
self.run_test()
def test_fp16(self):
self.precision = AnalysisConfig.Precision.Half
self.run_test()
def test_serialize(self):
self.serialize = True
self.run_test()
def test_dynamic(self):
self.dynamic_shape_params = InferencePassTest.DynamicShapeParam(
{'in': [self.bs, self.channel, self.height // 2, self.width // 2]},
{'in': [self.bs, self.channel, self.height * 2, self.width * 2]},
{'in': [self.bs, self.channel, self.height, self.width]},
False,
)
self.run_test()
def test_nchw_all(self):
self.run_test_all()
def test_nhwc(self):
self.data_layout = 'NHWC'
self.run_test_all()
if __name__ == "__main__":
unittest.main()
@@ -15,9 +15,6 @@ import unittest
import numpy as np
import math
from op_test import OpTest
import paddle.fluid as fluid
import paddle
from paddle.fluid import Program, program_guard
def add_position_encoding(input, alpha=1.0, beta=1.0):
@@ -151,34 +148,5 @@ class TestAddPositionEncodingLoDTensorOp(OpTest):
start += max_length
class TestAddPositionEncodingOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
input_data = np.random.random((4, 16, 8)).astype("float32")
def test_Variable():
# the input type must be Variable
fluid.layers.add_position_encoding(
input=input_data, alpha=1.0, beta=1.0
)
self.assertRaises(TypeError, test_Variable)
class TestAddPositionEncodingOpDygraph(unittest.TestCase):
def test_dygraph(self):
paddle.disable_static()
tensor = np.random.randn(16, 32, 64)
position_tensor = paddle.fluid.layers.add_position_encoding(
input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0
).numpy()
paddle.enable_static()
position_tensor_np = add_position_encoding(tensor, 1.0, 1.0)
np.testing.assert_allclose(
position_tensor, position_tensor_np, rtol=1e-05
)
if __name__ == '__main__':
unittest.main()
@@ -18,7 +18,6 @@ Unit testing for affine_channel_op
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
def affine_channel(x, scale, bias, layout):
@@ -65,41 +64,6 @@ class TestAffineChannelOp(OpTest):
self.layout = 'NCHW'
class TestAffineChannelOpError(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program()):
def test_x_type():
input_data = np.random.random(2, 1, 2, 2).astype("float32")
fluid.layers.affine_channel(input_data)
self.assertRaises(TypeError, test_x_type)
def test_x_dtype():
x2 = fluid.layers.data(
name='x2', shape=[None, 1, 2, 2], dtype='int32'
)
fluid.layers.affine_channel(x2)
self.assertRaises(TypeError, test_x_dtype)
def test_scale_type():
x3 = fluid.layers.data(
name='x3', shape=[None, 1, 2, 2], dtype='float32'
)
fluid.layers.affine_channel(x3, scale=1)
self.assertRaises(TypeError, test_scale_type)
def test_bias_type():
x4 = fluid.layers.data(
name='x4', shape=[None, 1, 2, 2], dtype='float32'
)
fluid.layers.affine_channel(x4, bias=1)
self.assertRaises(TypeError, test_bias_type)
class TestAffineChannelNHWC(TestAffineChannelOp):
def init_test_case(self):
self.shape = [2, 3, 3, 100]
...
@@ -3220,18 +3220,6 @@ class TestBook(LayerTest):
hid = layers.fc(input=data, size=20)
return layers.softmax(hid, axis=1)
def make_space_to_depth(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
data = self._get_data(
name='data',
shape=[32, 9, 6, 6],
append_batch_size=False,
dtype='float32',
)
return layers.space_to_depth(data, 3)
def make_get_places(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
...
@@ -15,7 +15,6 @@
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from op_test import OpTest
@@ -122,21 +121,6 @@ class TestMaxoutAPI(unittest.TestCase):
np.testing.assert_allclose(out3_ref, out3.numpy(), rtol=1e-05)
paddle.enable_static()
def test_fluid_api(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.maxout(x, groups=self.groups, axis=self.axis)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.fluid.layers.maxout(x, groups=self.groups, axis=self.axis)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
...
@@ -46,73 +46,6 @@ class TestOpNameConflict(unittest.TestCase):
self.assertEqual(n_v[0], 8.0)
self.assertEqual(p_v[0], 13.0)
def test_layers(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
place = (
fluid.CUDAPlace(0)
if fluid.core.is_compiled_with_cuda()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)
data = fluid.data(
name='data', shape=[None, 1, 2, 2], dtype='float32'
)
tensor = fluid.data(
name='tensor', shape=[None, 32, 64], dtype='float32'
)
x = fluid.data(
name='x', shape=[None, 1], dtype='float32', lod_level=1
)
input_scale = fluid.layers.create_parameter(
shape=[1],
dtype="float32",
default_initializer=fluid.initializer.Constant(2.0),
)
input_bias = fluid.layers.create_parameter(
shape=[1],
dtype="float32",
default_initializer=fluid.initializer.Constant(0.5),
)
out_affine = fluid.layers.affine_channel(
data, scale=input_scale, bias=input_bias
)
out_similarity = fluid.layers.similarity_focus(
input=data, axis=1, indexes=[0]
)
position_tensor = fluid.layers.add_position_encoding(
input=tensor, alpha=1.0, beta=1.0
)
x_reversed = fluid.layers.sequence_reverse(x)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
x_d = fluid.create_lod_tensor(
np.array([[1.1], [2.2], [3.3], [4.4]]).astype('float32'),
[[1, 3]],
place,
)
outs = exe.run(
test_program,
fetch_list=[
out_affine,
out_similarity,
position_tensor,
x_reversed,
],
feed={
data.name: np.ones([1, 1, 2, 2]).astype('float32'),
tensor.name: np.ones([1, 32, 64]).astype('float32'),
x.name: x_d,
},
return_numpy=False,
)
if __name__ == '__main__':
unittest.main()
@@ -15,8 +15,6 @@
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
class TestSimilarityFocusOp(OpTest):
@@ -229,35 +227,5 @@ class TestSimilarityFocusOp_axis3(OpTest):
self.check_output()
class TestSimilarityFocusOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
data = fluid.data(name='data', shape=[16, 3, 2, 2], dtype='float32')
def test_input_Variable():
input = np.random.rand(16, 3, 2, 2).astype("float32")
out = fluid.layers.similarity_focus(
input=input, axis=1, indexes=[0]
)
self.assertRaises(TypeError, test_input_Variable)
def test_axis_Int():
axis = 1.0
out = fluid.layers.similarity_focus(
input=data, axis=axis, indexes=[0]
)
self.assertRaises(TypeError, test_axis_Int)
def test_indexes_List():
indexes = 0
out = fluid.layers.similarity_focus(
input=data, axis=1, indexes=indexes
)
self.assertRaises(TypeError, test_indexes_List)
if __name__ == "__main__":
unittest.main()
@@ -24,7 +24,6 @@ import numpy as np
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
def affine_channel(x, scale, bias, layout):
@@ -87,41 +86,6 @@ class TestAffineChannelOp(XPUOpTest):
self.layout = 'NCHW'
class TestAffineChannelOpError(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program()):
def test_x_type():
input_data = np.random.random(2, 1, 2, 2).astype("float32")
fluid.layers.affine_channel(input_data)
self.assertRaises(TypeError, test_x_type)
def test_x_dtype():
x2 = fluid.layers.data(
name='x2', shape=[None, 1, 2, 2], dtype='int32'
)
fluid.layers.affine_channel(x2)
self.assertRaises(TypeError, test_x_dtype)
def test_scale_type():
x3 = fluid.layers.data(
name='x3', shape=[None, 1, 2, 2], dtype='float32'
)
fluid.layers.affine_channel(x3, scale=1)
self.assertRaises(TypeError, test_scale_type)
def test_bias_type():
x4 = fluid.layers.data(
name='x4', shape=[None, 1, 2, 2], dtype='float32'
)
fluid.layers.affine_channel(x4, bias=1)
self.assertRaises(TypeError, test_bias_type)
class TestAffineChannelNHWC(TestAffineChannelOp):
def init_test_case(self):
self.shape = [2, 3, 3, 100]
...