Unverified commit 93027d9f authored by H heyanru, committed by GitHub

[Fluid Clean] remove nn.topk, nn.ctc_greedy_decoder, nn.im2sequence, nn.multiplex, nn.smooth_l1 (#48289)
Parent d6aa0d43
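For readers tracking the migration, the replacements used throughout this diff map the removed fluid layers onto Paddle 2.x APIs. A short sketch of the mapping, with the multiplex call lifted from the docstring example removed below (values illustrative):

```python
import paddle

# Mappings applied in this PR (fluid 1.x -> Paddle 2.x):
#   fluid.layers.topk(input, k)          -> paddle.topk(x, k)
#   fluid.layers.multiplex(inputs, idx)  -> paddle.multiplex(inputs, idx)
#   fluid.layers.smooth_l1(x, y)         -> paddle.nn.loss.SmoothL1Loss()(x, y)
# ctc_greedy_decoder and im2sequence are removed without a drop-in
# substitute in this diff; see the hedged sketches further below.

# For example, the multiplex replacement: out[i] = inputs[index[i]][i]
img1 = paddle.to_tensor([[1., 2.], [3., 4.]])
img2 = paddle.to_tensor([[5., 6.], [7., 8.]])
index = paddle.to_tensor([[1], [0]], dtype='int32')
print(paddle.multiplex([img1, img2], index))  # [[5., 6.], [3., 4.]]
```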
......@@ -475,7 +475,7 @@ void SwapDim1And2InNarrow(const phi::GPUContext& d,
CeilOrFloor<int, false>(input_long_edge, proposed_tile_long_edge) *
proposed_tile_long_edge;
int num_full_tiles =
CeilOrFloor<int, false>(input_long_edge, proposed_tile_long_edge);
float cost = num_wasted_threads;
......
......@@ -1688,7 +1688,8 @@ def ssd_loss(
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
- loc_loss = nn.smooth_l1(location, target_bbox)
+ smooth_l1_loss = paddle.nn.loss.SmoothL1Loss()
+ loc_loss = smooth_l1_loss(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
......
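Worth noting for this replacement: `paddle.nn.loss.SmoothL1Loss` defaults to `reduction='mean'` and returns a scalar, whereas the removed `nn.smooth_l1` summed the loss per instance into a `[batch_size, 1]` tensor (and used a `sigma` rather than a `delta` parameterization, so the values differ too). A minimal sketch, shapes illustrative, of recovering the old output shape with the new API:

```python
import paddle

location = paddle.rand([8, 4])
target_bbox = paddle.rand([8, 4])

# reduction='none' keeps elementwise losses; summing over axis 1 mimics
# the old per-instance [batch_size, 1] output of fluid.layers.smooth_l1
loss_fn = paddle.nn.SmoothL1Loss(reduction='none')
loc_loss = loss_fn(location, target_bbox).sum(axis=1, keepdim=True)
print(loc_loss.shape)  # [8, 1]
```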
......@@ -72,16 +72,11 @@ __all__ = [
'batch_norm',
'dropout',
'split',
- 'ctc_greedy_decoder',
'l2_normalize',
'matmul',
- 'topk',
- 'im2sequence',
'row_conv',
- 'multiplex',
'layer_norm',
'spectral_norm',
- 'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'unsqueeze',
......@@ -2751,421 +2746,6 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
return out
def topk(input, k, name=None):
"""
:alias_main: paddle.topk
:alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk
:old_api: paddle.fluid.layers.topk
This OP is used to find the values and indices of the k largest entries
along the last dimension.
If the input is a 1-D Tensor, finds the k largest entries and outputs
their values and indices.
If the input is a Tensor with higher rank, this operator computes the top k
entries along the last dimension.
.. code-block:: text
Case 1:
Input:
input.shape = [3, 4]
input.data = [[5, 4, 2, 3],
[9, 7, 10, 25],
[6, 2, 10, 1]]
k = 2
Output:
The first output:
values.shape = [3, 2]
values.data = [[5, 4],
[10, 25],
[6, 10]]
The second output:
indices.shape = [3, 2]
indices.data = [[0, 1],
[2, 3],
[0, 2]]
Args:
input(Variable): The input tensor. Support data types: float32, float64.
k(int | Variable): The number of top elements to look for along the last dimension
of input tensor.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Values (Variable): Input tensor's k largest elements along each last dimensional slice. The dimension is: :math:`input.shape[:-1]+[k]`.
Indices (Variable): Indices of the k largest elements along the last dimension of input. The dimension is the same as values.
Raises:
ValueError: If :math:`k < 1` or :math:`k > last dimension of input`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
input = fluid.data(name="input", shape=[None, 13, 11], dtype='float32')
top5_values, top5_indices = layers.topk(input, k=5) # top5_values.shape=[None, 13, 5], top5_indices.shape=[None, 13, 5]
# 1D Tensor
input1 = fluid.data(name="input1", shape=[None, 13], dtype='float32')
top5_values, top5_indices = layers.topk(input1, k=5) #top5_values.shape=[None, 5], top5_indices.shape=[None, 5]
# k=Variable
input2 = fluid.data(name="input2", shape=[None, 13, 11], dtype='float32')
vk = fluid.data(name="vk", shape=[None, 1], dtype='int32') # save k in vk.data[0]
vk_values, vk_indices = layers.topk(input2, k=vk) #vk_values.shape=[None, 13, k], vk_indices.shape=[None, 13, k]
"""
if _non_static_mode():
_k = k.numpy().item(0) if isinstance(k, Variable) else k
out, indices = _legacy_C_ops.top_k(input, 'k', _k)
out.stop_gradient = True
indices.stop_gradient = True
return out, indices
inputs = {"X": [input]}
attrs = {}
if isinstance(k, Variable):
inputs['K'] = [k]
else:
attrs = {'k': k}
helper = LayerHelper("top_k", **locals())
values = helper.create_variable_for_type_inference(dtype=input.dtype)
indices = helper.create_variable_for_type_inference(dtype="int64")
helper.append_op(
type="top_k",
inputs=inputs,
outputs={"Out": [values], "Indices": [indices]},
attrs=attrs,
)
values.stop_gradient = True
indices.stop_gradient = True
return values, indices
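The removed `layers.topk` maps onto `paddle.topk`, which works in both dygraph and static mode, accepts `k` as an int or a Tensor, and adds `axis`, `largest`, and `sorted` arguments. A small dygraph sketch using the data from the docstring above:

```python
import paddle

x = paddle.to_tensor([[5., 4., 2., 3.],
                      [9., 7., 10., 25.],
                      [6., 2., 10., 1.]])
values, indices = paddle.topk(x, k=2)  # last axis by default, sorted descending
print(values)   # [[5., 4.], [25., 10.], [10., 6.]]
print(indices)  # [[0, 1], [3, 2], [2, 0]]

k = paddle.to_tensor(2)                # k may also be passed as a Tensor
values2, indices2 = paddle.topk(x, k=k)
```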
def ctc_greedy_decoder(
input, blank, input_length=None, padding_value=0, name=None
):
r"""
This op decodes sequences with a greedy policy, in the following steps:
1. Get the index of the maximum value for each row of the input, i.e.
numpy.argmax(input, axis=1).
2. For each sequence in the result of step 1, merge repeated tokens between two
blanks and delete all blanks.
This op is implemented in two modes, lod and padding, either of which can be used.
The input can be either a LoDTensor or a Tensor, corresponding to the lod and padding
modes respectively.
A simple example as below:
.. code-block:: text
Given:
(1) for lod mode:
input.data = [[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1],
[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]
input.lod = [[4, 4]]
Computation:
step1: Apply argmax to the first input sequence, input.data[0:4]. Then we get:
[[0], [2], [1], [0]]
step2: merge repeated tokens and remove the blank token (0). Then we get the first output sequence:
[[2], [1]]
Finally:
output.data = [[2],
[1],
[3]]
output.lod = [[2, 1]]
(2) for padding mode:
input.data = [[[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1]],
[[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]]
input_length.data = [[4], [4]]
input.shape = [2, 4, 4]
step1: Apply argmax to the first input sequence, input.data[0:4], giving
[[0], [2], [1], [0]]; for input.data[4:8] it gives [[0], [3], [3], [0]]. The shape is [2, 4, 1].
step2: Convert the argmax result to padding mode; the result becomes
[[0, 2, 1, 0], [0, 3, 3, 0]] with shape [2, 4], lod [] and input_length [[4], [4]]
step3: Apply ctc_align to the padded argmax result with padding_value 0
Finally:
output.data = [[2, 1, 0, 0],
[3, 0, 0, 0]]
output_length.data = [[2], [1]]
Parameters:
input(Variable): the probabilities of variable-length sequences. In lod mode,
it is a 2-D LoDTensor with LoD information. Its shape is [Lp, num_classes + 1],
where Lp is the sum of all input sequences' lengths and
num_classes is the true number of classes (not including the blank label). In padding mode,
it is a 3-D padded Tensor whose shape is [batch_size, N, num_classes + 1].
The data type can be float32 or float64.
blank(int): the blank label index of Connectionist Temporal
Classification (CTC) loss, which is in the half-open
interval [0, num_classes + 1).
input_length(Variable, optional): 2-D LoDTensor, shape is [batch_size, 1], data type is int64.
It is used for padding mode. In lod mode, input_length is None.
padding_value(int): padding value.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
For lod mode, returns the result of the CTC greedy decoder: a 2-D LoDTensor with shape [Lp, 1] \
and data type int64, where 'Lp' is the sum of all output sequences' lengths. If all the sequences \
in the result are empty, the result LoDTensor will be [-1] with an empty \
LoD [[]].
For padding mode, returns a tuple (output, output_length), described as follows:
output, 2-D Tensor, shape is [batch_size, N], data type is int64.
output_length, 2-D Tensor, shape is [batch_size, 1], data type is int64. It is the length of \
each sequence of output for padding mode.
Return type:
For lod mode: Variable
For padding mode: tuple of two Variables (output, output_length).
Examples:
.. code-block:: python
# for lod mode
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
# for padding mode
x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
input_length=x_pad_len)
"""
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'ctc_greedy_decoder'
)
helper = LayerHelper("ctc_greedy_decoder", **locals())
_, topk_indices = topk(input, k=1)
# ctc align op
ctc_out = helper.create_variable_for_type_inference(dtype="int64")
if input_length is None:
helper.append_op(
type="ctc_align",
inputs={"Input": [topk_indices]},
outputs={"Output": [ctc_out]},
attrs={"merge_repeated": True, "blank": blank},
)
return ctc_out
else:
ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
ctc_input = paddle.squeeze(topk_indices, [2])
helper.append_op(
type="ctc_align",
inputs={"Input": [ctc_input], "InputLength": [input_length]},
outputs={"Output": [ctc_out], "OutputLength": [ctc_out_len]},
attrs={
"merge_repeated": True,
"blank": blank,
"padding_value": padding_value,
},
)
return ctc_out, ctc_out_len
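`ctc_greedy_decoder` is deleted here without a Paddle 2.x drop-in. For the padding mode, the greedy policy (per-step argmax, merge repeats, drop blanks) is easy to reproduce by hand; the helper below is a hypothetical sketch, not an official API:

```python
import paddle

def greedy_ctc_decode(probs, blank=0):
    """probs: [batch, time, num_classes + 1] scores; returns token id lists."""
    ids = paddle.argmax(probs, axis=-1).numpy()  # step 1: per-step argmax
    decoded = []
    for seq in ids:
        tokens, prev = [], None
        for t in seq:
            if t != prev and t != blank:         # step 2: merge repeats, drop blanks
                tokens.append(int(t))
            prev = t
        decoded.append(tokens)
    return decoded

probs = paddle.rand([2, 4, 5])
print(greedy_ctc_decode(probs, blank=0))         # e.g. [[2, 1], [3]]
```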
def im2sequence(
input,
filter_size=1,
stride=1,
padding=0,
input_image_size=None,
out_stride=1,
name=None,
):
r"""
:api_attr: Static Graph
Extracts image patches from the input tensor to form a tensor of shape
{input.batch_size * output_height * output_width, filter_size_height *
filter_size_width * input.channels}. This op uses a filter to scan images
and convert them to sequences. After expanding, the number of time steps
for an image is output_height * output_width, where output_height and
output_width are calculated by the equations below:
.. math::
output\_height = 1 + \
(padding\_up + padding\_down + input\_height - filter\_size\_height + stride\_height - 1) / stride\_height \\\\
output\_width = 1 + \
(padding\_left + padding\_right + input\_width - filter\_size\_width + stride\_width - 1) / stride\_width
And the dimension of each time step is filter_size_height * filter_size_width * input.channels.
Parameters:
input (Variable): The input should be a 4-D Tensor in :math:`NCHW` format. The data type is float32.
filter_size(int32 | List[int32]): The filter size. If filter_size is a List,
it must contain two integers, :math:`[filter\_size\_height, filter\_size\_width]` .
Otherwise, the filter size will be a square :math:`[filter\_size, filter\_size]` . Default is 1.
stride(int32 | List[int32]): The stride size. If stride is a List, it must
contain two integers, :math:`[stride\_height, stride\_width]` . Otherwise, the stride size will be a square :math:`[stride\_size, stride\_size]` . Default is 1.
padding(int32 | List[int32]): The padding size. If padding is a List, it can
contain four integers like :math:`[padding\_up, padding\_left, padding\_down, padding\_right]` to indicate
paddings of four direction. Or it can contain two integers :math:`[padding\_height, padding\_width]` which means
padding_up = padding_down = padding_height and
padding_left = padding_right = padding_width. Otherwise, a scalar padding means
padding_up = padding_down = padding_left = padding_right = padding.
Default is 0.
input_image_size(Variable, optional): the real sizes of the input images. Its shape
is :math:`[batchsize, 2]` . It is only used for batch inference when not None. Default is None.
out_stride(int32 | List[int32]): The scaling of the image through the CNN. It is valid only when input_image_size is not None.
If out_stride is a List, it must contain two integers,
:math:`[out\_stride\_height, out\_stride\_width]` . Otherwise,
out_stride_height = out_stride_width = out_stride. Default is 1.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
The output is a 2-D LoDTensor with shape {input.batch\_size * output\_height * output\_width, \
filter\_size\_height * filter\_size\_width * input.channels}. The data type is float32.
Return Type: Variable
Examples:
.. code-block:: text
Given:
x = [[[[ 6. 2. 1.]
[ 8. 3. 5.]
[ 0. 2. 6.]]
[[ 2. 4. 4.]
[ 6. 3. 0.]
[ 6. 4. 7.]]]
[[[ 6. 7. 1.]
[ 5. 7. 9.]
[ 2. 4. 8.]]
[[ 1. 2. 1.]
[ 1. 3. 5.]
[ 9. 0. 8.]]]]
x.dims = {2, 2, 3, 3}
And:
filter = [2, 2]
stride = [1, 1]
padding = [0, 0]
Then:
output.data = [[ 6. 2. 8. 3. 2. 4. 6. 3.]
[ 2. 1. 3. 5. 4. 4. 3. 0.]
[ 8. 3. 0. 2. 6. 3. 6. 4.]
[ 3. 5. 2. 6. 3. 0. 4. 7.]
[ 6. 7. 5. 7. 1. 2. 1. 3.]
[ 7. 1. 7. 9. 2. 1. 3. 5.]
[ 5. 7. 2. 4. 1. 3. 9. 0.]
[ 7. 9. 4. 8. 3. 5. 0. 8.]]
output.dims = {8, 8}
output.lod = [[4, 4]]
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 32, 32],
dtype='float32')
output = fluid.layers.im2sequence(
input=data, stride=[1, 1], filter_size=[2, 2])
"""
assert (
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence')
if isinstance(filter_size, int):
filter_size = [filter_size, filter_size]
if isinstance(stride, int):
stride = [stride, stride]
if isinstance(padding, int):
padding = [padding, padding]
if len(padding) == 2:
padding.append(padding[0])
padding.append(padding[1])
inputs = {"X": input}
attrs = {"kernels": filter_size, "strides": stride, "paddings": padding}
if input_image_size:
if isinstance(out_stride, int):
out_stride = [out_stride, out_stride]
inputs["Y"] = input_image_size
attrs["out_stride"] = out_stride
helper = LayerHelper('im2sequence', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs
)
return out
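`im2sequence` also goes away without a direct substitute in this diff. The patch extraction itself is what `paddle.nn.functional.unfold` (im2col) does; a sketch under the assumption that the LoD sequence metadata is not needed, and noting that the per-patch element ordering may differ from the removed op:

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3, 32, 32])  # NCHW input
patches = F.unfold(x, kernel_sizes=[2, 2], strides=[1, 1], paddings=[0, 0])
# patches: [N, C*kh*kw, L] where L = output_height * output_width
n, ckk, length = patches.shape
out = paddle.transpose(patches, [0, 2, 1]).reshape([n * length, ckk])
print(out.shape)  # [2 * 31 * 31, 3 * 2 * 2] = [1922, 12]
```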
@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
"""
......@@ -3214,165 +2794,6 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
return helper.append_activation(out)
@templatedoc()
def multiplex(inputs, index, name=None):
"""
Based on the given index parameter, the OP selects a specific row from each input Tensor to construct the output Tensor.
If the input of this OP contains :math:`m` Tensors, let :math:`I_{i}` denote the i-th input Tensor, where :math:`i` is in :math:`[0,m)` .
:math:`O` denotes the output and :math:`O[i]` its i-th row; the output then satisfies :math:`O[i] = I_{index[i]}[i]` .
For Example:
.. code-block:: text
Given:
inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
[[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
[[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
[[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]
index = [[3],[0],[1],[2]]
out = [[3,0,3,4], # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
[0,1,3,4], # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
[1,2,4,2], # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
[2,3,3,4]] # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]
Args:
inputs (list): The input Tensor list. The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and rank must be at least 2.
index (Tensor): Used to select which input each row of the output comes from. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of rows of each input Tensor and each value lies in [0, len(inputs)).
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: Output of multiplex OP, with data type being float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle
import numpy as np
img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
index = paddle.to_tensor(np.array([[1], [0]]).astype(np.int32))
res = paddle.multiplex(inputs, index)
print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
"""
if _in_legacy_dygraph():
return _legacy_C_ops.multiplex(index, inputs)
if in_dygraph_mode():
return _C_ops.multiplex(inputs, index)
helper = LayerHelper('multiplex', **locals())
check_type(inputs, 'inputs', (list), 'multiplex')
if len(inputs) < 2:
raise ValueError(
"inputs should be a list object with at least 2 elements."
)
for id, x in enumerate(inputs):
check_variable_and_dtype(
x,
'input[' + str(id) + ']',
['float32', 'float64', 'int32', 'int64'],
'multiplex',
)
check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
out = helper.create_variable_for_type_inference(inputs[0].dtype)
helper.append_op(
type='multiplex',
inputs={'X': inputs, 'Ids': index},
outputs={'Out': [out]},
)
return out
def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
"""
This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`.
It takes the first dimension of :attr:`x` and :attr:`y` as batch size.
For each instance, it computes the smooth L1 loss element by element first
and then sums all the losses. So the shape of output Variable is
[batch_size, 1].
Args:
x (Variable): A tensor with rank at least 2. The input value of smooth
L1 loss op with shape [batch_size, dim1, ..., dimN].
A LoDTensor or Tensor with type float32.
y (Variable): A tensor with rank at least 2. The target value of smooth
L1 loss op with same shape as :attr:`x`.
A LoDTensor or Tensor with type float32.
inside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have the same shape as :attr:`x`. If
provided, the result of (:attr:`x` - :attr:`y`) will be multiplied
by this tensor element by element.
A Tensor with type float32.
outside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have the same shape as :attr:`x`. If
provided, the output smooth L1 loss will be multiplied by this tensor
element by element.
A Tensor with type float32.
sigma (float|None): Hyperparameter of the smooth L1 loss layer. A float
scalar with default value 1.0.
Returns:
Variable: The output smooth L1 loss with shape [batch_size, 1]. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
result = fluid.layers.smooth_l1(data,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3,3).astype("float32")
y = np.random.rand(3,3).astype("float32")
output = exe.run(feed={"x": x, "y": y},
fetch_list=[result])
print(output)
#[array([[0.08220536],
# [0.36652038],
# [0.20541131]], dtype=float32)]
"""
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'smooth_l1_loss')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'smooth_l1_loss')
helper = LayerHelper('smooth_l1_loss', **locals())
diff = helper.create_variable_for_type_inference(dtype=x.dtype)
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='smooth_l1_loss',
inputs={
'X': x,
'Y': y,
'InsideWeight': inside_weight,
'OutsideWeight': outside_weight,
},
outputs={'Diff': diff, 'Out': loss},
attrs={'sigma': sigma if sigma is not None else 1.0},
)
return loss
@deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot')
def one_hot(input, depth, allow_out_of_range=False):
"""
......
......@@ -1833,8 +1833,8 @@ def fast_decode(
)
logits = paddle.reshape(logits, (-1, trg_vocab_size))
- topk_scores, topk_indices = layers.topk(
-     input=paddle.nn.functional.softmax(logits), k=beam_size
+ topk_scores, topk_indices = paddle.topk(
+     x=paddle.nn.functional.softmax(logits), k=beam_size
)
accu_scores = layers.elementwise_add(
x=paddle.log(topk_scores),
......
......@@ -459,9 +459,7 @@ class BaseModel(fluid.dygraph.Layer):
scores = paddle.reshape(
log_probs, [-1, self.beam_size * self.tar_vocab_size]
)
- topk_scores, topk_indices = fluid.layers.topk(
-     input=scores, k=self.beam_size
- )
+ topk_scores, topk_indices = paddle.topk(x=scores, k=self.beam_size)
beam_indices = paddle.floor_divide(topk_indices, vocab_size_tensor)
token_indices = paddle.remainder(topk_indices, vocab_size_tensor)
......
......@@ -853,9 +853,7 @@ class Transformer(Layer):
log_probs, [-1, beam_size * self.trg_vocab_size]
)
scores = log_probs
- topk_scores, topk_indices = fluid.layers.topk(
-     input=scores, k=beam_size
- )
+ topk_scores, topk_indices = paddle.topk(x=scores, k=beam_size)
beam_indices = paddle.floor_divide(topk_indices, vocab_size_tensor)
token_indices = paddle.remainder(topk_indices, vocab_size_tensor)
......
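The beam-search call sites above share one pattern: `paddle.topk` over flattened `[batch, beam_size * vocab]` scores, then `floor_divide`/`remainder` to split each flat index back into a beam id and a token id. A minimal dygraph sketch with illustrative names and sizes:

```python
import paddle

beam_size, vocab_size = 4, 10
scores = paddle.rand([2, beam_size * vocab_size])  # flattened per-beam scores

topk_scores, topk_indices = paddle.topk(scores, k=beam_size)
vocab_t = paddle.full_like(topk_indices, vocab_size)       # int64, matches indices
beam_indices = paddle.floor_divide(topk_indices, vocab_t)  # source beam of each hit
token_indices = paddle.remainder(topk_indices, vocab_t)    # token id within the vocab
```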
......@@ -31,7 +31,7 @@ class TestTopKOp(IPUOpTest):
self.set_op_attrs()
def set_test_op(self):
- self.op = paddle.fluid.layers.topk
+ self.op = paddle.topk
def set_data_feed(self):
data = np.random.uniform(size=[3, 5])
......
......@@ -138,22 +138,5 @@ class TestSmoothL1LossOp2(OpTest):
)
class TestSmoothL1LossOpError(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
# The input type of smooth_l1 must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.NPUPlace(0)
)
y1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.NPUPlace(0)
)
self.assertRaises(TypeError, fluid.layers.smooth_l1, x1, y1)
# The input dtype of smooth_l1 must be float32 or float64.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
y2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
self.assertRaises(TypeError, fluid.layers.smooth_l1, x2, y2)
if __name__ == '__main__':
unittest.main()
......@@ -312,7 +312,7 @@ class TestBeamSearchOpError(unittest.TestCase):
name='pre_scores', shape=[1], lod_level=2, dtype='float32'
)
probs = fluid.data(name='probs', shape=[10000], dtype='float32')
- topk_scores, topk_indices = fluid.layers.topk(probs, k=4)
+ topk_scores, topk_indices = paddle.topk(probs, k=4)
accu_scores = fluid.layers.elementwise_add(
x=paddle.log(x=topk_scores),
y=paddle.reshape(pre_scores, shape=[-1]),
......
......@@ -18,7 +18,6 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
def CTCAlign(input, lod, blank, merge_repeated, padding=0, input_length=None):
......@@ -226,50 +225,6 @@ class TestCTCAlignOpCase5(TestCTCAlignPaddingOp):
)
class TestCTCAlignOpApi(unittest.TestCase):
def test_api(self):
x = fluid.layers.data('x', shape=[4], dtype='float32')
y = fluid.layers.ctc_greedy_decoder(x, blank=0)
x_pad = fluid.layers.data('x_pad', shape=[4, 4], dtype='float32')
x_pad_len = fluid.layers.data('x_pad_len', shape=[1], dtype='int64')
y_pad, y_pad_len = fluid.layers.ctc_greedy_decoder(
x_pad, blank=0, input_length=x_pad_len
)
place = fluid.CPUPlace()
x_tensor = fluid.create_lod_tensor(
np.random.rand(8, 4).astype("float32"), [[4, 4]], place
)
x_pad_tensor = np.random.rand(2, 4, 4).astype("float32")
x_pad_len_tensor = np.array([[4], [4]]).reshape([2, 1]).astype("int64")
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
ret = exe.run(
feed={
'x': x_tensor,
'x_pad': x_pad_tensor,
'x_pad_len': x_pad_len_tensor,
},
fetch_list=[y, y_pad, y_pad_len],
return_numpy=False,
)
class BadInputTestCTCAlignr(unittest.TestCase):
def test_error(self):
with fluid.program_guard(fluid.Program()):
def test_bad_x():
x = fluid.layers.data(name='x', shape=[8], dtype='int64')
cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
self.assertRaises(TypeError, test_bad_x)
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
......@@ -1519,8 +1519,8 @@ class TestLayer(LayerTest):
with self.dynamic_graph():
with _test_eager_guard():
input = fluid.dygraph.to_variable(np.random.random((13, 11)))
- top5_values1, top5_indices1 = layers.topk(input, k=5)
- top5_values2, top5_indices2 = layers.topk(
+ top5_values1, top5_indices1 = paddle.topk(input, k=5)
+ top5_values2, top5_indices2 = paddle.topk(
input, k=fluid.dygraph.to_variable(np.array([5]))
)
np.testing.assert_array_equal(
......@@ -1531,8 +1531,8 @@ class TestLayer(LayerTest):
)
input = fluid.dygraph.to_variable(np.random.random((13, 11)))
- top5_values1, top5_indices1 = layers.topk(input, k=5)
- top5_values2, top5_indices2 = layers.topk(
+ top5_values1, top5_indices1 = paddle.topk(input, k=5)
+ top5_values2, top5_indices2 = paddle.topk(
input, k=fluid.dygraph.to_variable(np.array([5]))
)
np.testing.assert_array_equal(
......@@ -3104,7 +3104,7 @@ class TestBook(LayerTest):
x1 = self._get_data(name='x1', shape=[4], dtype='float32')
x2 = self._get_data(name='x2', shape=[4], dtype='float32')
index = self._get_data(name='index', shape=[1], dtype='int32')
- out = layers.multiplex(inputs=[x1, x2], index=index)
+ out = paddle.multiplex(inputs=[x1, x2], index=index)
return out
def make_softmax_with_cross_entropy(self):
......@@ -3144,15 +3144,6 @@ class TestBook(LayerTest):
self.assertIsNotNone(loss4)
return loss4
def make_smooth_l1(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
x = self._get_data(name='x', shape=[4], dtype='float32')
y = self._get_data(name='label', shape=[4], dtype='float32')
loss = layers.smooth_l1(x, y)
return loss
def make_scatter(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
......@@ -3192,7 +3183,7 @@ class TestBook(LayerTest):
fluid.default_main_program(), fluid.default_startup_program()
):
data = self._get_data(name="label", shape=[200], dtype="float32")
- values, indices = layers.topk(data, k=5)
+ values, indices = paddle.topk(data, k=5)
return values
return indices
......@@ -3559,20 +3550,6 @@ class TestBook(LayerTest):
)
)
def test_im2sequence(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
y = layers.data(name='y', shape=[], dtype='float32')
output = layers.im2sequence(
input=x,
input_image_size=y,
stride=[1, 1],
filter_size=[2, 2],
out_stride=[1, 1],
)
return output
def test_lod_reset(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
......
......@@ -17,8 +17,6 @@ import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
def smooth_l1_loss_forward(val, sigma2):
abs_val = abs(val)
......@@ -124,22 +122,5 @@ class TestSmoothL1LossOp2(OpTest):
)
class TestSmoothL1LossOpError(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
# The input type of smooth_l1 must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace()
)
y1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace()
)
self.assertRaises(TypeError, fluid.layers.smooth_l1, x1, y1)
# The input dtype of smooth_l1 must be float32 or float64.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
y2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
self.assertRaises(TypeError, fluid.layers.smooth_l1, x2, y2)
if __name__ == '__main__':
unittest.main()