Unverified commit 2fe92990, authored by Vvsmile, committed by GitHub

[Clean Fluid API] Remove squeeze: use paddle.squeeze to replace paddle.fluid.layers.squeeze (#47938)

* Remove API: squeeze
	Modify the function calls of squeeze.
	Use paddle.squeeze to replace paddle.fluid.layers.squeeze

* Remove: squeeze
	Revert some modifications that are not needed, restoring the original format.

* Fix the function call of 'squeeze' from old style to new style.

* modify the call of squeeze from old style to new style

* Modify the call of squeeze from old style to new style

* replace squeeze with paddle.squeeze

* Modify nn.py based on the review suggestion:

remove a redundant comment.

* Remove one unnecessary comment
Parent 4edf37d7
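For reference, a minimal before/after sketch of the migration this commit performs (the keyword argument changes from axes to axis; positional usage is unchanged), assuming an eager-mode tensor:

import paddle

x = paddle.rand([5, 1, 10])

# Old (removed): y = paddle.fluid.layers.squeeze(input=x, axes=[1])
# New:
y = paddle.squeeze(x, axis=[1])  # y.shape == [5, 10]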
@@ -93,7 +93,6 @@ __all__ = [
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'squeeze',
'unsqueeze',
'lod_reset',
'lod_append',
@@ -4095,7 +4094,7 @@ def ctc_greedy_decoder(
return ctc_out
else:
ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
ctc_input = squeeze(topk_indices, [2])
ctc_input = paddle.squeeze(topk_indices, [2])
helper.append_op(
type="ctc_align",
@@ -4636,105 +4635,6 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
return counter
def squeeze(input, axes, name=None):
"""
This OP squeezes single-dimensional entries out of the input tensor's shape. If axes is provided, the
dims specified by axes are removed, and each selected dim must be 1. If axes is not provided, all dims
equal to 1 are deleted.
.. code-block:: text
Case1:
Input:
X.shape = (1, 3, 1, 5)
axes = [0]
Output:
Out.shape = (3, 1, 5)
Case2:
Input:
X.shape = (1, 3, 1, 5)
axes = []
Output:
Out.shape = (3, 5)
Case3:
Input:
X.shape = [1,3,1,5]
axes = [-2]
Output:
Out.shape = [1,3,5]
Args:
input (Variable): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64.
axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
Axes range is :math:`[-rank(input), rank(input))`.
If axes is negative, :math:`axes=axes+rank(input)`.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Variable: The squeezed Tensor, with the same data type as the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
x = fluid.data(name='x', shape=[None, 5, 1, 10])
y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]
"""
if in_dygraph_mode():
return _C_ops.squeeze(input, axes)
if _in_legacy_dygraph():
out, _ = _legacy_C_ops.squeeze2(input, 'axes', axes)
return out
helper = LayerHelper("squeeze", **locals())
check_variable_and_dtype(
input,
'input',
[
'float16',
'float32',
'float64',
'bool',
'int8',
'int32',
'int64',
'complex64',
'complex128',
],
'squeeze',
)
check_type(axes, 'axis/axes', (list, tuple, Variable), 'squeeze')
attrs = {}
if isinstance(axes, Variable):
axes.stop_gradient = True
attrs["axes"] = axes
elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes):
attrs["axes"] = utils._convert_to_tensor_list(axes)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="squeeze2",
inputs={"X": input},
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
return out
def unsqueeze(input, axes, name=None):
"""
Insert single-dimensional entries to the shape of a Tensor. Takes one
......
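The example in the docstring removed above maps directly onto the public API; a minimal static-graph sketch of the equivalent call:

import paddle

paddle.enable_static()
# batch size left dynamic (None), as in the removed docstring example
x = paddle.static.data(name='x', shape=[None, 5, 1, 10], dtype='float32')
y = paddle.squeeze(x, axis=[2])  # y.shape: (-1, 5, 10)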
@@ -2088,11 +2088,11 @@ class TrainingHelper(DecodeHelper):
def _slice(x): # TODO: use Variable.__getitem__
axes = [0 if self.time_major else 1]
return nn.squeeze(
return paddle.squeeze(
nn.slice(
x, axes=axes, starts=[next_time], ends=[next_time + 1]
),
axes=axes,
axis=axes,
)
next_inputs = map_structure(_slice, self.inputs_)
......
@@ -295,7 +295,7 @@ class BaseModel(fluid.dygraph.Layer):
loss = fluid.layers.softmax_with_cross_entropy(
logits=dec_output, label=label, soft_label=False
)
loss = fluid.layers.squeeze(loss, axes=[2])
loss = paddle.squeeze(loss, axis=[2])
max_tar_seq_len = fluid.layers.shape(tar)[1]
tar_mask = fluid.layers.sequence_mask(
tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
@@ -811,7 +811,7 @@ class AttentionModel(fluid.dygraph.Layer):
else:
step_input = new_hidden
dec_att = self.attention(step_input, enc_outputs, enc_padding_mask)
dec_att = fluid.layers.squeeze(dec_att, [1])
dec_att = paddle.squeeze(dec_att, [1])
concat_att_out = fluid.layers.concat([dec_att, step_input], 1)
out = self.concat_fc(concat_att_out)
input_feed = out
@@ -823,7 +823,7 @@
loss = fluid.layers.softmax_with_cross_entropy(
logits=dec_output, label=label, soft_label=False
)
loss = fluid.layers.squeeze(loss, axes=[2])
loss = paddle.squeeze(loss, axis=[2])
max_tar_seq_len = fluid.layers.shape(tar)[1]
tar_mask = fluid.layers.sequence_mask(
tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
......
@@ -137,7 +137,7 @@ class Conv1D(fluid.dygraph.Layer):
def forward(self, x):
x = fluid.layers.unsqueeze(input=x, axes=[2])
x = self._conv2d(x)
x = fluid.layers.squeeze(input=x, axes=[2])
x = paddle.squeeze(x, axis=[2])
return x
@@ -275,10 +275,10 @@ class BMN(fluid.dygraph.Layer):
# TEM
xs = paddle.nn.functional.relu(self.ts_conv1(x))
xs = paddle.nn.functional.relu(self.ts_conv2(xs))
xs = fluid.layers.squeeze(xs, axes=[1])
xs = paddle.squeeze(xs, axis=[1])
xe = paddle.nn.functional.relu(self.te_conv1(x))
xe = paddle.nn.functional.relu(self.te_conv2(xe))
xe = fluid.layers.squeeze(xe, axes=[1])
xe = paddle.squeeze(xe, axis=[1])
# PEM
xp = paddle.nn.functional.relu(self.p_conv1(x))
@@ -287,7 +287,7 @@
xp = paddle.reshape(xp, shape=[0, 0, -1, self.dscale, self.tscale])
xp = self.p_conv3d1(xp)
xp = fluid.layers.squeeze(xp, axes=[2])
xp = paddle.squeeze(xp, axis=[2])
xp = paddle.nn.functional.relu(self.p_conv2d1(xp))
xp = paddle.nn.functional.relu(self.p_conv2d2(xp))
xp = paddle.nn.functional.relu(self.p_conv2d3(xp))
@@ -411,11 +411,11 @@ def bmn_loss_func(
loss = -1 * (loss_pos + loss_neg) / num_entries
return loss
pred_bm_reg = fluid.layers.squeeze(
fluid.layers.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axes=[1]
pred_bm_reg = paddle.squeeze(
fluid.layers.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1]
)
pred_bm_cls = fluid.layers.squeeze(
fluid.layers.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axes=[1]
pred_bm_cls = paddle.squeeze(
fluid.layers.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axis=[1]
)
bm_mask = _get_mask(cfg)
......
@@ -47,7 +47,7 @@ class TestBase(IPUOpTest):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
)
out = paddle.fluid.layers.squeeze(x, **self.attrs)
out = paddle.squeeze(x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
......
@@ -4097,7 +4097,7 @@ class TestBook(LayerTest):
# TODO(minqiyang): dygraph do not support layers with param now
with self.static_graph():
x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
out = layers.squeeze(input=x, axes=[2])
out = paddle.squeeze(x, axis=[2])
return out
def test_flatten(self):
......
@@ -635,7 +635,7 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1):
new_h = layers.matmul(concat_in, w2)
new_h = layers.unsqueeze(new_h, [1])
new_h, _ = dot_attention(new_h, y)
new_h = layers.squeeze(new_h, [1])
new_h = paddle.squeeze(new_h, [1])
rnn.update_memory(pre_h, new_h)
rnn.step_output(new_h)
......
@@ -80,7 +80,7 @@ class DecoderCell(layers.RNNCell):
attn_scores, encoder_padding_mask
)
attn_scores = layers.softmax(attn_scores)
attn_out = layers.squeeze(
attn_out = paddle.squeeze(
layers.matmul(attn_scores, encoder_output), [1]
)
attn_out = layers.concat([attn_out, hidden], 1)
......
@@ -97,7 +97,7 @@ class TestSqueeze2AxesTensor(UnittestBase):
# axes is a Variable
axes = paddle.assign([0, 2])
out = paddle.squeeze(feat, axes)
out2 = paddle.fluid.layers.squeeze(feat, axes)
out2 = paddle.squeeze(feat, axes)
sgd = paddle.optimizer.SGD()
sgd.minimize(paddle.mean(out))
@@ -136,7 +136,7 @@ class TestSqueeze2AxesTensorList(UnittestBase):
paddle.full([1], 2, dtype='int32'),
]
out = paddle.squeeze(feat, axes)
out2 = paddle.fluid.layers.squeeze(feat, axes)
out2 = paddle.squeeze(feat, axes)
sgd = paddle.optimizer.SGD()
sgd.minimize(paddle.mean(out))
......
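As the two test hunks above show, paddle.squeeze also accepts a Tensor for axis in static-graph mode, so out2 can drop the fluid call with no other changes. A minimal sketch, assuming a [1, 5, 1, 10] input:

import paddle

paddle.enable_static()
feat = paddle.static.data(name='feat', shape=[1, 5, 1, 10], dtype='float32')
axes = paddle.assign([0, 2])      # axis supplied as a Tensor
out = paddle.squeeze(feat, axes)  # out.shape: (5, 10)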