Unverified · Commit 2fe92990 · Authored by: Vvsmile · Committed by: GitHub

[Clean Fluid API]Remove squeeze: use paddle.squeeze to replace paddle.fluid.layers.squeeze (#47938)

* Remove API: squeeze
	Modify the function calls of squeeze.
	Use paddle.squeeze to replace paddle.fluid.layers.squeeze

* Remove: squeeze
	Fix some modifications that needlessly deviated from the original
format

* Fix the function call of 'squeeze' from old style to new style.

* Modify the call of squeeze from old style to new style

* Modify the call of squeeze from old style to new style

* Replace squeeze with paddle.squeeze

* Modify nn.py based on the review suggestion; remove a redundant comment.

* Remove one unnecessary comment
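For reference, a minimal sketch of the migration pattern this commit applies throughout (the tensor x and its shape are illustrative, not taken from the diff):

import paddle

# Old style, removed by this commit:
#   y = paddle.fluid.layers.squeeze(input=x, axes=[2])
# New style: the input is passed positionally and the keyword
# is `axis`, not `axes`.
x = paddle.rand([5, 1, 10])
y = paddle.squeeze(x, axis=[1])
print(y.shape)  # [5, 10]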
Parent 4edf37d7
@@ -93,7 +93,6 @@ __all__ = [
     'smooth_l1',
     'one_hot',
     'autoincreased_step_counter',
-    'squeeze',
     'unsqueeze',
     'lod_reset',
     'lod_append',
@@ -4095,7 +4094,7 @@ def ctc_greedy_decoder(
         return ctc_out
     else:
         ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
-        ctc_input = squeeze(topk_indices, [2])
+        ctc_input = paddle.squeeze(topk_indices, [2])
         helper.append_op(
             type="ctc_align",
@@ -4636,105 +4635,6 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
     return counter
 
-def squeeze(input, axes, name=None):
-    """
-    This OP will squeeze single-dimensional entries of input tensor's shape. If axes is provided, will
-    remove the dims by axes, the dims selected by axes should be one. If not provide axes, all dims equal
-    to one will be deleted.
-
-    .. code-block:: text
-
-        Case1:
-          Input:
-            X.shape = (1, 3, 1, 5)
-            axes = [0]
-          Output:
-            Out.shape = (3, 1, 5)
-
-        Case2:
-          Input:
-            X.shape = (1, 3, 1, 5)
-            axes = []
-          Output:
-            Out.shape = (3, 5)
-
-        Case3:
-          Input:
-            X.shape = [1,3,1,5]
-            axes = [-2]
-          Output:
-            Out.shape = [1,3,5]
-
-    Args:
-        input (Variable): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64.
-        axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
-                     Axes range is :math:`[-rank(input), rank(input))`.
-                     If axes is negative, :math:`axes=axes+rank(input)`.
-        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
-
-    Returns:
-        Variable: Output squeezed Tensor. Data type is same as input Tensor.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import paddle.fluid.layers as layers
-            # set batch size=None
-            x = fluid.data(name='x', shape=[None, 5, 1, 10])
-            y = layers.squeeze(input=x, axes=[2])  # y.shape=[None, 5, 10]
-    """
-    if in_dygraph_mode():
-        return _C_ops.squeeze(input, axes)
-    if _in_legacy_dygraph():
-        out, _ = _legacy_C_ops.squeeze2(input, 'axes', axes)
-        return out
-
-    helper = LayerHelper("squeeze", **locals())
-    check_variable_and_dtype(
-        input,
-        'input',
-        [
-            'float16',
-            'float32',
-            'float64',
-            'bool',
-            'int8',
-            'int32',
-            'int64',
-            'complex64',
-            'complex128',
-        ],
-        'squeeze',
-    )
-    check_type(axes, 'axis/axes', (list, tuple, Variable), 'squeeze')
-
-    attrs = {}
-    if isinstance(axes, Variable):
-        axes.stop_gradient = True
-        attrs["axes"] = axes
-    elif isinstance(axes, (list, tuple)):
-        if utils._contain_var(axes):
-            attrs["axes"] = utils._convert_to_tensor_list(axes)
-        else:
-            attrs["axes"] = axes
-
-    out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
-    helper.append_op(
-        type="squeeze2",
-        inputs={"X": input},
-        attrs=attrs,
-        outputs={"Out": out, "XShape": x_shape},
-    )
-    return out
 def unsqueeze(input, axes, name=None):
     """
     Insert single-dimensional entries to the shape of a Tensor. Takes one
......
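The deleted docstring's three cases carry over unchanged to paddle.squeeze; a quick sketch (tensor values are arbitrary, only shapes matter):

import paddle

x = paddle.zeros([1, 3, 1, 5])
print(paddle.squeeze(x, axis=[0]).shape)   # [3, 1, 5]  (Case 1)
print(paddle.squeeze(x).shape)             # [3, 5]     (Case 2: all size-1 dims removed)
print(paddle.squeeze(x, axis=[-2]).shape)  # [1, 3, 5]  (Case 3: negative axis)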
@@ -2088,11 +2088,11 @@ class TrainingHelper(DecodeHelper):
         def _slice(x):  # TODO: use Variable.__getitem__
             axes = [0 if self.time_major else 1]
-            return nn.squeeze(
+            return paddle.squeeze(
                 nn.slice(
                     x, axes=axes, starts=[next_time], ends=[next_time + 1]
                 ),
-                axes=axes,
+                axis=axes,
             )
 
         next_inputs = map_structure(_slice, self.inputs_)
......
@@ -295,7 +295,7 @@ class BaseModel(fluid.dygraph.Layer):
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=dec_output, label=label, soft_label=False
         )
-        loss = fluid.layers.squeeze(loss, axes=[2])
+        loss = paddle.squeeze(loss, axis=[2])
         max_tar_seq_len = fluid.layers.shape(tar)[1]
         tar_mask = fluid.layers.sequence_mask(
             tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
@@ -811,7 +811,7 @@ class AttentionModel(fluid.dygraph.Layer):
             else:
                 step_input = new_hidden
             dec_att = self.attention(step_input, enc_outputs, enc_padding_mask)
-            dec_att = fluid.layers.squeeze(dec_att, [1])
+            dec_att = paddle.squeeze(dec_att, [1])
             concat_att_out = fluid.layers.concat([dec_att, step_input], 1)
             out = self.concat_fc(concat_att_out)
             input_feed = out
@@ -823,7 +823,7 @@ class AttentionModel(fluid.dygraph.Layer):
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=dec_output, label=label, soft_label=False
         )
-        loss = fluid.layers.squeeze(loss, axes=[2])
+        loss = paddle.squeeze(loss, axis=[2])
         max_tar_seq_len = fluid.layers.shape(tar)[1]
         tar_mask = fluid.layers.sequence_mask(
             tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
......
@@ -137,7 +137,7 @@ class Conv1D(fluid.dygraph.Layer):
     def forward(self, x):
         x = fluid.layers.unsqueeze(input=x, axes=[2])
         x = self._conv2d(x)
-        x = fluid.layers.squeeze(input=x, axes=[2])
+        x = paddle.squeeze(x, axis=[2])
         return x
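The Conv1D change above keeps the long-standing trick of emulating a 1-D convolution with a 2-D one: insert a dummy spatial dimension, convolve, then squeeze it back out. A self-contained sketch of the same pattern (the layer sizes here are illustrative, not the model's):

import paddle

x = paddle.rand([8, 16, 100])                  # (batch, channels, length)
conv2d = paddle.nn.Conv2D(16, 32, (1, 3), padding=(0, 1))

h = paddle.unsqueeze(x, axis=[2])              # -> (8, 16, 1, 100): add a dummy height dim
h = conv2d(h)                                  # -> (8, 32, 1, 100)
h = paddle.squeeze(h, axis=[2])                # -> (8, 32, 100): drop the dummy dim again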
@@ -275,10 +275,10 @@ class BMN(fluid.dygraph.Layer):
         # TEM
         xs = paddle.nn.functional.relu(self.ts_conv1(x))
         xs = paddle.nn.functional.relu(self.ts_conv2(xs))
-        xs = fluid.layers.squeeze(xs, axes=[1])
+        xs = paddle.squeeze(xs, axis=[1])
         xe = paddle.nn.functional.relu(self.te_conv1(x))
         xe = paddle.nn.functional.relu(self.te_conv2(xe))
-        xe = fluid.layers.squeeze(xe, axes=[1])
+        xe = paddle.squeeze(xe, axis=[1])
 
         # PEM
         xp = paddle.nn.functional.relu(self.p_conv1(x))
@@ -287,7 +287,7 @@ class BMN(fluid.dygraph.Layer):
         xp = paddle.reshape(xp, shape=[0, 0, -1, self.dscale, self.tscale])
         xp = self.p_conv3d1(xp)
-        xp = fluid.layers.squeeze(xp, axes=[2])
+        xp = paddle.squeeze(xp, axis=[2])
         xp = paddle.nn.functional.relu(self.p_conv2d1(xp))
         xp = paddle.nn.functional.relu(self.p_conv2d2(xp))
         xp = paddle.nn.functional.relu(self.p_conv2d3(xp))
@@ -411,11 +411,11 @@ def bmn_loss_func(
         loss = -1 * (loss_pos + loss_neg) / num_entries
         return loss
 
-    pred_bm_reg = fluid.layers.squeeze(
-        fluid.layers.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axes=[1]
-    )
-    pred_bm_cls = fluid.layers.squeeze(
-        fluid.layers.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axes=[1]
-    )
+    pred_bm_reg = paddle.squeeze(
+        fluid.layers.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1]
+    )
+    pred_bm_cls = paddle.squeeze(
+        fluid.layers.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axis=[1]
+    )
     bm_mask = _get_mask(cfg)
......
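The slice-then-squeeze pair above splits pred_bm's channel dimension into the regression and classification maps. A hedged dygraph sketch of the same pattern, using tensor indexing in place of fluid.layers.slice (shapes are illustrative):

import paddle

pred_bm = paddle.rand([4, 2, 32, 100])                    # (N, 2, D, T)
pred_bm_reg = paddle.squeeze(pred_bm[:, 0:1], axis=[1])   # (N, D, T): regression map
pred_bm_cls = paddle.squeeze(pred_bm[:, 1:2], axis=[1])   # (N, D, T): classification map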
@@ -47,7 +47,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.squeeze(x, **self.attrs)
+        out = paddle.squeeze(x, **self.attrs)
         self.fetch_list = [out.name]
 
     def run_model(self, exec_mode):
......
@@ -4097,7 +4097,7 @@ class TestBook(LayerTest):
         # TODO(minqiyang): dygraph do not support layers with param now
         with self.static_graph():
             x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
-            out = layers.squeeze(input=x, axes=[2])
+            out = paddle.squeeze(x, axis=[2])
             return out
 
     def test_flatten(self):
......
@@ -635,7 +635,7 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1):
             new_h = layers.matmul(concat_in, w2)
             new_h = layers.unsqueeze(new_h, [1])
             new_h, _ = dot_attention(new_h, y)
-            new_h = layers.squeeze(new_h, [1])
+            new_h = paddle.squeeze(new_h, [1])
             rnn.update_memory(pre_h, new_h)
             rnn.step_output(new_h)
......
@@ -80,7 +80,7 @@ class DecoderCell(layers.RNNCell):
             attn_scores, encoder_padding_mask
         )
         attn_scores = layers.softmax(attn_scores)
-        attn_out = layers.squeeze(
+        attn_out = paddle.squeeze(
             layers.matmul(attn_scores, encoder_output), [1]
         )
         attn_out = layers.concat([attn_out, hidden], 1)
......
@@ -97,7 +97,7 @@ class TestSqueeze2AxesTensor(UnittestBase):
         # axes is a Variable
         axes = paddle.assign([0, 2])
         out = paddle.squeeze(feat, axes)
-        out2 = paddle.fluid.layers.squeeze(feat, axes)
+        out2 = paddle.squeeze(feat, axes)
         sgd = paddle.optimizer.SGD()
         sgd.minimize(paddle.mean(out))
@@ -136,7 +136,7 @@ class TestSqueeze2AxesTensorList(UnittestBase):
             paddle.full([1], 2, dtype='int32'),
         ]
         out = paddle.squeeze(feat, axes)
-        out2 = paddle.fluid.layers.squeeze(feat, axes)
+        out2 = paddle.squeeze(feat, axes)
         sgd = paddle.optimizer.SGD()
         sgd.minimize(paddle.mean(out))
......
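Both tests above exercise the case where the squeeze axes are given as a Variable rather than a Python list; a minimal static-graph sketch of that usage (names and shapes are illustrative):

import paddle

paddle.enable_static()
feat = paddle.static.data(name='feat', shape=[1, 5, 1, 10], dtype='float32')
axes = paddle.assign([0, 2])      # the axes arrive as a tensor, not a Python list
out = paddle.squeeze(feat, axes)  # removes dims 0 and 2, both of size 1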