Unverified commit 1ca86fc6, authored by Vvsmile, committed by GitHub

[Clean Fluid API] Remove API: unsqueeze, use paddle.unsqueeze to replace paddle.fluid.layers.unsqueeze (#47936)

* Remove API: unsqueeze
	Replace the function call.
	Use paddle.unsqueeze to replace paddle.fluid.layers.unsqueeze

* Remove API: unsqueeze
	Remove unsqueeze, which is not used in Paddle 2.0

* Remove API: unsqueeze
	Fix the bug by adding 'import paddle' to the file
	tests/unittests/test_rnn_cell_api.py

* Modify the calls to unsqueeze from the old style to the new style

* Fix the call arguments of unsqueeze

* Replace unsqueeze with paddle.unsqueeze

* Fix the PR-CI-Static-Check error: name 'paddle' is not defined

* Fix the static-graph error in unsqueeze
Parent 676e4b35
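For readers migrating their own code, here is a minimal sketch of the replacement pattern this commit applies (Paddle 2.x assumed; the tensor `x` below is illustrative): the old fluid call took the keywords `input=` and `axes=`, while `paddle.unsqueeze` takes the tensor positionally and the keyword `axis=`.

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.rand(3, 4).astype("float32"))

# Old Paddle 1.x / fluid style, removed by this PR:
#   y = paddle.fluid.layers.unsqueeze(input=x, axes=[1])
# New Paddle 2.0 style used throughout the diff below:
y = paddle.unsqueeze(x, axis=[1])
print(y.shape)  # [3, 1, 4]
```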
@@ -71,7 +71,6 @@ __all__ = [
     'spectral_norm',
     'one_hot',
     'autoincreased_step_counter',
-    'unsqueeze',
     'lod_reset',
     'clip',
     'clip_by_norm',
@@ -189,7 +189,7 @@ class BaseModel(fluid.dygraph.Layer):
         return paddle.reshape(x, shape=(-1, self.beam_size, x.shape[1]))

     def _expand_to_beam_size(self, x):
-        x = fluid.layers.unsqueeze(x, [1])
+        x = paddle.unsqueeze(x, [1])
         expand_shape = [-1] * len(x.shape)
         expand_shape[1] = self.beam_size * x.shape[1]
         x = paddle.expand(x, expand_shape)
@@ -401,7 +401,7 @@ class BaseModel(fluid.dygraph.Layer):
         dec_cell = [self._expand_to_beam_size(ele) for ele in dec_cell]

         batch_pos = paddle.expand(
-            fluid.layers.unsqueeze(
+            paddle.unsqueeze(
                 to_variable(np.arange(0, self.batch_size, 1, dtype="int64")),
                 [1],
             ),
@@ -450,7 +450,7 @@ class BaseModel(fluid.dygraph.Layer):

             step_log_probs = paddle.multiply(
                 paddle.expand(
-                    fluid.layers.unsqueeze(beam_finished, [2]),
+                    paddle.unsqueeze(beam_finished, [2]),
                     [-1, -1, self.tar_vocab_size],
                 ),
                 noend_mask_tensor,
@@ -664,7 +664,7 @@ class AttentionModel(fluid.dygraph.Layer):
         return paddle.reshape(x, shape=(-1, x.shape[2]))

     def tile_beam_merge_with_batch(self, x):
-        x = fluid.layers.unsqueeze(x, [1])  # [batch_size, 1, ...]
+        x = paddle.unsqueeze(x, [1])  # [batch_size, 1, ...]
         expand_shape = [-1] * len(x.shape)
         expand_shape[1] = self.beam_size * x.shape[1]
         x = paddle.expand(x, expand_shape)  # [batch_size, beam_size, ...]
@@ -684,7 +684,7 @@ class AttentionModel(fluid.dygraph.Layer):
         return paddle.reshape(x, shape=(-1, self.beam_size, x.shape[1]))

     def _expand_to_beam_size(self, x):
-        x = fluid.layers.unsqueeze(x, [1])
+        x = paddle.unsqueeze(x, [1])
         expand_shape = [-1] * len(x.shape)
         expand_shape[1] = self.beam_size * x.shape[1]
         x = paddle.expand(x, expand_shape)
@@ -703,7 +703,7 @@ class AttentionModel(fluid.dygraph.Layer):
         return paddle.gather_nd(x, topk_coordinates)

     def attention(self, query, enc_output, mask=None):
-        query = fluid.layers.unsqueeze(query, [1])
+        query = paddle.unsqueeze(query, [1])
         memory = self.attn_fc(enc_output)
         attn = paddle.matmul(query, memory, transpose_y=True)
@@ -135,7 +135,7 @@ class Conv1D(fluid.dygraph.Layer):
         )

     def forward(self, x):
-        x = fluid.layers.unsqueeze(input=x, axes=[2])
+        x = paddle.unsqueeze(x, axis=[2])
         x = self._conv2d(x)
         x = paddle.squeeze(x, axis=[2])
         return x
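The Conv1D hunk above keeps a 2-D convolution under the hood: the input gains a dummy height axis via unsqueeze, passes through the conv, and the axis is squeezed away again. A standalone sketch of that pattern follows, with illustrative channel and kernel sizes that are not taken from the PR.

```python
import paddle

# Illustrative sizes only; the actual Conv1D wrapper in the test configures its own Conv2D.
conv2d = paddle.nn.Conv2D(in_channels=8, out_channels=16, kernel_size=(1, 3), padding=(0, 1))

x = paddle.randn([4, 8, 32])       # [batch, channels, width]
x = paddle.unsqueeze(x, axis=[2])  # [batch, channels, 1, width]
x = conv2d(x)                      # [batch, 16, 1, width]
x = paddle.squeeze(x, axis=[2])    # [batch, 16, width]
print(x.shape)                     # [4, 16, 32]
```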
@@ -775,7 +775,7 @@ class Transformer(Layer):
             finished = layers.cast(finished, dtype=probs.dtype)
             probs = paddle.multiply(
                 paddle.expand(
-                    layers.unsqueeze(finished, [2]),
+                    paddle.unsqueeze(finished, [2]),
                     [-1, -1, self.trg_vocab_size],
                 ),
                 noend_mask_tensor,
@@ -805,7 +805,7 @@ class Transformer(Layer):
         noend_array[eos_id] = 0
         noend_mask_tensor = to_variable(np.array(noend_array, dtype="float32"))
         batch_pos = paddle.expand(
-            layers.unsqueeze(
+            paddle.unsqueeze(
                 to_variable(np.arange(0, batch_size, 1, dtype="int64")), [1]
             ),
             [-1, beam_size],
@@ -47,7 +47,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.unsqueeze(x, **self.attrs)
+        out = paddle.unsqueeze(x, **self.attrs)
         self.fetch_list = [out.name]

     def run_model(self, exec_mode):
@@ -47,7 +47,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
     bs = layers.cast(bs, 'int64')
     bs.stop_gradient = stop_gradient
     batch_pos = paddle.expand(
-        layers.unsqueeze(paddle.arange(0, bs, 1, dtype=bs.dtype), [1]),
+        paddle.unsqueeze(paddle.arange(0, bs, 1, dtype=bs.dtype), [1]),
         [-1, beam_size],
     )
     topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
@@ -128,7 +128,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):

         sum_result = paddle.tensor.array_read(array=mem_array, i=j)
         sum_result.persistable = True
-        tmp = layers.unsqueeze(sum_result, axes=[0])
+        tmp = paddle.unsqueeze(sum_result, axis=[0])
         tmp = paddle.expand(tmp, [10, -1])
         fc = layers.fc(tmp, size=256)
         loss = paddle.mean(sum_result)
@@ -2683,7 +2683,7 @@ class TestBook(LayerTest):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
             x = layers.data(name='x', shape=[8, 2], dtype='float32')
-            out = layers.unsqueeze(input=x, axes=[1])
+            out = paddle.unsqueeze(x, axis=[1])
             return out

     def test_sequence_scatter(self):
@@ -632,7 +632,7 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1):
            step_in = rnn.step_input(x)
            concat_in = layers.concat([step_in, pre_h], 1)
            new_h = paddle.matmul(concat_in, w2)
-           new_h = layers.unsqueeze(new_h, [1])
+           new_h = paddle.unsqueeze(new_h, [1])
            new_h, _ = dot_attention(new_h, y)
            new_h = paddle.squeeze(new_h, [1])