Unverified commit f254d0a0 authored by 傅剑寒, committed by GitHub

[Fluid Clean] remove expand and eye under fluid.layers (#47996)

* remove expand and eye under fluid.layers

* delete expand API test case
Parent dd27996c
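The call-site changes below all follow the same migration: `fluid.layers.expand(x, expand_times=[...])` becomes `paddle.expand(x, shape)`, where the target shape uses `-1` for dimensions that keep their size. A minimal sketch of the replacement patterns, assuming Paddle 2.x (`paddle.expand` broadcasts size-1 dimensions to a target shape; `paddle.tile`, which is not used at these call sites, covers the old repeat-count semantics for non-singleton dimensions):

```python
# Hedged sketch of the migration performed at each call site in this commit.
import paddle

x = paddle.rand([4, 1, 8])

# Old (removed):  fluid.layers.expand(x, expand_times=[1, 5, 1])
# New equivalent: broadcast the size-1 dimension to 5, keep the others (-1).
y = paddle.expand(x, [-1, 5, -1])
print(y.shape)  # [4, 5, 8]

# If the old expand_times repeated a dimension whose size is not 1
# (e.g. expand_times=[2, 3] on a [12, 14] tensor), paddle.tile is the
# closer replacement, since paddle.expand only broadcasts size-1 dims.
z = paddle.tile(paddle.rand([12, 14]), repeat_times=[2, 3])
print(z.shape)  # [24, 42]
```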
@@ -120,7 +120,6 @@ __all__ = [
    'pad2d',
    'unique',
    'unique_with_counts',
-    'expand',
    'scale',
    'elementwise_add',
    'elementwise_div',
@@ -7970,128 +7969,6 @@ def flatten(x, axis=1, name=None):
    return out
-@deprecated(since='2.0.0', update_to="paddle.expand")
-def expand(x, expand_times, name=None):
-    """
-    :alias_main: paddle.expand
-    :alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand
-    :old_api: paddle.fluid.layers.expand
-    This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
-    The times number for each dimension of ``x`` is set by the parameter ``expand_times``.
-    The rank of ``x`` should be less than or equal to 6. Please note that size of ``expand_times`` must be the same
-    with X's rank. Following is a using case:
-    .. code-block:: text
-        Input(X) is a 3-D tensor with shape [2, 3, 1]:
-                [
-                   [[1], [2], [3]],
-                   [[4], [5], [6]]
-                ]
-        Attr(expand_times): [1, 2, 2]
-        Output(Out) is a 3-D tensor with shape [2, 6, 2]:
-                [
-                    [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
-                    [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
-                ]
-    Args:
-        x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` .
-        expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of
-                it should be integers or Tensors with shape [1]. If ``expand_times`` is an Variable, it should be an 1-D Tensor.
-                Expand times number for each dimension of ``x`` .
-        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
-    Returns:
-        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` .
-    Raises:
-        TypeError: The type of ``expand_times`` must be list, tuple or Variable.
-        ValueError: The elements of ``expand_times`` cannot be negative.
-    Examples:
-        .. code-block:: python
-            import paddle.fluid as fluid
-            # example 1:
-            data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
-            expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
-            # the shape of expanded_1 is [2, 6, 2].
-            # example 2:
-            data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
-            expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
-            expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
-            # the shape of expanded_2 is [48, 56].
-    """
-    if _non_static_mode():
-        attrs = ()
-        expand_times_tensor = None
-        if isinstance(expand_times, (list, tuple)):
-            expand_times = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
-                for item in expand_times
-            ]
-            attrs += ('expand_times', expand_times)
-        elif isinstance(expand_times, Variable):
-            expand_times_tensor = expand_times
-            expand_times_tensor.stop_gradient = True
-        return _legacy_C_ops.expand(x, expand_times_tensor, *attrs)
-    inputs = {"X": [x]}
-    attrs = {}
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-        'expand',
-    )
-    check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
-        raise ValueError(
-            "expand op bool date type must set the stop_gradient to be False"
-        )
-    helper = LayerHelper('expand', input=x, **locals())
-    def get_attr_expand_times(list_expand_times):
-        attrs_expand_times = []
-        for idx, times in enumerate(list_expand_times):
-            if isinstance(times, Variable):
-                attrs_expand_times.append(-1)
-            else:
-                attrs_expand_times.append(times)
-                assert (
-                    times > 0
-                ), "Each element given in expand_times must not be negative."
-        return attrs_expand_times
-    if isinstance(expand_times, Variable):
-        expand_times.stop_gradient = True
-        inputs['ExpandTimes'] = expand_times
-    elif isinstance(expand_times, (list, tuple)):
-        attrs['expand_times'] = get_attr_expand_times(expand_times)
-        if utils._contain_var(expand_times):
-            inputs['expand_times_tensor'] = utils._convert_to_tensor_list(
-                expand_times
-            )
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    return out
from paddle.fluid.framework import convert_np_dtype_to_dtype_
......
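The removed layer tiled ``x`` by a per-dimension repeat count, as in the docstring example above ([2, 3, 1] with expand_times [1, 2, 2] giving [2, 6, 2]). A small hedged check, not part of the commit, that `paddle.tile` reproduces that example in Paddle 2.x:

```python
# Verifies the docstring example above with paddle.tile, whose repeat-count
# semantics match the removed fluid.layers.expand.
import numpy as np
import paddle

x = paddle.to_tensor([[[1], [2], [3]], [[4], [5], [6]]], dtype="int32")  # shape [2, 3, 1]
out = paddle.tile(x, repeat_times=[1, 2, 2])
print(out.shape)  # [2, 6, 2], matching Output(Out) in the docstring above
np.testing.assert_array_equal(out.numpy(), np.tile(x.numpy(), (1, 2, 2)))
```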
@@ -186,9 +186,9 @@ class BaseModel(fluid.dygraph.Layer):
    def _expand_to_beam_size(self, x):
        x = fluid.layers.unsqueeze(x, [1])
-        expand_times = [1] * len(x.shape)
-        expand_times[1] = self.beam_size
-        x = fluid.layers.expand(x, expand_times)
+        expand_shape = [-1] * len(x.shape)
+        expand_shape[1] = self.beam_size * x.shape[1]
+        x = paddle.expand(x, expand_shape)
        return x
    def _real_state(self, state, new_state, step_mask):
@@ -386,19 +386,20 @@ class BaseModel(fluid.dygraph.Layer):
                [[0.0] + [-self.kinf] * (self.beam_size - 1)], dtype="float32"
            )
        )
-        beam_state_log_probs = fluid.layers.expand(
-            beam_state_log_probs, [self.batch_size, 1]
+        beam_state_log_probs = paddle.expand(
+            beam_state_log_probs,
+            [self.batch_size * beam_state_log_probs.shape[0], -1],
        )
        dec_hidden, dec_cell = enc_hidden, enc_cell
        dec_hidden = [self._expand_to_beam_size(ele) for ele in dec_hidden]
        dec_cell = [self._expand_to_beam_size(ele) for ele in dec_cell]
-        batch_pos = fluid.layers.expand(
+        batch_pos = paddle.expand(
            fluid.layers.unsqueeze(
                to_variable(np.arange(0, self.batch_size, 1, dtype="int64")),
                [1],
            ),
-            [1, self.beam_size],
+            [-1, self.beam_size],
        )
        predicted_ids = []
        parent_ids = []
@@ -442,9 +443,9 @@ class BaseModel(fluid.dygraph.Layer):
            )
            step_log_probs = fluid.layers.elementwise_mul(
-                fluid.layers.expand(
+                paddle.expand(
                    fluid.layers.unsqueeze(beam_finished, [2]),
-                    [1, 1, self.tar_vocab_size],
+                    [-1, -1, self.tar_vocab_size],
                ),
                noend_mask_tensor,
                axis=-1,
@@ -650,9 +651,9 @@ class AttentionModel(fluid.dygraph.Layer):
    def tile_beam_merge_with_batch(self, x):
        x = fluid.layers.unsqueeze(x, [1])  # [batch_size, 1, ...]
-        expand_times = [1] * len(x.shape)
-        expand_times[1] = self.beam_size
-        x = fluid.layers.expand(x, expand_times)  # [batch_size, beam_size, ...]
+        expand_shape = [-1] * len(x.shape)
+        expand_shape[1] = self.beam_size * x.shape[1]
+        x = paddle.expand(x, expand_shape)  # [batch_size, beam_size, ...]
        x = paddle.transpose(
            x, list(range(2, len(x.shape))) + [0, 1]
        )  # [..., batch_size, beam_size]
@@ -670,9 +671,9 @@ class AttentionModel(fluid.dygraph.Layer):
    def _expand_to_beam_size(self, x):
        x = fluid.layers.unsqueeze(x, [1])
-        expand_times = [1] * len(x.shape)
-        expand_times[1] = self.beam_size
-        x = fluid.layers.expand(x, expand_times)
+        expand_shape = [-1] * len(x.shape)
+        expand_shape[1] = self.beam_size * x.shape[1]
+        x = paddle.expand(x, expand_shape)
        return x
    def _real_state(self, state, new_state, step_mask):
......
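In the beam-search call sites above, `expand_shape[1] = self.beam_size * x.shape[1]` works because `x` has just been unsqueezed at axis 1, so `x.shape[1] == 1` and the product is simply `beam_size`; `paddle.expand` then broadcasts that singleton dimension. A hedged sketch of the pattern with made-up sizes:

```python
# Sketch of the beam-expansion pattern above (batch_size=4, hidden=16,
# beam_size=3 are stand-in values, not taken from the commit).
import paddle

beam_size = 3
x = paddle.rand([4, 16])                  # [batch_size, hidden]
x = paddle.unsqueeze(x, [1])              # [batch_size, 1, hidden]
expand_shape = [-1] * len(x.shape)
expand_shape[1] = beam_size * x.shape[1]  # == beam_size, since x.shape[1] == 1
x = paddle.expand(x, expand_shape)        # [batch_size, beam_size, hidden]
print(x.shape)  # [4, 3, 16]
```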
@@ -95,7 +95,7 @@ class CNN(fluid.dygraph.Layer):
        o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
            dtype='float32'
        )
-        mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
+        mask_emb = paddle.expand(o_np_mask, [-1, self.hid_dim])
        emb = emb * mask_emb
        emb = paddle.reshape(
            emb, shape=[-1, self.channels, self.seq_len, self.hid_dim]
@@ -141,7 +141,7 @@ class BOW(fluid.dygraph.Layer):
        o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
            dtype='float32'
        )
-        mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
+        mask_emb = paddle.expand(o_np_mask, [-1, self.hid_dim])
        emb = emb * mask_emb
        emb = paddle.reshape(emb, shape=[-1, self.seq_len, self.hid_dim])
        bow_1 = fluid.layers.reduce_sum(emb, dim=1)
@@ -189,7 +189,7 @@ class GRU(fluid.dygraph.Layer):
        o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
            'float32'
        )
-        mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
+        mask_emb = paddle.expand(o_np_mask, [-1, self.hid_dim])
        emb = emb * mask_emb
        emb = paddle.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
        fc_1 = self._fc1(emb)
@@ -243,7 +243,8 @@ class BiGRU(fluid.dygraph.Layer):
        o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
            'float32'
        )
-        mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
+        mask_emb = paddle.expand(o_np_mask, [-1, self.hid_dim])
        emb = emb * mask_emb
        emb = paddle.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
        fc_1 = self._fc1(emb)
......
@@ -701,9 +701,9 @@ class Transformer(Layer):
            tensor = paddle.reshape(
                tensor, [tensor.shape[0], 1] + list(tensor.shape[1:])
            )
-            tile_dims = [1] * len(tensor.shape)
+            tile_dims = [-1] * len(tensor.shape)
            tile_dims[1] = beam_size
-            return layers.expand(tensor, tile_dims)
+            return paddle.expand(tensor, tile_dims)
        def merge_batch_beams(tensor):
            var_dim_in_state = 2  # count in beam dim
@@ -757,8 +757,9 @@ class Transformer(Layer):
        def mask_probs(probs, finished, noend_mask_tensor):
            finished = layers.cast(finished, dtype=probs.dtype)
            probs = layers.elementwise_mul(
-                layers.expand(
-                    layers.unsqueeze(finished, [2]), [1, 1, self.trg_vocab_size]
+                paddle.expand(
+                    layers.unsqueeze(finished, [2]),
+                    [-1, -1, self.trg_vocab_size],
                ),
                noend_mask_tensor,
                axis=-1,
@@ -785,11 +786,11 @@ class Transformer(Layer):
        noend_array = [-inf] * self.trg_vocab_size
        noend_array[eos_id] = 0
        noend_mask_tensor = to_variable(np.array(noend_array, dtype="float32"))
-        batch_pos = layers.expand(
+        batch_pos = paddle.expand(
            layers.unsqueeze(
                to_variable(np.arange(0, batch_size, 1, dtype="int64")), [1]
            ),
-            [1, beam_size],
+            [-1, beam_size],
        )
        predict_ids = []
        parent_ids = []
......
@@ -46,7 +46,7 @@ class TestBase(IPUOpTest):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32"
        )
-        out = paddle.fluid.layers.expand(x, **self.attrs)
+        out = paddle.expand(x, **self.attrs)
        self.fetch_list = [out.name]
    def run_model(self, exec_mode):
@@ -82,9 +82,7 @@ class TestCase1(TestBase):
        expand_times = paddle.fluid.layers.fill_constant(
            shape=[len(self.feed_shape[0])], dtype="int32", value=2
        )
-        out = paddle.fluid.layers.expand(
-            x, expand_times=expand_times, **self.attrs
-        )
+        out = paddle.expand(x, expand_times, **self.attrs)
        self.fetch_list = [out.name]
......
@@ -97,7 +97,7 @@ class TestExpandNet(unittest.TestCase):
            name="label", shape=[32, 1], dtype='int64'
        )
-        res = paddle.fluid.layers.expand(a, [1, 32])
+        res = paddle.expand(a, [-1, 32])
        loss = res.sum()
        sgd = fluid.optimizer.SGD(learning_rate=0.01)
        sgd.minimize(loss)
......
@@ -43,9 +43,9 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
    for _ in range(20):
        bs = layers.cast(bs, 'int64')
        bs.stop_gradient = stop_gradient
-        batch_pos = layers.expand(
+        batch_pos = paddle.expand(
            layers.unsqueeze(paddle.arange(0, bs, 1, dtype=bs.dtype), [1]),
-            [1, beam_size],
+            [-1, beam_size],
        )
        topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
        topk_coordinates.stop_gradient = stop_gradient
......
@@ -128,7 +128,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
        sum_result = layers.array_read(array=mem_array, i=j)
        sum_result.persistable = True
        tmp = layers.unsqueeze(sum_result, axes=[0])
-        tmp = layers.expand(tmp, expand_times=[10, 1])
+        tmp = paddle.expand(tmp, [10, -1])
        fc = layers.fc(tmp, size=256)
        loss = paddle.mean(sum_result)
......
@@ -16,8 +16,6 @@ import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
-from paddle.fluid import Program, program_guard
-import paddle
# Situation 1: expand_times is a list(without tensor)
@@ -201,62 +199,5 @@ class TestExpandOpInt64_t(OpTest):
        self.check_output()
-class TestExpandError(unittest.TestCase):
-    def test_errors(self):
-        with program_guard(Program(), Program()):
-            x1 = fluid.create_lod_tensor(
-                np.array([[-1]]), [[1]], fluid.CPUPlace()
-            )
-            expand_times = [2, 2]
-            self.assertRaises(TypeError, fluid.layers.expand, x1, expand_times)
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
-            self.assertRaises(TypeError, fluid.layers.expand, x2, expand_times)
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool")
-            x3.stop_gradient = True
-            self.assertRaises(ValueError, fluid.layers.expand, x3, expand_times)
-# Test python API
-class TestExpandAPI(unittest.TestCase):
-    def test_api(self):
-        input = np.random.random([12, 14]).astype("float32")
-        x = fluid.layers.data(
-            name='x', shape=[12, 14], append_batch_size=False, dtype="float32"
-        )
-        positive_2 = fluid.layers.fill_constant([1], "int32", 2)
-        expand_times = fluid.layers.data(
-            name="expand_times", shape=[2], append_batch_size=False
-        )
-        out_1 = fluid.layers.expand(x, expand_times=[2, 3])
-        out_2 = fluid.layers.expand(x, expand_times=[positive_2, 3])
-        out_3 = fluid.layers.expand(x, expand_times=expand_times)
-        g0 = fluid.backward.calc_gradient(out_2, x)
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res_3 = exe.run(
-            fluid.default_main_program(),
-            feed={"x": input, "expand_times": np.array([1, 3]).astype("int32")},
-            fetch_list=[out_1, out_2, out_3],
-        )
-        assert np.array_equal(res_1, np.tile(input, (2, 3)))
-        assert np.array_equal(res_2, np.tile(input, (2, 3)))
-        assert np.array_equal(res_3, np.tile(input, (1, 3)))
-class TestExpandDygraphAPI(unittest.TestCase):
-    def test_expand_times_is_tensor(self):
-        with paddle.fluid.dygraph.guard():
-            a = paddle.rand([2, 5])
-            b = paddle.fluid.layers.expand(a, expand_times=[2, 3])
-            c = paddle.fluid.layers.expand(
-                a, expand_times=paddle.to_tensor([2, 3], dtype='int32')
-            )
-            np.testing.assert_array_equal(b.numpy(), np.tile(a.numpy(), [2, 3]))
-            np.testing.assert_array_equal(c.numpy(), np.tile(a.numpy(), [2, 3]))
if __name__ == "__main__":
    unittest.main()
@@ -738,13 +738,13 @@ class TestImperative(unittest.TestCase):
                name='inp2', shape=[3, 3], dtype=np.float32
            )
-            a = fluid.layers.expand(
+            a = paddle.expand(
                paddle.reshape(fluid.layers.reduce_sum(inp_data1), [1, 1]),
-                [4, 1],
+                [4, -1],
            )
-            b = fluid.layers.expand(
+            b = paddle.expand(
                paddle.reshape(fluid.layers.reduce_sum(inp_data2), [1, 1]),
-                [4, 1],
+                [4, -1],
            )
            cond = fluid.layers.less_than(x=a, y=b)
......
@@ -306,8 +306,9 @@ class SimpleAttention(fluid.dygraph.Layer):
        decoder_state_proj_reshape = paddle.reshape(
            decoder_state_fc, [-1, 1, decoder_state_fc.shape[1]]
        )
-        decoder_state_expand = fluid.layers.expand(
-            decoder_state_proj_reshape, [1, encoder_proj.shape[1], 1]
+        decoder_state_expand = paddle.expand(
+            decoder_state_proj_reshape,
+            [-1, encoder_proj.shape[1], -1],
        )
        concated = fluid.layers.elementwise_add(
            encoder_proj, decoder_state_expand
......
@@ -310,9 +310,7 @@ class Generator(fluid.dygraph.Layer):
    def forward(self, input, label_trg):
        shape = input.shape
        label_trg_e = paddle.reshape(label_trg, [-1, label_trg.shape[1], 1, 1])
-        label_trg_e = fluid.layers.expand(
-            x=label_trg_e, expand_times=[1, 1, shape[2], shape[3]]
-        )
+        label_trg_e = paddle.expand(label_trg_e, [-1, -1, shape[2], shape[3]])
        input1 = fluid.layers.concat([input, label_trg_e], 1)
......
@@ -3554,14 +3554,6 @@ class TestBook(LayerTest):
            out = layers.cross_entropy(x, label, False, 4)
            return out
-    def make_expand(self):
-        with program_guard(
-            fluid.default_main_program(), fluid.default_startup_program()
-        ):
-            x = self._get_data(name="input", shape=[10], dtype='int32')
-            out = layers.expand(x, [1, 2])
-            return out
    def make_uniform_random_batch_size_like(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
......
@@ -114,31 +114,6 @@ class TestReduceSumWithDimDoubleGradCheck(unittest.TestCase):
class TestReshapeDoubleGradCheck(unittest.TestCase):
-    @prog_scope()
-    def func(self, place):
-        x_shape = [3, 12]
-        expand_times = [4, 9]
-        eps = 0.005
-        dtype = np.float64
-        x = layers.data('x', x_shape, False, dtype)
-        x.persistable = True
-        out = layers.expand(x, expand_times)
-        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
-        gradient_checker.double_grad_check(
-            [x], out, x_init=x_arr, place=place, eps=eps
-        )
-    def test_grad(self):
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(fluid.CUDAPlace(0))
-        for p in places:
-            self.func(p)
-class TestExpandDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        x_shape = [3, 12]
......