Unverified commit 4244fa6e, authored by 傅剑寒, committed by GitHub

(fluid cleanup) remove reshape in nn.py under fluid (#47967)

* remove reshape in nn.py under fluid

* remove reshape reference

* fix test case

* fix test case in distribution.uniform

* remove fluid reshape reference

Parent: 1e8346fe
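The pattern applied throughout this diff is to call the public paddle.reshape API in place of the fluid layer, and to express any behaviour that the fluid layer fused into reshape (act, inplace, actual_shape) with separate ops. A minimal before/after sketch, using an illustrative tensor rather than code taken from this diff:

    import paddle

    x = paddle.ones([2, 4, 6], dtype='float32')
    # before (fluid): y = fluid.layers.reshape(x, shape=[-1, 0, 3, 2], act='elu', inplace=False)
    # after: reshape and activation are separate calls; the act, inplace and
    # actual_shape arguments have no counterpart in paddle.reshape
    y = paddle.reshape(x, shape=[-1, 0, 3, 2])   # 0 copies dim 1, -1 is inferred -> [2, 4, 3, 2]
    y = paddle.nn.functional.elu(y)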
@@ -184,7 +184,7 @@ class Normal(distribution.Distribution):
zero_tmp = tensor.fill_constant_batch_size_like(
self.loc + self.scale, batch_shape + shape, self.dtype, 0.0
)
-zero_tmp_reshape = nn.reshape(zero_tmp, output_shape)
+zero_tmp_reshape = paddle.reshape(zero_tmp, output_shape)
zero_tmp_shape = nn.shape(zero_tmp_reshape)
normal_random_tmp = nn.gaussian_random(
zero_tmp_shape, mean=0.0, std=1.0, seed=seed, dtype=self.dtype
@@ -199,7 +199,7 @@ class Normal(distribution.Distribution):
) * (tensor.zeros(output_shape, dtype=self.dtype) + self.scale)
output = elementwise_add(output, self.loc, name=name)
if self.all_arg_is_float:
-return nn.reshape(output, shape, name=name)
+return paddle.reshape(output, shape, name=name)
else:
return output
...
@@ -28,6 +28,7 @@ from paddle.fluid.layers import (
nn,
tensor,
)
+import paddle
class Uniform(distribution.Distribution):
@@ -174,8 +175,8 @@ class Uniform(distribution.Distribution):
max=1.0,
seed=seed,
)
-zero_tmp_reshape = nn.reshape(zero_tmp, output_shape)
-uniform_random_tmp_reshape = nn.reshape(
+zero_tmp_reshape = paddle.reshape(zero_tmp, output_shape)
+uniform_random_tmp_reshape = paddle.reshape(
uniform_random_tmp, output_shape
)
output = uniform_random_tmp_reshape * (
@@ -193,7 +194,7 @@ class Uniform(distribution.Distribution):
)
output = elementwise_add(output, self.low, name=name)
if self.all_arg_is_float:
-return nn.reshape(output, shape, name=name)
+return paddle.reshape(output, shape, name=name)
else:
return output
...
@@ -36,7 +36,8 @@ from paddle.fluid import core
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.framework import Variable, convert_np_dtype_to_dtype_
-from paddle.fluid.layers import slice, reshape
+from paddle.fluid.layers import slice
+import paddle
import warnings
from paddle import _C_ops, _legacy_C_ops
@@ -1549,17 +1550,17 @@ def tdm_sampler(
mask, axes=[1], starts=[start_offset], ends=[end_offset]
)
-layer_samples = reshape(
+layer_samples = paddle.reshape(
layer_samples, [-1, layer_sample_num + positive_flag, 1]
)
layer_samples.stop_gradient = True
-layer_labels = reshape(
+layer_labels = paddle.reshape(
layer_labels, [-1, layer_sample_num + positive_flag, 1]
)
layer_labels.stop_gradient = True
-layer_mask = reshape(
+layer_mask = paddle.reshape(
layer_mask, [-1, layer_sample_num + positive_flag, 1]
)
layer_mask.stop_gradient = True
...
@@ -19,6 +19,7 @@ from paddle.fluid import layers, unique_name
from paddle.fluid.dygraph import Layer
from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
from paddle.fluid.layers.control_flow import StaticRNN
+import paddle
__all__ = ['BasicGRUUnit', 'basic_gru', 'BasicLSTMUnit', 'basic_lstm']
@@ -339,7 +340,7 @@ def basic_gru(
if bidirectional:
direc_num = 2
if init_hidden:
-init_hidden = layers.reshape(
+init_hidden = paddle.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size]
)
@@ -394,7 +395,7 @@ def basic_gru(
last_hidden_array.append(last_hidden)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
-last_hidden_output = layers.reshape(
+last_hidden_output = paddle.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size]
)
@@ -419,7 +420,7 @@ def basic_gru(
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
-last_hidden = layers.reshape(
+last_hidden = paddle.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size]
)
@@ -625,10 +626,10 @@ def basic_lstm(
direc_num = 2
# convert to [num_layers, 2, batch_size, hidden_size]
if init_hidden:
-init_hidden = layers.reshape(
+init_hidden = paddle.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size]
)
-init_cell = layers.reshape(
+init_cell = paddle.reshape(
init_cell, shape=[num_layers, direc_num, -1, hidden_size]
)
@@ -701,11 +702,11 @@ def basic_lstm(
last_cell_array.append(last_cell)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
-last_hidden_output = layers.reshape(
+last_hidden_output = paddle.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size]
)
last_cell_output = layers.concat(last_cell_array, axis=0)
-last_cell_output = layers.reshape(
+last_cell_output = paddle.reshape(
last_cell_output, shape=[num_layers, -1, hidden_size]
)
@@ -729,12 +730,12 @@ def basic_lstm(
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
-last_hidden = layers.reshape(
+last_hidden = paddle.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size]
)
last_cell = layers.concat([fw_last_cell, bw_last_cell], axis=1)
-last_cell = layers.reshape(
+last_cell = paddle.reshape(
last_cell, shape=[num_layers * direc_num, -1, hidden_size]
)
...
@@ -107,17 +107,17 @@ class TestModelCastBF16(unittest.TestCase):
ret = layers.elementwise_add(t, tt)
ret = layers.elementwise_mul(ret, t)
-ret = layers.reshape(ret, [0, 0])
+ret = paddle.reshape(ret, [0, 0])
with amp.bf16.bf16_guard():
ret_bf16 = layers.elementwise_add(t_bf16, tt_bf16)
ret_bf16 = layers.elementwise_mul(ret_bf16, t_bf16)
-ret_bf16 = layers.reshape(ret_bf16, [0, 0])
+ret_bf16 = paddle.reshape(ret_bf16, [0, 0])
with amp.bf16.bf16_guard():
ret_fp32bf16 = layers.elementwise_add(t, tt)
ret_fp32bf16 = layers.elementwise_mul(ret_fp32bf16, t)
-ret_fp32bf16 = layers.reshape(ret_fp32bf16, [0, 0])
+ret_fp32bf16 = paddle.reshape(ret_fp32bf16, [0, 0])
(
static_ret_bf16,
@@ -148,7 +148,8 @@ class TestModelCastBF16(unittest.TestCase):
with amp.bf16.bf16_guard():
ret = layers.elementwise_add(t, tt)
-ret = layers.reshape(ret, [0, 0], act='elu')
+ret = paddle.reshape(ret, [0, 0])
+ret = paddle.nn.functional.elu(ret)
ret = layers.elementwise_mul(ret, t)
ret = layers.elementwise_add(ret, tt)
...
@@ -320,7 +320,7 @@ def _coalesce_tensors(var_groups):
for g_var in grad_vars:
g_var_shapes.append(g_var.shape)
flattened_vars.append(
-nn.reshape(x=g_var, shape=[np.prod(g_var.shape)])
+paddle.reshape(x=g_var, shape=[np.prod(g_var.shape)])
)
coalesced_grad = nn.concat(flattened_vars)
coalesced_grads_and_grad_vars.append(
...
@@ -14,6 +14,7 @@
import copy
import numpy as np
+import paddle
from .framework import (
Variable,
@@ -114,7 +115,7 @@ class LayerHelperBase:
)
def _create_weight_normalize(self, attr, shape, dtype):
-from .layers import elementwise_mul, elementwise_div, reshape
+from .layers import elementwise_mul, elementwise_div
# Remove these ops when LayerHelper and layers support indicating
# program and block.
@@ -275,7 +276,7 @@ class LayerHelperBase:
x=v,
y=scale
if dim is None
-else reshape(x=scale, shape=[v.shape[dim]]),
+else paddle.reshape(x=scale, shape=[v.shape[dim]]),
axis=-1 if dim is None else dim,
)
# To serialize the original parameter for inference, maybe a
...
@@ -328,8 +328,8 @@ def retinanet_target_assign(
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
-cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
-bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
+cls_logits = paddle.reshape(x=cls_logits, shape=(-1, num_classes))
+bbox_pred = paddle.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = paddle.gather(cls_logits, score_index)
predicted_bbox_pred = paddle.gather(bbox_pred, loc_index)
@@ -510,8 +510,8 @@ def rpn_target_assign(
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
-cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
-bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
+cls_logits = paddle.reshape(x=cls_logits, shape=(-1, 1))
+bbox_pred = paddle.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = paddle.gather(cls_logits, score_index)
predicted_bbox_pred = paddle.gather(bbox_pred, loc_index)
@@ -1750,7 +1750,7 @@ def ssd_loss(
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
-gt_label = nn.reshape(
+gt_label = paddle.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0,) + (-1, 1)
)
gt_label.stop_gradient = True
@@ -1769,9 +1769,7 @@ def ssd_loss(
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
-conf_loss = nn.reshape(
-x=conf_loss, shape=(-1, 0), actual_shape=actual_shape
-)
+conf_loss = paddle.reshape(x=conf_loss, shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
@@ -1848,7 +1846,7 @@ def ssd_loss(
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
-loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
+loss = paddle.reshape(x=loss, shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
@@ -2477,9 +2475,9 @@ def multi_box_head(
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
-mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
+mbox_locs_concat = paddle.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
-mbox_confs_concat = nn.reshape(
+mbox_confs_concat = paddle.reshape(
mbox_confs_concat, shape=[0, -1, num_classes]
)
...
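A note on the ssd_loss changes above: fluid.layers.reshape paired a compile-time shape hint with a runtime actual_shape tensor, whereas paddle.reshape accepts a 1-D integer Tensor directly as shape, so the runtime shape is now passed on its own. A minimal sketch with illustrative values, not code from this diff:

    import paddle

    x = paddle.arange(12, dtype='float32')
    # a runtime shape tensor, playing the role actual_shape used to play
    runtime_shape = paddle.to_tensor([3, 4], dtype='int32')
    y = paddle.reshape(x, shape=runtime_shape)  # y.shape == [3, 4]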
@@ -228,7 +228,7 @@ class Uniform(Distribution):
uniform_random_tmp * (zero_tmp + self.high - self.low)
+ self.low
)
-return nn.reshape(output, output_shape)
+return paddle.reshape(output, output_shape)
else:
output_shape = shape + batch_shape
output = (
@@ -240,7 +240,7 @@ class Uniform(Distribution):
+ self.low
)
if self.all_arg_is_float:
-return nn.reshape(output, shape)
+return paddle.reshape(output, shape)
else:
return output
@@ -382,7 +382,7 @@ class Normal(Distribution):
zero_tmp_shape, mean=0.0, std=1.0, seed=seed
)
output = normal_random_tmp * (zero_tmp + self.scale) + self.loc
-return nn.reshape(output, output_shape)
+return paddle.reshape(output, output_shape)
else:
output_shape = shape + batch_shape
output = (
@@ -394,7 +394,7 @@ class Normal(Distribution):
+ self.loc
)
if self.all_arg_is_float:
-return nn.reshape(output, shape)
+return paddle.reshape(output, shape)
else:
return output
...
@@ -101,7 +101,6 @@ __all__ = [
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
-'reshape',
'squeeze',
'unsqueeze',
'lod_reset',
@@ -6234,240 +6233,6 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
return counter
def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
r"""
:alias_main: paddle.reshape
:alias: paddle.reshape,paddle.tensor.reshape,paddle.tensor.manipulation.reshape
This operator changes the shape of ``x`` without changing its data.
The target shape can be given by ``shape`` or ``actual_shape``.
When ``shape`` and ``actual_shape`` are set at the same time,
``actual_shape`` has a higher priority than ``shape``
but at this time ``shape`` can only be an integer list or tuple, and ``shape`` still should be set correctly to
guarantee shape inference in compile-time.
Some tricks exist when specifying the target shape.
1. -1 means the value of this dimension is inferred from the total element
number of x and remaining dimensions. Thus one and only one dimension can
be set -1.
2. 0 means the actual dimension value is going to be copied from the
corresponding dimension of x. The index of 0s in shape can not exceed
the dimension of x.
Here are some examples to explain it.
1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [6, 8], the reshape operator will transform x into a 2-D tensor with
shape [6, 8] and leaving x's data unchanged.
2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
specified is [2, 3, -1, 2], the reshape operator will transform x into a
4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this
case, one dimension of the target shape is set to -1, the value of this
dimension is inferred from the total element number of x and remaining
dimensions.
3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,
besides -1, 0 means the actual dimension value is going to be copied from
the corresponding dimension of x.
**Note**:
The parameter ``actual_shape`` will be deprecated in the future and only use ``shape`` instead to represent the target shape.
Args:
x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
If ``shape`` is an Tensor, it should be an 1-D Tensor .
actual_shape(variable, optional): An 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape
according to this given shape rather than ``shape`` specifying shape.
That is to say ``actual_shape`` has a higher priority
than ``shape(list|tuple)`` but not ``shape(Tensor)``. \
This argument ``actual_shape`` will be removed in a future version. \
Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
are the same variable. Otherwise, the input and output of
``layers.reshape`` are different variable. Default False. Note that if ``x``
is more than one OPs' input, ``inplace`` must be False.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: A reshaped Tensor with the same data type as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
# example 1:
# attr shape is a list which doesn't contain Tensors.
data_1 = fluid.data(
name='data_1', shape=[2, 4, 6], dtype='float32')
reshaped_1 = fluid.layers.reshape(
x=data_1, shape=[-1, 0, 3, 2])
# the shape of reshaped_1 is [2,4,3,2].
# example 2:
# attr shape is a list which contains Tensors.
data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
dim = fluid.layers.fill_constant([1], "int32", 5)
reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
# the shape of reshaped_2 is [5,10].
# example 3:
data_3 = fluid.data(
name="data_3", shape=[2,4,6], dtype='float32')
reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8])
# the shape of reshaped_3 is [6,8].
"""
if in_dygraph_mode():
tmp_tensor_type = core.eager.Tensor
# TODO(zhiqiu): enable inplace in dygraph mode.
if inplace:
warnings.warn(
"Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
)
if isinstance(shape, (list, tuple)):
shape = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in shape
]
out = _C_ops.reshape(x, shape)
elif isinstance(shape, tmp_tensor_type):
# TODO: Tensor shape in reshape has not been tested
shape.stop_gradient = True
out = _C_ops.reshape(x, shape)
else:
raise ValueError(
"shape must be an instance of `list`, `tuple` or `Variable`,"
" got '{}.'".format(type(shape))
)
return dygraph_utils._append_activation_in_dygraph(out, act)
else:
if _in_legacy_dygraph():
tmp_tensor_type = Variable
if inplace:
warnings.warn(
"Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
)
if isinstance(shape, (list, tuple)):
shape = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in shape
]
out, _ = _legacy_C_ops.reshape2(x, None, 'shape', shape)
elif isinstance(shape, tmp_tensor_type):
shape.stop_gradient = True
out, _ = _legacy_C_ops.reshape2(x, shape)
else:
raise ValueError(
"shape must be an instance of `list`, `tuple` or `Variable`,"
" got '{}.'".format(type(shape))
)
return dygraph_utils._append_activation_in_dygraph(out, act)
check_variable_and_dtype(
x,
'x',
[
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'bool',
'uint16',
],
'reshape',
)
check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
helper = LayerHelper("reshape2", **locals())
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one dimension value of 'shape' in reshape can "
"be -1. But received shape[%d] is also -1.\n"
"\n\t# N = x.shape()[2]\t\t# N is an int. "
"(NOT recommend under @to_static)\n\tN = paddle.shape(x)[2]\t\t"
"# N is a Tensor. (Recommend)\n\tz = paddle.reshape([N, -1, 4])"
"\t# z.shape is [-1, -1, 4]\n\n"
" If your target shape in Reshape represents dynamic shape, "
"please turn it into a Tensor under @to_static. See above example for details."
% dim_idx
)
unk_dim_idx = dim_idx
elif dim_size == 0:
assert dim_idx < len(x.shape), (
"The index of 0 in `shape` must be less than "
"the input tensor X's dimensions. "
"But received shape[%d] = 0, X's dimensions = %d."
% (dim_idx, len(x.shape))
)
else:
assert dim_size > 0, (
"Each dimension value of 'shape' in reshape must not "
"be negative except one unknown dimension. "
"But received shape[%d] = %s."
% (dim_idx, str(dim_size))
)
return attrs_shape
inputs = {"X": x}
attrs = {}
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of 'shape' in reshape can't be zero, "
"but received %s." % len(shape)
)
attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape)
elif isinstance(actual_shape, Variable):
actual_shape.stop_gradient = True
inputs["Shape"] = actual_shape
out = (
x
if inplace
else helper.create_variable_for_type_inference(dtype=x.dtype)
)
x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="reshape2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
return helper.append_activation(out)
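Of the examples in the docstring removed above, the one worth calling out is the shape list that mixes Python ints and Tensors, since it carries over to the public API unchanged. A minimal dynamic-mode sketch with illustrative values, not code from this diff:

    import paddle

    data = paddle.full([2, 25], 3, dtype='int32')
    dim = paddle.full([1], 5, dtype='int32')
    # a shape list may mix Python ints with 1-D integer Tensors
    out = paddle.reshape(data, shape=[dim, 10])  # out.shape == [5, 10]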
def squeeze(input, axes, name=None):
"""
This OP will squeeze single-dimensional entries of input tensor's shape. If axes is provided, will
...
@@ -1036,7 +1036,7 @@ class BeamSearchDecoder(Decoder):
x, list(range(2, len(x.shape))) + [0, 1]
) # [..., batch_size, beam_size]
# use 0 to copy to avoid wrong shape
-x = nn.reshape(
+x = paddle.reshape(
x, shape=[0] * (len(x.shape) - 2) + [-1]
) # [..., batch_size * beam_size]
x = nn.transpose(
@@ -1059,7 +1059,7 @@ class BeamSearchDecoder(Decoder):
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._split_batch_beams')
# TODO: avoid fake shape in compile-time like tile_beam_merge_with_batch
-return nn.reshape(x, shape=[-1, self.beam_size] + list(x.shape[1:]))
+return paddle.reshape(x, shape=[-1, self.beam_size] + list(x.shape[1:]))
def _merge_batch_beams(self, x):
r"""
@@ -1076,7 +1076,7 @@ class BeamSearchDecoder(Decoder):
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._merge_batch_beams')
# TODO: avoid fake shape in compile-time like tile_beam_merge_with_batch
-return nn.reshape(x, shape=[-1] + list(x.shape[2:]))
+return paddle.reshape(x, shape=[-1] + list(x.shape[2:]))
def _expand_to_beam_size(self, x):
r"""
@@ -1311,13 +1311,13 @@ class BeamSearchDecoder(Decoder):
)
# TODO: length penalty
scores = log_probs
-scores = nn.reshape(scores, [-1, self.beam_size * self.vocab_size])
+scores = paddle.reshape(scores, [-1, self.beam_size * self.vocab_size])
# TODO: add grad for topk then this beam search can be used to train
topk_scores, topk_indices = paddle.topk(x=scores, k=self.beam_size)
beam_indices = paddle.floor_divide(topk_indices, self.vocab_size_tensor)
token_indices = paddle.remainder(topk_indices, self.vocab_size_tensor)
next_log_probs = self._gather(
-nn.reshape(log_probs, [-1, self.beam_size * self.vocab_size]),
+paddle.reshape(log_probs, [-1, self.beam_size * self.vocab_size]),
topk_indices,
self.batch_size,
)
...
@@ -1948,7 +1948,8 @@ def eye(
if batch_val <= 0:
raise TypeError("batch_shape should be a positive int list")
-from .nn import reshape, expand
+from .nn import expand
+from paddle import reshape
out = reshape(x=out, shape=re_shape)
out = expand(x=out, expand_times=expand_times)
...
@@ -16,6 +16,7 @@ import paddle
from . import layers
from .data_feeder import check_variable_and_dtype, convert_dtype
from ..utils import deprecated
+import paddle
__all__ = [
"simple_img_conv_pool",
@@ -569,7 +570,7 @@ def scaled_dot_product_attention(
# reshape the 3-D input: [batch_size, max_sequence_length, hidden_dim]
# into a 4-D output:
# [batch_size, max_sequence_length, num_heads, hidden_size_per_head].
-reshaped = layers.reshape(
+reshaped = paddle.reshape(
x=x,
shape=list(x.shape[:-1]) + [num_heads, hidden_size // num_heads],
)
@@ -598,7 +599,7 @@ def scaled_dot_product_attention(
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
-return layers.reshape(
+return paddle.reshape(
x=trans_x,
shape=list(
map(
@@ -622,12 +623,10 @@ def scaled_dot_product_attention(
scaled_q = layers.scale(x=q, scale=key_dim_per_head**-0.5)
product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
-weights = layers.reshape(
-x=layers.reshape(
-x=product, shape=[-1, product.shape[-1]], act="softmax"
-),
-shape=product.shape,
-)
+x = paddle.reshape(x=product, shape=[-1, product.shape[-1]])
+x = paddle.nn.functional.softmax(x)
+weights = paddle.reshape(x=x, shape=product.shape)
if dropout_rate:
weights = layers.dropout(
weights, dropout_prob=dropout_rate, is_test=False
...
@@ -140,7 +140,9 @@ def decoder_decode(context, is_sparse):
topk_scores, topk_indices = pd.topk(current_score, k=beam_size)
# calculate accumulated scores after topk to reduce computation cost
accu_scores = pd.elementwise_add(
-x=pd.log(topk_scores), y=pd.reshape(pre_score, shape=[-1]), axis=0
+x=pd.log(topk_scores),
+y=paddle.reshape(pre_score, shape=[-1]),
+axis=0,
)
selected_ids, selected_scores = pd.beam_search(
pre_ids,
...
@@ -55,7 +55,7 @@ class SimpleNet(Layer):
x_emb = self.word_embeddings(x1)
fc = fluid.layers.matmul(x_emb, self.softmax_weight)
fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
-projection = fluid.layers.reshape(fc, shape=[-1, vocab_size])
+projection = paddle.reshape(fc, shape=[-1, vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=y1, soft_label=False
)
@@ -95,7 +95,7 @@ class BiasNet(Layer):
def forward(self, args):
fc, x2 = args
fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
-projection = fluid.layers.reshape(fc, shape=[-1, vocab_size])
+projection = paddle.reshape(fc, shape=[-1, vocab_size])
return projection, x2
...
@@ -59,7 +59,7 @@ class SimpleNet(Layer):
x_emb = self.word_embeddings(x1)
fc = fluid.layers.matmul(x_emb, self.softmax_weight)
fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
-projection = fluid.layers.reshape(fc, shape=[-1, vocab_size])
+projection = paddle.reshape(fc, shape=[-1, vocab_size])
projection = paddle.matmul(projection, self.word_embeddings.weight)
@@ -106,7 +106,7 @@ class BiasNet(Layer):
def forward(self, args):
fc, x2 = args
fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
-projection = fluid.layers.reshape(fc, shape=[-1, vocab_size])
+projection = paddle.reshape(fc, shape=[-1, vocab_size])
return projection, x2
...
@@ -135,7 +135,7 @@ class SqueezeExcitation(fluid.dygraph.Layer):
def forward(self, input):
y = self._pool(input)
-y = fluid.layers.reshape(y, shape=[-1, self._num_channels])
+y = paddle.reshape(y, shape=[-1, self._num_channels])
y = self._squeeze(y)
y = self._excitation(y)
y = fluid.layers.elementwise_mul(x=input, y=y, axis=0)
@@ -326,7 +326,7 @@ class SeResNeXt(fluid.dygraph.Layer):
for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y)
y = self.pool2d_avg(y)
-y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
+y = paddle.reshape(y, shape=[-1, self.pool2d_avg_output])
y = self.out(y)
return y
...
@@ -325,16 +325,16 @@ class MultiHeadAttentionLayer(Layer):
v = self._v_fc(values)
# split head
-reshaped_q = fluid.layers.reshape(
-x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False
+reshaped_q = paddle.reshape(
+x=q, shape=[0, 0, self._n_head, self._d_key]
)
transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
-reshaped_k = fluid.layers.reshape(
-x=k, shape=[0, 0, self._n_head, self._d_key], inplace=False
+reshaped_k = paddle.reshape(
+x=k, shape=[0, 0, self._n_head, self._d_key]
)
transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3])
-reshaped_v = fluid.layers.reshape(
-x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False
+reshaped_v = paddle.reshape(
+x=v, shape=[0, 0, self._n_head, self._d_value]
)
transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3])
@@ -363,10 +363,9 @@ class MultiHeadAttentionLayer(Layer):
if len(out.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = fluid.layers.transpose(out, perm=[0, 2, 1, 3])
-final_out = fluid.layers.reshape(
+final_out = paddle.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
-inplace=False,
)
# fc to output
@@ -839,8 +838,8 @@ class WrapDecoderLayer(Layer):
dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias
)
-dec_output_reshape = fluid.layers.reshape(
-dec_output, shape=[-1, dec_output.shape[-1]], inplace=False
+dec_output_reshape = paddle.reshape(
+dec_output, shape=[-1, dec_output.shape[-1]]
)
if self._weight_sharing:
...
@@ -127,7 +127,7 @@ def train_network(
),
is_sparse=is_sparse,
)
-q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim])
+q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum)
@@ -154,7 +154,7 @@ def train_network(
),
is_sparse=is_sparse,
)
-pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim])
+pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum)
@@ -178,7 +178,7 @@ def train_network(
),
is_sparse=is_sparse,
)
-nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim])
+nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum)
...
@@ -1142,7 +1142,7 @@ def multi_head_attention(
hidden_size = x.shape[-1]
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
-reshaped = layers.reshape(
+reshaped = paddle.reshape(
x=x, shape=[0, 0, n_head, hidden_size // n_head]
)
@@ -1163,7 +1163,7 @@ def multi_head_attention(
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
-return layers.reshape(
+return paddle.reshape(
x=trans_x,
shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]])),
)
@@ -1585,7 +1585,7 @@ def transformer(
)
cost = layers.softmax_with_cross_entropy(
-logits=layers.reshape(predict, shape=[-1, trg_vocab_size]),
+logits=paddle.reshape(predict, shape=[-1, trg_vocab_size]),
label=label,
soft_label=True if label_smooth_eps else False,
)
@@ -1765,7 +1765,7 @@ def fast_decode(
while_op = layers.While(cond)
# array states will be stored for each step.
ids = layers.array_write(
-layers.reshape(start_tokens, (-1, 1)), step_idx
+paddle.reshape(start_tokens, (-1, 1)), step_idx
)
scores = layers.array_write(init_scores, step_idx)
# cell states will be overwrited at each step.
@@ -1790,7 +1790,7 @@ def fast_decode(
]
with while_op.block():
pre_ids = layers.array_read(array=ids, i=step_idx)
-pre_ids = layers.reshape(pre_ids, (-1, 1, 1))
+pre_ids = paddle.reshape(pre_ids, (-1, 1, 1))
pre_scores = layers.array_read(array=scores, i=step_idx)
# sequence_expand can gather sequences according to lod thus can be
# used in beam search to sift states corresponding to selected ids.
@@ -1830,14 +1830,14 @@ def fast_decode(
enc_output=pre_enc_output,
caches=pre_caches,
)
-logits = layers.reshape(logits, (-1, trg_vocab_size))
+logits = paddle.reshape(logits, (-1, trg_vocab_size))
topk_scores, topk_indices = layers.topk(
input=layers.softmax(logits), k=beam_size
)
accu_scores = layers.elementwise_add(
x=layers.log(topk_scores),
-y=layers.reshape(pre_scores, shape=[-1]),
+y=paddle.reshape(pre_scores, shape=[-1]),
axis=0,
)
# beam_search op uses lod to distinguish branches.
...
@@ -295,7 +295,7 @@ class BertModelLayer(Layer):
input=enc_output, axes=[1], starts=[0], ends=[1]
)
next_sent_feat = self.pooled_fc(next_sent_feat)
-next_sent_feat = fluid.layers.reshape(
+next_sent_feat = paddle.reshape(
next_sent_feat, shape=[-1, self._emb_size]
)
@@ -391,7 +391,7 @@ class PretrainModelLayer(Layer):
enc_output, next_sent_feat = self.bert_layer(
src_ids, position_ids, sentence_ids, input_mask
)
-reshaped_emb_out = fluid.layers.reshape(
+reshaped_emb_out = paddle.reshape(
x=enc_output, shape=[-1, self._emb_size]
)
...
@@ -173,7 +173,7 @@ def nested_if_else(x_v):
def nested_if_else_2(x):
-y = fluid.layers.reshape(x, [-1, 1])
+y = paddle.reshape(x, [-1, 1])
b = 2
if b < 1:
# var `z` is not visible for outer scope
@@ -196,7 +196,7 @@ def nested_if_else_2(x):
def nested_if_else_3(x):
-y = fluid.layers.reshape(x, [-1, 1])
+y = paddle.reshape(x, [-1, 1])
b = 2
# var `z` is visible for func.body
if b < 1:
...
@@ -179,10 +179,10 @@ class BaseModel(fluid.dygraph.Layer):
return fluid.layers.transpose(x, [1, 0] + list(range(2, len(x.shape))))
def _merge_batch_beams(self, x):
-return fluid.layers.reshape(x, shape=(-1, x.shape[2]))
+return paddle.reshape(x, shape=(-1, x.shape[2]))
def _split_batch_beams(self, x):
-return fluid.layers.reshape(x, shape=(-1, self.beam_size, x.shape[1]))
+return paddle.reshape(x, shape=(-1, self.beam_size, x.shape[1]))
def _expand_to_beam_size(self, x):
x = fluid.layers.unsqueeze(x, [1])
@@ -454,7 +454,7 @@ class BaseModel(fluid.dygraph.Layer):
log_probs = fluid.layers.elementwise_add(
x=step_log_probs, y=beam_state_log_probs, axis=0
)
-scores = fluid.layers.reshape(
+scores = paddle.reshape(
log_probs, [-1, self.beam_size * self.tar_vocab_size]
)
topk_scores, topk_indices = fluid.layers.topk(
@@ -646,7 +646,7 @@ class AttentionModel(fluid.dygraph.Layer):
return fluid.layers.transpose(x, [1, 0] + list(range(2, len(x.shape))))
def _merge_batch_beams(self, x):
-return fluid.layers.reshape(x, shape=(-1, x.shape[2]))
+return paddle.reshape(x, shape=(-1, x.shape[2]))
def tile_beam_merge_with_batch(self, x):
x = fluid.layers.unsqueeze(x, [1]) # [batch_size, 1, ...]
@@ -657,7 +657,7 @@ class AttentionModel(fluid.dygraph.Layer):
x, list(range(2, len(x.shape))) + [0, 1]
) # [..., batch_size, beam_size]
# use 0 to copy to avoid wrong shape
-x = fluid.layers.reshape(
+x = paddle.reshape(
x, shape=[0] * (len(x.shape) - 2) + [-1]
) # [..., batch_size * beam_size]
x = fluid.layers.transpose(
@@ -666,7 +666,7 @@ class AttentionModel(fluid.dygraph.Layer):
return x
def _split_batch_beams(self, x):
-return fluid.layers.reshape(x, shape=(-1, self.beam_size, x.shape[1]))
+return paddle.reshape(x, shape=(-1, self.beam_size, x.shape[1]))
def _expand_to_beam_size(self, x):
x = fluid.layers.unsqueeze(x, [1])
...
@@ -503,10 +503,10 @@ class BOW(Layer):
# embedding layer
left_emb = self.emb_layer(left)
right_emb = self.emb_layer(right)
-left_emb = fluid.layers.reshape(
+left_emb = paddle.reshape(
left_emb, shape=[-1, self.seq_len, self.bow_dim]
)
-right_emb = fluid.layers.reshape(
+right_emb = paddle.reshape(
right_emb, shape=[-1, self.seq_len, self.bow_dim]
)
...
@@ -284,9 +284,7 @@ class BMN(fluid.dygraph.Layer):
xp = paddle.nn.functional.relu(self.p_conv1(x))
# BM layer
xp = fluid.layers.matmul(xp, self.sample_mask)
-xp = fluid.layers.reshape(
-xp, shape=[0, 0, -1, self.dscale, self.tscale]
-)
+xp = paddle.reshape(xp, shape=[0, 0, -1, self.dscale, self.tscale])
xp = self.p_conv3d1(xp)
xp = fluid.layers.squeeze(xp, axes=[2])
@@ -319,12 +317,8 @@ def bmn_loss_func(
def tem_loss_func(pred_start, pred_end, gt_start, gt_end):
def bi_loss(pred_score, gt_label):
-pred_score = fluid.layers.reshape(
-x=pred_score, shape=[-1], inplace=False
-)
-gt_label = fluid.layers.reshape(
-x=gt_label, shape=[-1], inplace=False
-)
+pred_score = paddle.reshape(x=pred_score, shape=[-1])
+gt_label = paddle.reshape(x=gt_label, shape=[-1])
gt_label.stop_gradient = True
pmask = fluid.layers.cast(x=(gt_label > 0.5), dtype=DATATYPE)
num_entries = fluid.layers.cast(
...
@@ -41,7 +41,7 @@ def func_error_in_compile_time(x):
@paddle.jit.to_static
def func_error_in_compile_time_2(x):
x = fluid.dygraph.to_variable(x)
-x = fluid.layers.reshape(x, shape=[1, 2])
+x = paddle.reshape(x, shape=[1, 2])
return x
@@ -49,7 +49,7 @@ def func_error_in_compile_time_2(x):
def func_error_in_runtime(x):
x = fluid.dygraph.to_variable(x)
two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")
-x = fluid.layers.reshape(x, shape=[1, two])
+x = paddle.reshape(x, shape=[1, two])
return x
@@ -101,7 +101,7 @@ def func_error_in_runtime_with_empty_line(x):
x = fluid.dygraph.to_variable(x)
two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")
-x = fluid.layers.reshape(x, shape=[1, two])
+x = paddle.reshape(x, shape=[1, two])
return x
@@ -290,7 +290,7 @@ class TestErrorStaticLayerCallInCompiletime_2(
),
'def func_error_in_compile_time_2(x):',
'x = fluid.dygraph.to_variable(x)',
-'x = fluid.layers.reshape(x, shape=[1, 2])',
+'x = paddle.reshape(x, shape=[1, 2])',
'<--- HERE',
'return x',
]
@@ -340,7 +340,7 @@ class TestErrorStaticLayerCallInRuntime(TestErrorStaticLayerCallInCompiletime):
),
'x = fluid.dygraph.to_variable(x)',
'two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")',
-'x = fluid.layers.reshape(x, shape=[1, two])',
+'x = paddle.reshape(x, shape=[1, two])',
'<--- HERE',
'return x',
]
@@ -356,7 +356,7 @@ class TestErrorStaticLayerCallInRuntime2(TestErrorStaticLayerCallInRuntime):
self.filepath
),
'two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")',
-'x = fluid.layers.reshape(x, shape=[1, two])',
+'x = paddle.reshape(x, shape=[1, two])',
'<--- HERE',
'return x',
]
...
@@ -87,13 +87,9 @@ class DynamicGRU(fluid.dygraph.Layer):
input_ = fluid.layers.slice(
inputs, axes=[1], starts=[j], ends=[j + 1]
)
-input_ = fluid.layers.reshape(
-input_, [-1, input_.shape[2]], inplace=False
-)
+input_ = paddle.reshape(input_, [-1, input_.shape[2]])
hidden, reset, gate = self.gru_unit(input_, hidden)
-hidden_ = fluid.layers.reshape(
-hidden, [-1, 1, hidden.shape[1]], inplace=False
-)
+hidden_ = paddle.reshape(hidden, [-1, 1, hidden.shape[1]])
res.append(hidden_)
if self.is_reverse:
...
@@ -125,7 +125,7 @@ class MNIST(fluid.dygraph.Layer):
def inference(self, inputs):
x = self._simple_img_conv_pool_1(inputs)
x = self._simple_img_conv_pool_2(x)
-x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape])
+x = paddle.reshape(x, shape=[-1, self.pool_2_shape])
x = self._fc(x)
return x
...
@@ -271,7 +271,7 @@ class MobileNetV1(fluid.dygraph.Layer):
for dws in self.dwsl:
y = dws(y)
y = self.pool2d_avg(y)
-y = fluid.layers.reshape(y, shape=[-1, 1024])
+y = paddle.reshape(y, shape=[-1, 1024])
y = self.out(y)
return y
@@ -438,7 +438,7 @@ class MobileNetV2(fluid.dygraph.Layer):
y = inv(y)
y = self._conv9(y, if_act=True)
y = self._pool2d_avg(y)
-y = fluid.layers.reshape(y, shape=[-1, self._out_c])
+y = paddle.reshape(y, shape=[-1, self._out_c])
y = self._fc(y)
return y
...
@@ -186,7 +186,7 @@ class GPT2LMHeadModel(fluid.dygraph.Layer):
@declarative
def forward(self, x):
-x = fluid.layers.reshape(x, shape=[-1, 6])
+x = paddle.reshape(x, shape=[-1, 6])
x1, x2, x3 = fluid.layers.split(input=x, dim=1, num_or_sections=3)
return x1
...
@@ -115,16 +115,16 @@ class SimpleLSTMRNN(fluid.Layer):
)
res.append(step_input)
real_res = fluid.layers.concat(res, 1)
-real_res = fluid.layers.reshape(
+real_res = paddle.reshape(
real_res, [-1, self._num_steps, self._hidden_size]
)
last_hidden = fluid.layers.concat(hidden_array, 1)
-last_hidden = fluid.layers.reshape(
+last_hidden = paddle.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size]
)
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(cell_array, 1)
-last_cell = fluid.layers.reshape(
+last_cell = paddle.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size]
)
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
@@ -189,17 +189,17 @@ class PtbModel(fluid.Layer):
@declarative
def forward(self, input, label, init_hidden, init_cell):
-init_h = fluid.layers.reshape(
+init_h = paddle.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size]
)
-init_c = fluid.layers.reshape(
+init_c = paddle.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size]
)
x_emb = self.embedding(input)
-x_emb = fluid.layers.reshape(
+x_emb = paddle.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size]
)
if self.dropout is not None and self.dropout > 0.0:
@@ -218,7 +218,7 @@ class PtbModel(fluid.Layer):
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False
)
-loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
+loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss)
...
@@ -41,7 +41,7 @@ class Policy(Layer):
@declarative
def forward(self, x):
-x = fluid.layers.reshape(x, shape=[1, 4])
+x = paddle.reshape(x, shape=[1, 4])
x = self.affine1(x)
x = fluid.layers.dropout(x, self.dropout_ratio)
x = fluid.layers.relu(x)
...
@@ -211,7 +211,7 @@ class ResNet(fluid.dygraph.Layer):
for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y)
y = self.pool2d_avg(y)
-y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
+y = paddle.reshape(y, shape=[-1, self.pool2d_avg_output])
pred = self.out(y)
return pred
...
@@ -148,7 +148,7 @@ class SqueezeExcitation(fluid.dygraph.Layer):
def forward(self, input):
y = self._pool(input)
-y = fluid.layers.reshape(y, shape=[-1, self._num_channels])
+y = paddle.reshape(y, shape=[-1, self._num_channels])
y = self._fc(y)
y = self._excitation(y)
y = fluid.layers.elementwise_mul(x=input, y=y, axis=0)
@@ -344,7 +344,7 @@ class SeResNeXt(fluid.dygraph.Layer):
y = self.pool2d_avg(y)
y = fluid.layers.dropout(y, dropout_prob=0.5, seed=100)
-y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
+y = paddle.reshape(y, shape=[-1, self.pool2d_avg_output])
out = self.out(y)
softmax_out = fluid.layers.softmax(out)
...
@@ -53,7 +53,7 @@ class SimpleConvPool(fluid.dygraph.Layer):
def forward(self, inputs):
x = paddle.tanh(self._conv2d(inputs))
x = fluid.layers.reduce_max(x, dim=-1)
-x = fluid.layers.reshape(x, shape=[self.batch_size, -1])
+x = paddle.reshape(x, shape=[self.batch_size, -1])
return x return x
...@@ -92,12 +92,12 @@ class CNN(fluid.dygraph.Layer): ...@@ -92,12 +92,12 @@ class CNN(fluid.dygraph.Layer):
@declarative @declarative
def forward(self, inputs, label=None): def forward(self, inputs, label=None):
emb = self.embedding(inputs) emb = self.embedding(inputs)
o_np_mask = ( o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim dtype='float32'
).astype(dtype='float32') )
mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
emb = emb * mask_emb emb = emb * mask_emb
emb = fluid.layers.reshape( emb = paddle.reshape(
emb, shape=[-1, self.channels, self.seq_len, self.hid_dim] emb, shape=[-1, self.channels, self.seq_len, self.hid_dim]
) )
conv_3 = self._simple_conv_pool_1(emb) conv_3 = self._simple_conv_pool_1(emb)
...@@ -138,12 +138,12 @@ class BOW(fluid.dygraph.Layer): ...@@ -138,12 +138,12 @@ class BOW(fluid.dygraph.Layer):
@declarative @declarative
def forward(self, inputs, label=None): def forward(self, inputs, label=None):
emb = self.embedding(inputs) emb = self.embedding(inputs)
o_np_mask = ( o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim dtype='float32'
).astype(dtype='float32') )
mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
emb = emb * mask_emb emb = emb * mask_emb
emb = fluid.layers.reshape(emb, shape=[-1, self.seq_len, self.hid_dim]) emb = paddle.reshape(emb, shape=[-1, self.seq_len, self.hid_dim])
bow_1 = fluid.layers.reduce_sum(emb, dim=1) bow_1 = fluid.layers.reduce_sum(emb, dim=1)
bow_1 = paddle.tanh(bow_1) bow_1 = paddle.tanh(bow_1)
fc_1 = self._fc1(bow_1) fc_1 = self._fc1(bow_1)
...@@ -186,14 +186,12 @@ class GRU(fluid.dygraph.Layer): ...@@ -186,14 +186,12 @@ class GRU(fluid.dygraph.Layer):
@declarative @declarative
def forward(self, inputs, label=None): def forward(self, inputs, label=None):
emb = self.embedding(inputs) emb = self.embedding(inputs)
o_np_mask = ( o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim 'float32'
).astype('float32') )
mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
emb = emb * mask_emb emb = emb * mask_emb
emb = fluid.layers.reshape( emb = paddle.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
emb, shape=[self.batch_size, -1, self.hid_dim]
)
fc_1 = self._fc1(emb) fc_1 = self._fc1(emb)
gru_hidden = self._gru(fc_1) gru_hidden = self._gru(fc_1)
gru_hidden = fluid.layers.reduce_max(gru_hidden, dim=1) gru_hidden = fluid.layers.reduce_max(gru_hidden, dim=1)
...@@ -242,14 +240,12 @@ class BiGRU(fluid.dygraph.Layer): ...@@ -242,14 +240,12 @@ class BiGRU(fluid.dygraph.Layer):
@declarative @declarative
def forward(self, inputs, label=None): def forward(self, inputs, label=None):
emb = self.embedding(inputs) emb = self.embedding(inputs)
o_np_mask = ( o_np_mask = (paddle.reshape(inputs, [-1, 1]) != self.dict_dim).astype(
fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim 'float32'
).astype('float32') )
mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim])
emb = emb * mask_emb emb = emb * mask_emb
emb = fluid.layers.reshape( emb = paddle.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
emb, shape=[self.batch_size, -1, self.hid_dim]
)
fc_1 = self._fc1(emb) fc_1 = self._fc1(emb)
gru_forward = self._gru_forward(fc_1) gru_forward = self._gru_forward(fc_1)
gru_backward = self._gru_backward(fc_1) gru_backward = self._gru_backward(fc_1)
......
...@@ -145,9 +145,9 @@ def train(conf_dict, to_static): ...@@ -145,9 +145,9 @@ def train(conf_dict, to_static):
) )
for left, pos_right, neg_right in train_loader(): for left, pos_right, neg_right in train_loader():
left = fluid.layers.reshape(left, shape=[-1, 1]) left = paddle.reshape(left, shape=[-1, 1])
pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1]) pos_right = paddle.reshape(pos_right, shape=[-1, 1])
neg_right = fluid.layers.reshape(neg_right, shape=[-1, 1]) neg_right = paddle.reshape(neg_right, shape=[-1, 1])
net.train() net.train()
global_step += 1 global_step += 1
left_feat, pos_score = net(left, pos_right) left_feat, pos_score = net(left, pos_right)
......
...@@ -22,7 +22,7 @@ from paddle.fluid.dygraph.jit import declarative ...@@ -22,7 +22,7 @@ from paddle.fluid.dygraph.jit import declarative
def dyfunc_tensor_shape_1(x): def dyfunc_tensor_shape_1(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
res = fluid.layers.reshape(x, shape=x.shape) res = paddle.reshape(x, shape=x.shape)
return res return res
...@@ -38,13 +38,13 @@ def dyfunc_tensor_shape_3(x): ...@@ -38,13 +38,13 @@ def dyfunc_tensor_shape_3(x):
# y.shape gets transformed, but the original y.shape is used at runtime because y is not a Tensor # y.shape gets transformed, but the original y.shape is used at runtime because y is not a Tensor
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
y = np.ones(5) y = np.ones(5)
res = fluid.layers.reshape(x, shape=y.shape) res = paddle.reshape(x, shape=y.shape)
return res return res
def dyfunc_tensor_shape_4(x): def dyfunc_tensor_shape_4(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
res = fluid.layers.reshape(x, shape=(-1, x.shape[0], len(x.shape))) res = paddle.reshape(x, shape=(-1, x.shape[0], len(x.shape)))
return res return res
...@@ -54,7 +54,7 @@ def dyfunc_tensor_shape_5(x): ...@@ -54,7 +54,7 @@ def dyfunc_tensor_shape_5(x):
# paddle.jit.dy2static.convert_var_shape(x)[0]))` # paddle.jit.dy2static.convert_var_shape(x)[0]))`
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
s = x.shape[0] s = x.shape[0]
res = fluid.layers.reshape(x, shape=(-1, s)) res = paddle.reshape(x, shape=(-1, s))
return res return res
...@@ -64,7 +64,7 @@ def dyfunc_tensor_shape_6(x): ...@@ -64,7 +64,7 @@ def dyfunc_tensor_shape_6(x):
# paddle.jit.dy2static.convert_var_shape(x)[0:]))` # paddle.jit.dy2static.convert_var_shape(x)[0:]))`
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
s = x.shape[0:] s = x.shape[0:]
res = fluid.layers.reshape(x, shape=s) res = paddle.reshape(x, shape=s)
return res return res
...@@ -103,7 +103,7 @@ def dyfunc_paddle_shape_api(x): ...@@ -103,7 +103,7 @@ def dyfunc_paddle_shape_api(x):
def dyfunc_with_if_1(x): def dyfunc_with_if_1(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
res = fluid.layers.reshape(x, [-1, 1]) res = paddle.reshape(x, [-1, 1])
x_shape_0 = x.shape[0] x_shape_0 = x.shape[0]
if x_shape_0 < 1: if x_shape_0 < 1:
# `res.shape[0]` is transformed into # `res.shape[0]` is transformed into
......
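Note on the dy2static cases above: they rely on paddle.reshape accepting a plain list or tuple for shape, which may be built from x.shape, a numpy shape, or a mix of -1 and known dimensions. A small sketch of that usage, run eagerly for illustration (values are made up):

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.arange(12).reshape(3, 4), dtype='float32')
    a = paddle.reshape(x, shape=x.shape)            # shape taken from the tensor itself
    b = paddle.reshape(x, shape=(-1, x.shape[0]))   # mix of -1 and a known dimension
    print(a.shape, b.shape)                          # [3, 4] [4, 3]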
...@@ -200,16 +200,16 @@ class TSM_ResNet(fluid.dygraph.Layer): ...@@ -200,16 +200,16 @@ class TSM_ResNet(fluid.dygraph.Layer):
@declarative @declarative
def forward(self, inputs): def forward(self, inputs):
y = fluid.layers.reshape(inputs, [-1] + self.reshape_list) y = paddle.reshape(inputs, [-1] + self.reshape_list)
y = self.conv(y) y = self.conv(y)
y = self.pool2d_max(y) y = self.pool2d_max(y)
for bottleneck_block in self.bottleneck_block_list: for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y) y = bottleneck_block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.dropout(y, dropout_prob=0.5) y = fluid.layers.dropout(y, dropout_prob=0.5)
y = fluid.layers.reshape(y, [-1, self.seg_num, y.shape[1]]) y = paddle.reshape(y, [-1, self.seg_num, y.shape[1]])
y = fluid.layers.reduce_mean(y, dim=1) y = fluid.layers.reduce_mean(y, dim=1)
y = fluid.layers.reshape(y, shape=[-1, 2048]) y = paddle.reshape(y, shape=[-1, 2048])
y = self.out(y) y = self.out(y)
return y return y
......
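Note on the TSM forward above: reshape folds the segment axis into the batch for the 2-D backbone and later unfolds it before temporal averaging. A reduced sketch of that unfold step (sizes are illustrative):

    import paddle

    seg_num, channels = 8, 2048
    y = paddle.rand([3 * seg_num, channels])        # frame features with segments folded into batch
    y = paddle.reshape(y, [-1, seg_num, channels])  # unfold to [videos, segments, channels]
    y = paddle.mean(y, axis=1)                      # temporal average over segments -> [videos, channels]
    print(y.shape)                                  # [3, 2048]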
...@@ -138,11 +138,11 @@ class MultiHeadAttention(Layer): ...@@ -138,11 +138,11 @@ class MultiHeadAttention(Layer):
k = self.k_fc(keys) k = self.k_fc(keys)
v = self.v_fc(values) v = self.v_fc(values)
# split head # split head
q = layers.reshape(x=q, shape=[0, 0, self.n_head, self.d_key]) q = paddle.reshape(x=q, shape=[0, 0, self.n_head, self.d_key])
q = layers.transpose(x=q, perm=[0, 2, 1, 3]) q = layers.transpose(x=q, perm=[0, 2, 1, 3])
k = layers.reshape(x=k, shape=[0, 0, self.n_head, self.d_key]) k = paddle.reshape(x=k, shape=[0, 0, self.n_head, self.d_key])
k = layers.transpose(x=k, perm=[0, 2, 1, 3]) k = layers.transpose(x=k, perm=[0, 2, 1, 3])
v = layers.reshape(x=v, shape=[0, 0, self.n_head, self.d_value]) v = paddle.reshape(x=v, shape=[0, 0, self.n_head, self.d_value])
v = layers.transpose(x=v, perm=[0, 2, 1, 3]) v = layers.transpose(x=v, perm=[0, 2, 1, 3])
if cache is not None: if cache is not None:
...@@ -161,7 +161,7 @@ class MultiHeadAttention(Layer): ...@@ -161,7 +161,7 @@ class MultiHeadAttention(Layer):
weights = layers.dropout(weights, dropout_prob=self.dropout_rate) weights = layers.dropout(weights, dropout_prob=self.dropout_rate)
out = layers.matmul(weights, v) out = layers.matmul(weights, v)
out = layers.transpose(out, perm=[0, 2, 1, 3]) out = layers.transpose(out, perm=[0, 2, 1, 3])
out = layers.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]]) out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
out = self.proj_fc(out) out = self.proj_fc(out)
return out return out
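Note on the split-head reshapes above: a 0 in the target shape of paddle.reshape copies the corresponding dimension of the input (and -1 infers one dimension), so [0, 0, n_head, d_key] keeps the batch and sequence axes untouched. A standalone sketch of that pattern, with made-up head sizes:

    import paddle

    n_head, d_key = 2, 4
    q = paddle.rand([8, 16, n_head * d_key])        # [batch, seq_len, d_model]
    q = paddle.reshape(q, [0, 0, n_head, d_key])    # 0 keeps batch and seq_len as-is
    q = paddle.transpose(q, perm=[0, 2, 1, 3])      # -> [batch, n_head, seq_len, d_key]
    print(q.shape)                                  # [8, 2, 16, 4]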
...@@ -557,7 +557,7 @@ class WrapDecoder(Layer): ...@@ -557,7 +557,7 @@ class WrapDecoder(Layer):
dec_output = self.decoder( dec_output = self.decoder(
dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias, caches dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias, caches
) )
dec_output = layers.reshape( dec_output = paddle.reshape(
dec_output, dec_output,
shape=[-1, dec_output.shape[-1]], shape=[-1, dec_output.shape[-1]],
) )
...@@ -694,7 +694,7 @@ class Transformer(Layer): ...@@ -694,7 +694,7 @@ class Transformer(Layer):
max_len=256, max_len=256,
): ):
def expand_to_beam_size(tensor, beam_size): def expand_to_beam_size(tensor, beam_size):
tensor = layers.reshape( tensor = paddle.reshape(
tensor, [tensor.shape[0], 1] + list(tensor.shape[1:]) tensor, [tensor.shape[0], 1] + list(tensor.shape[1:])
) )
tile_dims = [1] * len(tensor.shape) tile_dims = [1] * len(tensor.shape)
...@@ -709,7 +709,7 @@ class Transformer(Layer): ...@@ -709,7 +709,7 @@ class Transformer(Layer):
+ list(range(0, var_dim_in_state)), + list(range(0, var_dim_in_state)),
) )
tensor = layers.reshape( tensor = paddle.reshape(
tensor, tensor,
[0] * (len(tensor.shape) - var_dim_in_state) [0] * (len(tensor.shape) - var_dim_in_state)
+ [batch_size * beam_size], + [batch_size * beam_size],
...@@ -733,7 +733,7 @@ class Transformer(Layer): ...@@ -733,7 +733,7 @@ class Transformer(Layer):
list(range(var_dim_in_state, len(tensor.shape))) list(range(var_dim_in_state, len(tensor.shape)))
+ list(range(0, var_dim_in_state)), + list(range(0, var_dim_in_state)),
) )
tensor = layers.reshape( tensor = paddle.reshape(
tensor, tensor,
[0] * (len(tensor.shape) - var_dim_in_state) [0] * (len(tensor.shape) - var_dim_in_state)
+ [batch_size, beam_size], + [batch_size, beam_size],
...@@ -849,7 +849,7 @@ class Transformer(Layer): ...@@ -849,7 +849,7 @@ class Transformer(Layer):
log_probs = layers.elementwise_add( log_probs = layers.elementwise_add(
x=step_log_probs, y=log_probs, axis=0 x=step_log_probs, y=log_probs, axis=0
) )
log_probs = layers.reshape( log_probs = paddle.reshape(
log_probs, [-1, beam_size * self.trg_vocab_size] log_probs, [-1, beam_size * self.trg_vocab_size]
) )
scores = log_probs scores = log_probs
...@@ -868,7 +868,7 @@ class Transformer(Layer): ...@@ -868,7 +868,7 @@ class Transformer(Layer):
finished = layers.logical_or( finished = layers.logical_or(
finished, layers.equal(token_indices, end_token_tensor) finished, layers.equal(token_indices, end_token_tensor)
) )
trg_word = layers.reshape(token_indices, [-1, 1]) trg_word = paddle.reshape(token_indices, [-1, 1])
predict_ids.append(token_indices) predict_ids.append(token_indices)
parent_ids.append(beam_indices) parent_ids.append(beam_indices)
......
...@@ -50,7 +50,7 @@ class TestBase(IPUOpTest): ...@@ -50,7 +50,7 @@ class TestBase(IPUOpTest):
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
) )
add = paddle.fluid.layers.elementwise_add(x, x) add = paddle.fluid.layers.elementwise_add(x, x)
out = paddle.fluid.layers.reshape(add, **self.attrs) out = paddle.reshape(add, **self.attrs)
self.fetch_list = [out.name] self.fetch_list = [out.name]
def run_model(self, exec_mode): def run_model(self, exec_mode):
......
...@@ -47,7 +47,7 @@ class TestBase(IPUOpTest): ...@@ -47,7 +47,7 @@ class TestBase(IPUOpTest):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
) )
out = paddle.fluid.layers.reshape(x=x, **self.attrs) out = paddle.reshape(x=x, **self.attrs)
self.fetch_list = [out.name] self.fetch_list = [out.name]
def run_model(self, exec_mode): def run_model(self, exec_mode):
......
...@@ -59,7 +59,7 @@ class TestBase(IPUOpTest): ...@@ -59,7 +59,7 @@ class TestBase(IPUOpTest):
dtype=self.feed_dtype[0], dtype=self.feed_dtype[0],
) )
add1 = paddle.fluid.layers.elementwise_add(x, x) add1 = paddle.fluid.layers.elementwise_add(x, x)
reshape = paddle.fluid.layers.reshape(add1, **self.attrs) reshape = paddle.reshape(add1, **self.attrs)
add2 = paddle.fluid.layers.elementwise_add(reshape, reshape) add2 = paddle.fluid.layers.elementwise_add(reshape, reshape)
scale1 = paddle.fluid.layers.scale(add2) scale1 = paddle.fluid.layers.scale(add2)
scale2 = paddle.fluid.layers.scale(scale1, scale=1.3, bias=0.5) scale2 = paddle.fluid.layers.scale(scale1, scale=1.3, bias=0.5)
......
...@@ -17,6 +17,7 @@ import numpy as np ...@@ -17,6 +17,7 @@ import numpy as np
from inference_pass_test import InferencePassTest from inference_pass_test import InferencePassTest
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import PassVersionChecker
import paddle
class TestMKLDNNCpuBfloat16Pass(InferencePassTest): class TestMKLDNNCpuBfloat16Pass(InferencePassTest):
...@@ -27,7 +28,7 @@ class TestMKLDNNCpuBfloat16Pass(InferencePassTest): ...@@ -27,7 +28,7 @@ class TestMKLDNNCpuBfloat16Pass(InferencePassTest):
name='x', shape=[-1] + self.shape_x, dtype=self.d_type name='x', shape=[-1] + self.shape_x, dtype=self.d_type
) )
out = fluid.layers.transpose(x, perm=[0, 1, 2, 3]) out = fluid.layers.transpose(x, perm=[0, 1, 2, 3])
out = fluid.layers.reshape(out, [0, 0, 0, 0]) out = paddle.reshape(out, [0, 0, 0, 0])
out = fluid.layers.fc(out, size=1) out = fluid.layers.fc(out, size=1)
self.feeds = { self.feeds = {
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
import unittest import unittest
import numpy as np import numpy as np
import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from inference_pass_test import InferencePassTest from inference_pass_test import InferencePassTest
...@@ -37,9 +37,7 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest): ...@@ -37,9 +37,7 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest):
) )
out = fluid.layers.matmul(x, y) out = fluid.layers.matmul(x, y)
out = fluid.layers.transpose(out, perm=[0, 2, 1, 3]) out = fluid.layers.transpose(out, perm=[0, 2, 1, 3])
out = fluid.layers.reshape( out = paddle.reshape(out, [0, 0, self.shape_y[0] * self.shape_y[2]])
out, [0, 0, self.shape_y[0] * self.shape_y[2]]
)
out = fluid.layers.relu(out) out = fluid.layers.relu(out)
return out return out
...@@ -80,7 +78,7 @@ class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp): ...@@ -80,7 +78,7 @@ class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp):
) )
out = fluid.layers.matmul(x, y) out = fluid.layers.matmul(x, y)
out = fluid.layers.transpose(out, perm=[0, 1, 2, 3]) out = fluid.layers.transpose(out, perm=[0, 1, 2, 3])
out = fluid.layers.reshape(out, [0, 0, 0, 0]) out = paddle.reshape(out, [0, 0, 0, 0])
out = fluid.layers.fc(out, size=1) out = fluid.layers.fc(out, size=1)
return out return out
...@@ -106,9 +104,7 @@ class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp): ...@@ -106,9 +104,7 @@ class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp):
out = fluid.layers.transpose( out = fluid.layers.transpose(
out, perm=[0, 1, 2, 3] out, perm=[0, 1, 2, 3]
) # breaks pattern ) # breaks pattern
out = fluid.layers.reshape( out = paddle.reshape(out, [0, 0, self.shape_y[0] * self.shape_y[2]])
out, [0, 0, self.shape_y[0] * self.shape_y[2]]
)
out = fluid.layers.relu(out) out = fluid.layers.relu(out)
return out return out
......
...@@ -33,7 +33,7 @@ class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest): ...@@ -33,7 +33,7 @@ class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest):
weight = fluid.layers.create_parameter( weight = fluid.layers.create_parameter(
shape=self.weight_shape, dtype="float32" shape=self.weight_shape, dtype="float32"
) )
reshape = fluid.layers.reshape(data, shape=self.reshape_shape) reshape = paddle.reshape(data, shape=self.reshape_shape)
transpose = fluid.layers.transpose(reshape, self.tranpose_perm) transpose = fluid.layers.transpose(reshape, self.tranpose_perm)
matmul = paddle.matmul( matmul = paddle.matmul(
transpose, transpose,
......
...@@ -30,9 +30,9 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): ...@@ -30,9 +30,9 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
self.data = fluid.data( self.data = fluid.data(
name='data', shape=[1, 28, 28], dtype='float32' name='data', shape=[1, 28, 28], dtype='float32'
) )
data_reshape = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
conv_out = fluid.layers.conv2d( conv_out = fluid.layers.conv2d(
input=data_reshape, input=data_reshape,
num_filters=self.conv_num_filters, num_filters=self.conv_num_filters,
...@@ -44,13 +44,13 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): ...@@ -44,13 +44,13 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
act=None, act=None,
) )
if self.conv_padding == [1, 1]: if self.conv_padding == [1, 1]:
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816]) cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
elif self.conv_padding == 'VALID': elif self.conv_padding == 'VALID':
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 7744]) cout = paddle.reshape(conv_out, shape=[1, 1, 7744])
elif self.conv_padding == 'SAME': elif self.conv_padding == 'SAME':
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 12544]) cout = paddle.reshape(conv_out, shape=[1, 1, 12544])
elif self.conv_groups == 4: elif self.conv_groups == 4:
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816]) cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
result = fluid.layers.relu(cout) result = fluid.layers.relu(cout)
loss = fluid.layers.cross_entropy(input=result, label=label_shape) loss = fluid.layers.cross_entropy(input=result, label=label_shape)
avg_loss = paddle.mean(loss) avg_loss = paddle.mean(loss)
...@@ -140,9 +140,9 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): ...@@ -140,9 +140,9 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
self.data = fluid.data( self.data = fluid.data(
name='data', shape=[1, 28, 28], dtype='float32' name='data', shape=[1, 28, 28], dtype='float32'
) )
data_reshape = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
conv_out = fluid.layers.conv2d( conv_out = fluid.layers.conv2d(
input=data_reshape, input=data_reshape,
num_filters=self.conv_num_filters, num_filters=self.conv_num_filters,
...@@ -153,7 +153,7 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): ...@@ -153,7 +153,7 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
use_cudnn=self.use_cudnn, use_cudnn=self.use_cudnn,
act=None, act=None,
) )
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816]) cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
result = fluid.layers.relu(cout) result = fluid.layers.relu(cout)
loss = fluid.layers.cross_entropy(input=result, label=label_shape) loss = fluid.layers.cross_entropy(input=result, label=label_shape)
avg_loss = paddle.mean(loss) avg_loss = paddle.mean(loss)
...@@ -234,9 +234,9 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest): ...@@ -234,9 +234,9 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest):
self.data = fluid.data( self.data = fluid.data(
name='data', shape=[1, 28, 28], dtype='float32' name='data', shape=[1, 28, 28], dtype='float32'
) )
data_reshape = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
conv_out = fluid.layers.conv2d_transpose( conv_out = fluid.layers.conv2d_transpose(
input=data_reshape, input=data_reshape,
num_filters=self.conv_num_filters, num_filters=self.conv_num_filters,
...@@ -248,13 +248,13 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest): ...@@ -248,13 +248,13 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest):
act=None, act=None,
) )
if self.conv_padding == [1, 1]: if self.conv_padding == [1, 1]:
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 14400]) cout = paddle.reshape(conv_out, shape=[1, 1, 14400])
elif self.conv_padding == 'VALID': elif self.conv_padding == 'VALID':
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 18496]) cout = paddle.reshape(conv_out, shape=[1, 1, 18496])
elif self.conv_padding == 'SAME': elif self.conv_padding == 'SAME':
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 12544]) cout = paddle.reshape(conv_out, shape=[1, 1, 12544])
elif self.conv_groups == 4: elif self.conv_groups == 4:
cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816]) cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
result = fluid.layers.relu(cout) result = fluid.layers.relu(cout)
loss = fluid.layers.cross_entropy(input=result, label=label_shape) loss = fluid.layers.cross_entropy(input=result, label=label_shape)
avg_loss = paddle.mean(loss) avg_loss = paddle.mean(loss)
......
...@@ -102,7 +102,7 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest): ...@@ -102,7 +102,7 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest):
bias_attr=False, bias_attr=False,
act=None, act=None,
) )
c_out = fluid.layers.reshape(fc_out, shape=[0, 784]) c_out = paddle.reshape(fc_out, shape=[0, 784])
result = fluid.layers.relu(c_out) result = fluid.layers.relu(c_out)
loss = fluid.layers.cross_entropy(input=result, label=self.label) loss = fluid.layers.cross_entropy(input=result, label=self.label)
avg_loss = paddle.mean(loss) avg_loss = paddle.mean(loss)
...@@ -162,8 +162,8 @@ class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest): ...@@ -162,8 +162,8 @@ class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest):
name='data', shape=[1, 28, 28], dtype='float32' name='data', shape=[1, 28, 28], dtype='float32'
) )
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
reshape_out = fluid.layers.reshape(self.data, shape=[1, 14, 14, 4]) reshape_out = paddle.reshape(self.data, shape=[1, 14, 14, 4])
fc_out = fluid.layers.fc( fc_out = fluid.layers.fc(
input=reshape_out, input=reshape_out,
size=14, size=14,
...@@ -171,7 +171,7 @@ class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest): ...@@ -171,7 +171,7 @@ class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest):
bias_attr=False, bias_attr=False,
act=None, act=None,
) )
c_out = fluid.layers.reshape(fc_out, shape=[1, 1, 2744]) c_out = paddle.reshape(fc_out, shape=[1, 1, 2744])
result = fluid.layers.relu(c_out) result = fluid.layers.relu(c_out)
loss = fluid.layers.cross_entropy(input=result, label=label_shape) loss = fluid.layers.cross_entropy(input=result, label=label_shape)
avg_loss = paddle.mean(loss) avg_loss = paddle.mean(loss)
......
...@@ -126,7 +126,7 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest): ...@@ -126,7 +126,7 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest):
name='data', shape=[1, 28, 28], dtype='float32' name='data', shape=[1, 28, 28], dtype='float32'
) )
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
reshape_out = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) reshape_out = paddle.reshape(self.data, shape=[1, 4, 14, 14])
matmul_out = fluid.layers.matmul( matmul_out = fluid.layers.matmul(
x=reshape_out, x=reshape_out,
y=reshape_out, y=reshape_out,
......
...@@ -22,6 +22,7 @@ from paddle.fluid.framework import in_dygraph_mode ...@@ -22,6 +22,7 @@ from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import AnalysisConfig
import paddle
def multiclass_nms( def multiclass_nms(
...@@ -235,7 +236,7 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): ...@@ -235,7 +236,7 @@ class TensorRTMultiClassNMS3Test(InferencePassTest):
nms_eta=self.nms_eta, nms_eta=self.nms_eta,
) )
mutliclass_nms_out = multiclass_nms_out + 1.0 mutliclass_nms_out = multiclass_nms_out + 1.0
multiclass_nms_out = fluid.layers.reshape( multiclass_nms_out = paddle.reshape(
multiclass_nms_out, multiclass_nms_out,
[self.bs, 1, self.keep_top_k, 6], [self.bs, 1, self.keep_top_k, 6],
name='reshape', name='reshape',
......
...@@ -20,6 +20,7 @@ import paddle.fluid as fluid ...@@ -20,6 +20,7 @@ import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import AnalysisConfig
import paddle
class TensorRTMultiClassNMSTest(InferencePassTest): class TensorRTMultiClassNMSTest(InferencePassTest):
...@@ -62,7 +63,7 @@ class TensorRTMultiClassNMSTest(InferencePassTest): ...@@ -62,7 +63,7 @@ class TensorRTMultiClassNMSTest(InferencePassTest):
normalized=self.normalized, normalized=self.normalized,
) )
mutliclass_nms_out = multiclass_nms_out + 1.0 mutliclass_nms_out = multiclass_nms_out + 1.0
multiclass_nms_out = fluid.layers.reshape( multiclass_nms_out = paddle.reshape(
multiclass_nms_out, multiclass_nms_out,
[self.bs, 1, self.keep_top_k, 6], [self.bs, 1, self.keep_top_k, 6],
name='reshape', name='reshape',
......
...@@ -19,6 +19,7 @@ import paddle.fluid as fluid ...@@ -19,6 +19,7 @@ import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import AnalysisConfig
import paddle
class TRTReshapeTest(InferencePassTest): class TRTReshapeTest(InferencePassTest):
...@@ -48,7 +49,7 @@ class TRTReshapeTest(InferencePassTest): ...@@ -48,7 +49,7 @@ class TRTReshapeTest(InferencePassTest):
self.fetch_list = [out] self.fetch_list = [out]
def append_reshape(self, data, reshape): def append_reshape(self, data, reshape):
return fluid.layers.reshape(data, reshape) return paddle.reshape(data, reshape)
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
...@@ -101,7 +102,7 @@ class TRTReshapeTest2(TRTReshapeTest): ...@@ -101,7 +102,7 @@ class TRTReshapeTest2(TRTReshapeTest):
data = fluid.data( data = fluid.data(
name='data', shape=self.data_shape, dtype='float32' name='data', shape=self.data_shape, dtype='float32'
) )
reshape_out = fluid.layers.reshape(x=data, shape=self.reshape) reshape_out = paddle.reshape(x=data, shape=self.reshape)
out = fluid.layers.batch_norm(reshape_out, is_test=True) out = fluid.layers.batch_norm(reshape_out, is_test=True)
self.feeds = { self.feeds = {
'data': np.random.random(self.data_shape).astype('float32') 'data': np.random.random(self.data_shape).astype('float32')
......
...@@ -18,6 +18,7 @@ from inference_pass_test import InferencePassTest ...@@ -18,6 +18,7 @@ from inference_pass_test import InferencePassTest
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import AnalysisConfig
import paddle
class ShuffleChannelFuseTRTPassTest(InferencePassTest): class ShuffleChannelFuseTRTPassTest(InferencePassTest):
...@@ -26,9 +27,9 @@ class ShuffleChannelFuseTRTPassTest(InferencePassTest): ...@@ -26,9 +27,9 @@ class ShuffleChannelFuseTRTPassTest(InferencePassTest):
data = fluid.data( data = fluid.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32" name="data", shape=[-1, 6, 64, 64], dtype="float32"
) )
reshape1 = fluid.layers.reshape(x=data, shape=[-1, 2, 3, 64, 64]) reshape1 = paddle.reshape(x=data, shape=[-1, 2, 3, 64, 64])
trans = fluid.layers.transpose(x=reshape1, perm=[0, 2, 1, 3, 4]) trans = fluid.layers.transpose(x=reshape1, perm=[0, 2, 1, 3, 4])
reshape2 = fluid.layers.reshape(x=trans, shape=[-1, 6, 64, 64]) reshape2 = paddle.reshape(x=trans, shape=[-1, 6, 64, 64])
out = fluid.layers.batch_norm(reshape2, is_test=True) out = fluid.layers.batch_norm(reshape2, is_test=True)
self.feeds = { self.feeds = {
......
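Note on the test above: the pass being exercised fuses the reshape-transpose-reshape channel-shuffle idiom. A standalone sketch of what that idiom computes (shapes follow the test; the group count is illustrative):

    import paddle

    groups = 2
    x = paddle.rand([1, 6, 64, 64])                          # NCHW input, 6 = groups * (6 // groups)
    y = paddle.reshape(x, [-1, groups, 6 // groups, 64, 64])
    y = paddle.transpose(y, perm=[0, 2, 1, 3, 4])            # swap group and per-group channel axes
    y = paddle.reshape(y, [-1, 6, 64, 64])                   # back to NCHW; channels now interleaved
    print(y.shape)                                           # [1, 6, 64, 64]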
...@@ -21,6 +21,7 @@ import paddle.fluid as fluid ...@@ -21,6 +21,7 @@ import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import AnalysisConfig
import paddle
class TensorRTSubgraphPassFcTest(InferencePassTest): class TensorRTSubgraphPassFcTest(InferencePassTest):
...@@ -30,7 +31,7 @@ class TensorRTSubgraphPassFcTest(InferencePassTest): ...@@ -30,7 +31,7 @@ class TensorRTSubgraphPassFcTest(InferencePassTest):
name="data", shape=[-1, 6, 64, 64], dtype="float32" name="data", shape=[-1, 6, 64, 64], dtype="float32"
) )
fc_out = fluid.layers.fc(input=[data], act=None, size=1000) fc_out = fluid.layers.fc(input=[data], act=None, size=1000)
reshape_out = fluid.layers.reshape(x=fc_out, shape=[1, 1000]) reshape_out = paddle.reshape(x=fc_out, shape=[1, 1000])
self.feeds = { self.feeds = {
"data": np.random.random([1, 6, 64, 64]).astype("float32"), "data": np.random.random([1, 6, 64, 64]).astype("float32"),
} }
......
...@@ -18,6 +18,7 @@ from inference_pass_test import InferencePassTest ...@@ -18,6 +18,7 @@ from inference_pass_test import InferencePassTest
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import AnalysisConfig
import paddle
class TransposeFlattenConcatFusePassTRTTest(InferencePassTest): class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
...@@ -36,7 +37,7 @@ class TransposeFlattenConcatFusePassTRTTest(InferencePassTest): ...@@ -36,7 +37,7 @@ class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
concat_out = fluid.layers.concat([flatt1, flatt2], axis=1) concat_out = fluid.layers.concat([flatt1, flatt2], axis=1)
# There are no parameters in the structure above. # There are no parameters in the structure above.
# Hence, append a batch_norm to avoid failure caused by load_combined. # Hence, append a batch_norm to avoid failure caused by load_combined.
reshape_out = fluid.layers.reshape(concat_out, [-1, 0, 1, 1]) reshape_out = paddle.reshape(concat_out, [-1, 0, 1, 1])
out = fluid.layers.batch_norm(reshape_out, is_test=True) out = fluid.layers.batch_norm(reshape_out, is_test=True)
self.feeds = { self.feeds = {
......
...@@ -100,7 +100,7 @@ class MNIST(fluid.dygraph.Layer): ...@@ -100,7 +100,7 @@ class MNIST(fluid.dygraph.Layer):
def forward(self, inputs, label): def forward(self, inputs, label):
x = self._simple_img_conv_pool_1(inputs) x = self._simple_img_conv_pool_1(inputs)
x = self._simple_img_conv_pool_2(x) x = self._simple_img_conv_pool_2(x)
x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape]) x = paddle.reshape(x, shape=[-1, self.pool_2_shape])
cost = self._fc(x) cost = self._fc(x)
loss = fluid.layers.cross_entropy(cost, label) loss = fluid.layers.cross_entropy(cost, label)
avg_loss = paddle.mean(loss) avg_loss = paddle.mean(loss)
......
...@@ -68,11 +68,11 @@ class SimpleNet(fluid.Layer): ...@@ -68,11 +68,11 @@ class SimpleNet(fluid.Layer):
x_emb = self.embedding(input) x_emb = self.embedding(input)
fc = fluid.layers.matmul(x_emb, self.softmax_weight) fc = fluid.layers.matmul(x_emb, self.softmax_weight)
fc = fluid.layers.elementwise_add(fc, self.softmax_bias) fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
projection = fluid.layers.reshape(fc, shape=[-1, self.vocab_size]) projection = paddle.reshape(fc, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False logits=projection, label=label, soft_label=False
) )
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss) loss = fluid.layers.reduce_sum(loss)
......
...@@ -48,9 +48,7 @@ def squeeze_excitation(input, num_channels, reduction_ratio): ...@@ -48,9 +48,7 @@ def squeeze_excitation(input, num_channels, reduction_ratio):
# input=input, pool_size=0, pool_type='avg', global_pooling=True) # input=input, pool_size=0, pool_type='avg', global_pooling=True)
conv = input conv = input
shape = conv.shape shape = conv.shape
reshape = fluid.layers.reshape( reshape = paddle.reshape(x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
x=conv, shape=[-1, shape[1], shape[2] * shape[3]]
)
pool = fluid.layers.reduce_mean(input=reshape, dim=2) pool = fluid.layers.reduce_mean(input=reshape, dim=2)
squeeze = fluid.layers.fc( squeeze = fluid.layers.fc(
...@@ -161,9 +159,7 @@ def SE_ResNeXt50Small(use_feed): ...@@ -161,9 +159,7 @@ def SE_ResNeXt50Small(use_feed):
) )
shape = conv.shape shape = conv.shape
reshape = fluid.layers.reshape( reshape = paddle.reshape(x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
x=conv, shape=[-1, shape[1], shape[2] * shape[3]]
)
pool = fluid.layers.reduce_mean(input=reshape, dim=2) pool = fluid.layers.reduce_mean(input=reshape, dim=2)
dropout = ( dropout = (
pool pool
......
...@@ -18,6 +18,7 @@ import unittest ...@@ -18,6 +18,7 @@ import unittest
import numpy as np import numpy as np
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.framework import Program, program_guard from paddle.fluid.framework import Program, program_guard
import paddle
def create_tensor(scope, name, np_data): def create_tensor(scope, name, np_data):
...@@ -312,7 +313,7 @@ class TestBeamSearchOpError(unittest.TestCase): ...@@ -312,7 +313,7 @@ class TestBeamSearchOpError(unittest.TestCase):
topk_scores, topk_indices = fluid.layers.topk(probs, k=4) topk_scores, topk_indices = fluid.layers.topk(probs, k=4)
accu_scores = fluid.layers.elementwise_add( accu_scores = fluid.layers.elementwise_add(
x=fluid.layers.log(x=topk_scores), x=fluid.layers.log(x=topk_scores),
y=fluid.layers.reshape(pre_scores, shape=[-1]), y=paddle.reshape(pre_scores, shape=[-1]),
axis=0, axis=0,
) )
......
...@@ -84,7 +84,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -84,7 +84,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -147,7 +147,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -147,7 +147,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -80,7 +80,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -80,7 +80,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -108,7 +108,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -108,7 +108,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -135,7 +135,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -135,7 +135,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -83,7 +83,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -83,7 +83,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -111,7 +111,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -111,7 +111,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -138,7 +138,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -138,7 +138,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -86,7 +86,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -86,7 +86,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -145,7 +145,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -145,7 +145,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -85,7 +85,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -85,7 +85,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -145,7 +145,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -145,7 +145,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -84,7 +84,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -84,7 +84,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -147,7 +147,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -147,7 +147,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -82,7 +82,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -82,7 +82,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -112,7 +112,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -112,7 +112,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -141,7 +141,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -141,7 +141,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -84,7 +84,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -84,7 +84,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -116,7 +116,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=is_sparse, is_sparse=is_sparse,
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -147,7 +147,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -147,7 +147,7 @@ class TestPSPassWithBow(unittest.TestCase):
), ),
is_sparse=False, is_sparse=False,
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -82,7 +82,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -82,7 +82,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum # vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum) q_ss = paddle.nn.functional.softsign(q_sum)
...@@ -112,7 +112,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -112,7 +112,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum # vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum) pt_ss = paddle.nn.functional.softsign(pt_sum)
...@@ -141,7 +141,7 @@ class TestPSPassWithBow(unittest.TestCase): ...@@ -141,7 +141,7 @@ class TestPSPassWithBow(unittest.TestCase):
learning_rate=emb_lr, learning_rate=emb_lr,
), ),
) )
nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum # vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum) nt_ss = paddle.nn.functional.softsign(nt_sum)
......
...@@ -381,7 +381,7 @@ class TestFakeInit(TranspilerTest): ...@@ -381,7 +381,7 @@ class TestFakeInit(TranspilerTest):
), ),
) )
neg_word_reshape = fluid.layers.reshape(inputs[2], shape=[-1, 1]) neg_word_reshape = paddle.reshape(inputs[2], shape=[-1, 1])
neg_word_reshape.stop_gradient = True neg_word_reshape.stop_gradient = True
neg_emb_w = fluid.layers.embedding( neg_emb_w = fluid.layers.embedding(
...@@ -391,7 +391,7 @@ class TestFakeInit(TranspilerTest): ...@@ -391,7 +391,7 @@ class TestFakeInit(TranspilerTest):
param_attr=fluid.ParamAttr(name='emb_w', learning_rate=1.0), param_attr=fluid.ParamAttr(name='emb_w', learning_rate=1.0),
) )
neg_emb_w_re = fluid.layers.reshape( neg_emb_w_re = paddle.reshape(
neg_emb_w, shape=[-1, neg_num, embedding_size] neg_emb_w, shape=[-1, neg_num, embedding_size]
) )
...@@ -402,7 +402,7 @@ class TestFakeInit(TranspilerTest): ...@@ -402,7 +402,7 @@ class TestFakeInit(TranspilerTest):
param_attr=fluid.ParamAttr(name='emb_b', learning_rate=1.0), param_attr=fluid.ParamAttr(name='emb_b', learning_rate=1.0),
) )
neg_emb_b_vec = fluid.layers.reshape(neg_emb_b, shape=[-1, neg_num]) neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num])
true_logits = fluid.layers.elementwise_add( true_logits = fluid.layers.elementwise_add(
fluid.layers.reduce_sum( fluid.layers.reduce_sum(
...@@ -413,14 +413,12 @@ class TestFakeInit(TranspilerTest): ...@@ -413,14 +413,12 @@ class TestFakeInit(TranspilerTest):
true_emb_b, true_emb_b,
) )
input_emb_re = fluid.layers.reshape( input_emb_re = paddle.reshape(input_emb, shape=[-1, 1, embedding_size])
input_emb, shape=[-1, 1, embedding_size]
)
neg_matmul = fluid.layers.matmul( neg_matmul = fluid.layers.matmul(
input_emb_re, neg_emb_w_re, transpose_y=True input_emb_re, neg_emb_w_re, transpose_y=True
) )
neg_matmul_re = fluid.layers.reshape(neg_matmul, shape=[-1, neg_num]) neg_matmul_re = paddle.reshape(neg_matmul, shape=[-1, neg_num])
neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec) neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
# nce loss # nce loss
label_ones = fluid.layers.fill_constant_batch_size_like( label_ones = fluid.layers.fill_constant_batch_size_like(
......
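Note on the word2vec-style hunk above: the reshapes exist to set up a batched matmul between one center embedding and neg_num negative embeddings per example. A shape-only sketch (batch, neg_num, and embedding sizes are made up):

    import paddle

    batch, neg_num, emb = 4, 5, 16
    input_emb = paddle.rand([batch, emb])
    neg_emb_w = paddle.rand([batch * neg_num, emb])

    input_emb_re = paddle.reshape(input_emb, shape=[-1, 1, emb])         # [batch, 1, emb]
    neg_emb_w_re = paddle.reshape(neg_emb_w, shape=[-1, neg_num, emb])   # [batch, neg_num, emb]
    neg_matmul = paddle.matmul(input_emb_re, neg_emb_w_re, transpose_y=True)
    neg_logits = paddle.reshape(neg_matmul, shape=[-1, neg_num])         # [batch, neg_num]
    print(neg_logits.shape)                                              # [4, 5]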
...@@ -115,7 +115,7 @@ class MNIST(fluid.dygraph.Layer): ...@@ -115,7 +115,7 @@ class MNIST(fluid.dygraph.Layer):
def forward(self, inputs, label): def forward(self, inputs, label):
x = paddle.nn.functional.relu(self._simple_img_conv_pool_1(inputs)) x = paddle.nn.functional.relu(self._simple_img_conv_pool_1(inputs))
x = paddle.nn.functional.relu(self._simple_img_conv_pool_2(x)) x = paddle.nn.functional.relu(self._simple_img_conv_pool_2(x))
x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape]) x = paddle.reshape(x, shape=[-1, self.pool_2_shape])
cost = self._linear(x) cost = self._linear(x)
loss = fluid.layers.cross_entropy(cost, label) loss = fluid.layers.cross_entropy(cost, label)
avg_loss = paddle.mean(loss) avg_loss = paddle.mean(loss)
......
...@@ -104,7 +104,7 @@ class MNIST(fluid.dygraph.Layer): ...@@ -104,7 +104,7 @@ class MNIST(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
x = self._simple_img_conv_pool_1(inputs) x = self._simple_img_conv_pool_1(inputs)
x = self._simple_img_conv_pool_2(x) x = self._simple_img_conv_pool_2(x)
x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape]) x = paddle.reshape(x, shape=[-1, self.pool_2_shape])
x = self._fc(x) x = self._fc(x)
return x return x
......
...@@ -149,8 +149,8 @@ def lm_model( ...@@ -149,8 +149,8 @@ def lm_model(
pre_cell = layers.slice( pre_cell = layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1] init_cell, axes=[0], starts=[i], ends=[i + 1]
) )
pre_hidden = layers.reshape(pre_hidden, shape=[-1, hidden_size]) pre_hidden = paddle.reshape(pre_hidden, shape=[-1, hidden_size])
pre_cell = layers.reshape(pre_cell, shape=[-1, hidden_size]) pre_cell = paddle.reshape(pre_cell, shape=[-1, hidden_size])
hidden_array.append(pre_hidden) hidden_array.append(pre_hidden)
cell_array.append(pre_cell) cell_array.append(pre_cell)
...@@ -270,12 +270,8 @@ def lm_model( ...@@ -270,12 +270,8 @@ def lm_model(
pre_cell = layers.slice( pre_cell = layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1] init_cell, axes=[0], starts=[i], ends=[i + 1]
) )
pre_hidden = layers.reshape( pre_hidden = paddle.reshape(pre_hidden, shape=[-1, hidden_size])
pre_hidden, shape=[-1, hidden_size], inplace=True pre_cell = paddle.reshape(pre_cell, shape=[-1, hidden_size])
)
pre_cell = layers.reshape(
pre_cell, shape=[-1, hidden_size], inplace=True
)
hidden_array.append(pre_hidden) hidden_array.append(pre_hidden)
cell_array.append(pre_cell) cell_array.append(pre_cell)
...@@ -286,7 +282,7 @@ def lm_model( ...@@ -286,7 +282,7 @@ def lm_model(
for index in range(len): for index in range(len):
input = sliced_inputs[index] input = sliced_inputs[index]
input = layers.reshape(input, shape=[-1, hidden_size], inplace=True) input = paddle.reshape(input, shape=[-1, hidden_size])
for k in range(num_layers): for k in range(num_layers):
pre_hidden = hidden_array[k] pre_hidden = hidden_array[k]
pre_cell = cell_array[k] pre_cell = cell_array[k]
...@@ -318,21 +314,19 @@ def lm_model( ...@@ -318,21 +314,19 @@ def lm_model(
res.append(input) res.append(input)
last_hidden = layers.concat(hidden_array, 1) last_hidden = layers.concat(hidden_array, 1)
last_hidden = layers.reshape( last_hidden = paddle.reshape(
last_hidden, shape=[-1, num_layers, hidden_size], inplace=True last_hidden, shape=[-1, num_layers, hidden_size]
) )
last_hidden = layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_hidden = layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = layers.concat(cell_array, 1) last_cell = layers.concat(cell_array, 1)
last_cell = layers.reshape( last_cell = paddle.reshape(
last_cell, shape=[-1, num_layers, hidden_size] last_cell, shape=[-1, num_layers, hidden_size]
) )
last_cell = layers.transpose(x=last_cell, perm=[1, 0, 2]) last_cell = layers.transpose(x=last_cell, perm=[1, 0, 2])
real_res = layers.concat(res, 0) real_res = layers.concat(res, 0)
real_res = layers.reshape( real_res = paddle.reshape(real_res, shape=[len, -1, hidden_size])
real_res, shape=[len, -1, hidden_size], inplace=True
)
real_res = layers.transpose(x=real_res, perm=[1, 0, 2]) real_res = layers.transpose(x=real_res, perm=[1, 0, 2])
return real_res, last_hidden, last_cell return real_res, last_hidden, last_cell
...@@ -367,10 +361,10 @@ def lm_model( ...@@ -367,10 +361,10 @@ def lm_model(
init_cell.persistable = True init_cell.persistable = True
init_hidden.persistable = True init_hidden.persistable = True
init_hidden_reshape = layers.reshape( init_hidden_reshape = paddle.reshape(
init_hidden, shape=[num_layers, -1, hidden_size] init_hidden, shape=[num_layers, -1, hidden_size]
) )
init_cell_reshape = layers.reshape( init_cell_reshape = paddle.reshape(
init_cell, shape=[num_layers, -1, hidden_size] init_cell, shape=[num_layers, -1, hidden_size]
) )
...@@ -387,9 +381,7 @@ def lm_model( ...@@ -387,9 +381,7 @@ def lm_model(
), ),
) )
x_emb = layers.reshape( x_emb = paddle.reshape(x_emb, shape=[-1, num_steps, hidden_size])
x_emb, shape=[-1, num_steps, hidden_size], inplace=True
)
if dropout is not None and dropout > 0.0: if dropout is not None and dropout > 0.0:
x_emb = layers.dropout( x_emb = layers.dropout(
x_emb, x_emb,
...@@ -447,9 +439,7 @@ def lm_model( ...@@ -447,9 +439,7 @@ def lm_model(
print("type not support") print("type not support")
return return
rnn_out = layers.reshape( rnn_out = paddle.reshape(rnn_out, shape=[-1, num_steps, hidden_size])
rnn_out, shape=[-1, num_steps, hidden_size], inplace=True
)
softmax_weight = layers.create_parameter( softmax_weight = layers.create_parameter(
[hidden_size, vocab_size], [hidden_size, vocab_size],
...@@ -470,15 +460,13 @@ def lm_model( ...@@ -470,15 +460,13 @@ def lm_model(
projection = layers.matmul(rnn_out, softmax_weight) projection = layers.matmul(rnn_out, softmax_weight)
projection = layers.elementwise_add(projection, softmax_bias) projection = layers.elementwise_add(projection, softmax_bias)
projection = layers.reshape( projection = paddle.reshape(projection, shape=[-1, vocab_size])
projection, shape=[-1, vocab_size], inplace=True
)
loss = layers.softmax_with_cross_entropy( loss = layers.softmax_with_cross_entropy(
logits=projection, label=y, soft_label=False logits=projection, label=y, soft_label=False
) )
loss = layers.reshape(loss, shape=[-1, num_steps], inplace=True) loss = paddle.reshape(loss, shape=[-1, num_steps])
loss = layers.reduce_mean(loss, dim=[0]) loss = layers.reduce_mean(loss, dim=[0])
loss = layers.reduce_sum(loss) loss = layers.reduce_sum(loss)
......
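The `lm_model` hunks above also drop the `inplace=True` keyword, since `paddle.reshape` does not take an `inplace` argument and simply returns a new Tensor that is rebound to the old name. A minimal sketch, assuming a hypothetical `hidden_size` of 4:

```python
import paddle

hidden_size = 4                                    # illustrative value
pre_hidden = paddle.rand([1, 2, hidden_size])      # stand-in for a sliced state

# Old: layers.reshape(pre_hidden, shape=[-1, hidden_size], inplace=True)
# New: no `inplace` argument; the result is rebound to the same variable.
pre_hidden = paddle.reshape(pre_hidden, shape=[-1, hidden_size])
print(pre_hidden.shape)                            # [2, 4]
```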
...@@ -107,8 +107,8 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): ...@@ -107,8 +107,8 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
with while_op.block(): with while_op.block():
d = layers.array_read(array=data_array, i=i) d = layers.array_read(array=data_array, i=i)
prev = layers.array_read(array=mem_array, i=i) prev = layers.array_read(array=mem_array, i=i)
d = layers.reshape(d, shape=[10]) d = paddle.reshape(d, shape=[10])
prev = layers.reshape(prev, shape=[10]) prev = paddle.reshape(prev, shape=[10])
result = layers.sums(input=[d, prev]) result = layers.sums(input=[d, prev])
i = layers.increment(x=i, in_place=True) i = layers.increment(x=i, in_place=True)
...@@ -117,8 +117,8 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): ...@@ -117,8 +117,8 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
with while_op2.block(): with while_op2.block():
d2 = layers.array_read(array=data_array, i=j) d2 = layers.array_read(array=data_array, i=j)
prev2 = layers.array_read(array=mem_array, i=j) prev2 = layers.array_read(array=mem_array, i=j)
d2 = layers.reshape(d2, shape=[10]) d2 = paddle.reshape(d2, shape=[10])
prev2 = layers.reshape(prev2, shape=[10]) prev2 = paddle.reshape(prev2, shape=[10])
result2 = layers.sums(input=[d2, prev2]) result2 = layers.sums(input=[d2, prev2])
j = layers.increment(x=j, in_place=True) j = layers.increment(x=j, in_place=True)
......
...@@ -52,7 +52,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase): ...@@ -52,7 +52,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase):
x = fluid.layers.concat([x_1, x_2], axis=-1) x = fluid.layers.concat([x_1, x_2], axis=-1)
for _ in range(self.reshape_times): for _ in range(self.reshape_times):
x = fluid.layers.reshape(x, [-1, 1]) x = paddle.reshape(x, [-1, 1])
x.stop_gradient = stop_gradient x.stop_gradient = stop_gradient
......
...@@ -54,7 +54,7 @@ def simple_depthwise_net(use_feed): ...@@ -54,7 +54,7 @@ def simple_depthwise_net(use_feed):
assert use_feed assert use_feed
img = fluid.layers.data(name='image', shape=[784], dtype='float32') img = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64') label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = fluid.layers.reshape(img, (-1, 1, 28, 28)) hidden = paddle.reshape(img, (-1, 1, 28, 28))
for _ in range(4): for _ in range(4):
hidden = sep_conv(hidden, channel=200, stride=2, filter=5) hidden = sep_conv(hidden, channel=200, stride=2, filter=5)
hidden = fluid.layers.relu(hidden) hidden = fluid.layers.relu(hidden)
......
...@@ -140,7 +140,7 @@ class SimpleRNN(fluid.Layer): ...@@ -140,7 +140,7 @@ class SimpleRNN(fluid.Layer):
input = fluid.layers.slice( input = fluid.layers.slice(
inputs, axes=[1], starts=[i], ends=[i + 1] inputs, axes=[1], starts=[i], ends=[i + 1]
) )
input = fluid.layers.reshape(input, shape=[1, 3]) input = paddle.reshape(input, shape=[1, 3])
out_softmax, pre_hidden = self._cell(input, pre_hidden) out_softmax, pre_hidden = self._cell(input, pre_hidden)
outs.append(out_softmax) outs.append(out_softmax)
...@@ -739,15 +739,11 @@ class TestImperative(unittest.TestCase): ...@@ -739,15 +739,11 @@ class TestImperative(unittest.TestCase):
) )
a = fluid.layers.expand( a = fluid.layers.expand(
fluid.layers.reshape( paddle.reshape(fluid.layers.reduce_sum(inp_data1), [1, 1]),
fluid.layers.reduce_sum(inp_data1), [1, 1]
),
[4, 1], [4, 1],
) )
b = fluid.layers.expand( b = fluid.layers.expand(
fluid.layers.reshape( paddle.reshape(fluid.layers.reduce_sum(inp_data2), [1, 1]),
fluid.layers.reduce_sum(inp_data2), [1, 1]
),
[4, 1], [4, 1],
) )
cond = fluid.layers.less_than(x=a, y=b) cond = fluid.layers.less_than(x=a, y=b)
...@@ -796,7 +792,7 @@ class TestImperative(unittest.TestCase): ...@@ -796,7 +792,7 @@ class TestImperative(unittest.TestCase):
np_inp = np_inp.astype(np.float32) np_inp = np_inp.astype(np.float32)
with fluid.dygraph.guard(): with fluid.dygraph.guard():
var_inp = paddle.to_tensor(np_inp) var_inp = paddle.to_tensor(np_inp)
var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3]) var_inp = paddle.reshape(var_inp, shape=[1, 4, 3])
simple_rnn = SimpleRNN() simple_rnn = SimpleRNN()
outs, pre_hiddens = simple_rnn.forward(var_inp) outs, pre_hiddens = simple_rnn.forward(var_inp)
dy_out = outs[3].numpy() dy_out = outs[3].numpy()
...@@ -807,7 +803,7 @@ class TestImperative(unittest.TestCase): ...@@ -807,7 +803,7 @@ class TestImperative(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
var_inp2 = paddle.to_tensor(np_inp) var_inp2 = paddle.to_tensor(np_inp)
var_inp2 = fluid.layers.reshape(var_inp2, shape=[1, 4, 3]) var_inp2 = paddle.reshape(var_inp2, shape=[1, 4, 3])
simple_rnn2 = SimpleRNN() simple_rnn2 = SimpleRNN()
outs2, pre_hiddens2 = simple_rnn2.forward(var_inp2) outs2, pre_hiddens2 = simple_rnn2.forward(var_inp2)
dy_out2 = outs2[3].numpy() dy_out2 = outs2[3].numpy()
......
...@@ -92,7 +92,7 @@ class TestDygraphGNN(unittest.TestCase): ...@@ -92,7 +92,7 @@ class TestDygraphGNN(unittest.TestCase):
model = GCN('test_gcn', 50) model = GCN('test_gcn', 50)
logits = model(features, adj) logits = model(features, adj)
logits = fluid.layers.reshape(logits, logits.shape[1:]) logits = paddle.reshape(logits, logits.shape[1:])
# In other example, it's nll with log_softmax. However, paddle's # In other example, it's nll with log_softmax. However, paddle's
# log_loss only supports binary classification now. # log_loss only supports binary classification now.
loss = fluid.layers.softmax_with_cross_entropy(logits, labels) loss = fluid.layers.softmax_with_cross_entropy(logits, labels)
...@@ -130,7 +130,7 @@ class TestDygraphGNN(unittest.TestCase): ...@@ -130,7 +130,7 @@ class TestDygraphGNN(unittest.TestCase):
model = GCN('test_gcn', 50) model = GCN('test_gcn', 50)
logits = model(to_variable(features), to_variable(adj)) logits = model(to_variable(features), to_variable(adj))
logits = fluid.layers.reshape(logits, logits.shape[1:]) logits = paddle.reshape(logits, logits.shape[1:])
# In other example, it's nll with log_softmax. However, paddle's # In other example, it's nll with log_softmax. However, paddle's
# log_loss only supports binary classification now. # log_loss only supports binary classification now.
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
...@@ -158,7 +158,7 @@ class TestDygraphGNN(unittest.TestCase): ...@@ -158,7 +158,7 @@ class TestDygraphGNN(unittest.TestCase):
model2 = GCN('test_gcn', 50) model2 = GCN('test_gcn', 50)
logits2 = model2(to_variable(features2), to_variable(adj2)) logits2 = model2(to_variable(features2), to_variable(adj2))
logits2 = fluid.layers.reshape(logits2, logits2.shape[1:]) logits2 = paddle.reshape(logits2, logits2.shape[1:])
# In other example, it's nll with log_softmax. However, paddle's # In other example, it's nll with log_softmax. However, paddle's
# log_loss only supports binary classification now. # log_loss only supports binary classification now.
loss2 = fluid.layers.softmax_with_cross_entropy( loss2 = fluid.layers.softmax_with_cross_entropy(
......
...@@ -66,13 +66,11 @@ class SimpleNet(fluid.Layer): ...@@ -66,13 +66,11 @@ class SimpleNet(fluid.Layer):
x_emb, fluid.layers.transpose(self.embedding.weight, perm=[1, 0]) x_emb, fluid.layers.transpose(self.embedding.weight, perm=[1, 0])
) )
projection = fluid.layers.elementwise_add(projection, self.softmax_bias) projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
projection = fluid.layers.reshape( projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
projection, shape=[-1, self.vocab_size]
)
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False logits=projection, label=label, soft_label=False
) )
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss) loss = fluid.layers.reduce_sum(loss)
......
...@@ -103,7 +103,7 @@ class MNIST(fluid.dygraph.Layer): ...@@ -103,7 +103,7 @@ class MNIST(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
x = self._simple_img_conv_pool_1(inputs) x = self._simple_img_conv_pool_1(inputs)
x = self._simple_img_conv_pool_2(x) x = self._simple_img_conv_pool_2(x)
x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape]) x = paddle.reshape(x, shape=[-1, self.pool_2_shape])
x = self._fc(x) x = self._fc(x)
return x return x
......
...@@ -195,13 +195,9 @@ class DynamicGRU(fluid.dygraph.Layer): ...@@ -195,13 +195,9 @@ class DynamicGRU(fluid.dygraph.Layer):
input_ = fluid.layers.slice( input_ = fluid.layers.slice(
inputs, axes=[1], starts=[i], ends=[i + 1] inputs, axes=[1], starts=[i], ends=[i + 1]
) )
input_ = fluid.layers.reshape( input_ = paddle.reshape(input_, [-1, input_.shape[2]])
input_, [-1, input_.shape[2]], inplace=False
)
hidden, reset, gate = self.gru_unit(input_, hidden) hidden, reset, gate = self.gru_unit(input_, hidden)
hidden_ = fluid.layers.reshape( hidden_ = paddle.reshape(hidden, [-1, 1, hidden.shape[1]])
hidden, [-1, 1, hidden.shape[1]], inplace=False
)
if self.is_reverse: if self.is_reverse:
res = [hidden_] + res res = [hidden_] + res
else: else:
...@@ -271,7 +267,7 @@ class EncoderNet(fluid.dygraph.Layer): ...@@ -271,7 +267,7 @@ class EncoderNet(fluid.dygraph.Layer):
transpose_conv_features = fluid.layers.transpose( transpose_conv_features = fluid.layers.transpose(
conv_features, perm=[0, 3, 1, 2] conv_features, perm=[0, 3, 1, 2]
) )
sliced_feature = fluid.layers.reshape( sliced_feature = paddle.reshape(
transpose_conv_features, transpose_conv_features,
[ [
-1, -1,
...@@ -279,7 +275,6 @@ class EncoderNet(fluid.dygraph.Layer): ...@@ -279,7 +275,6 @@ class EncoderNet(fluid.dygraph.Layer):
transpose_conv_features.shape[2] transpose_conv_features.shape[2]
* transpose_conv_features.shape[3], * transpose_conv_features.shape[3],
], ],
inplace=False,
) )
fc_1 = self.fc_1_layer(sliced_feature) fc_1 = self.fc_1_layer(sliced_feature)
fc_2 = self.fc_2_layer(sliced_feature) fc_2 = self.fc_2_layer(sliced_feature)
...@@ -308,8 +303,8 @@ class SimpleAttention(fluid.dygraph.Layer): ...@@ -308,8 +303,8 @@ class SimpleAttention(fluid.dygraph.Layer):
def forward(self, encoder_vec, encoder_proj, decoder_state): def forward(self, encoder_vec, encoder_proj, decoder_state):
decoder_state_fc = self.fc_1(decoder_state) decoder_state_fc = self.fc_1(decoder_state)
decoder_state_proj_reshape = fluid.layers.reshape( decoder_state_proj_reshape = paddle.reshape(
decoder_state_fc, [-1, 1, decoder_state_fc.shape[1]], inplace=False decoder_state_fc, [-1, 1, decoder_state_fc.shape[1]]
) )
decoder_state_expand = fluid.layers.expand( decoder_state_expand = fluid.layers.expand(
decoder_state_proj_reshape, [1, encoder_proj.shape[1], 1] decoder_state_proj_reshape, [1, encoder_proj.shape[1], 1]
...@@ -320,10 +315,9 @@ class SimpleAttention(fluid.dygraph.Layer): ...@@ -320,10 +315,9 @@ class SimpleAttention(fluid.dygraph.Layer):
concated = paddle.tanh(x=concated) concated = paddle.tanh(x=concated)
attention_weight = self.fc_2(concated) attention_weight = self.fc_2(concated)
weights_reshape = fluid.layers.reshape( weights_reshape = paddle.reshape(
x=attention_weight, x=attention_weight,
shape=[attention_weight.shape[0], attention_weight.shape[1]], shape=[attention_weight.shape[0], attention_weight.shape[1]],
inplace=False,
) )
weights_reshape = fluid.layers.softmax(weights_reshape) weights_reshape = fluid.layers.softmax(weights_reshape)
...@@ -364,8 +358,8 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer): ...@@ -364,8 +358,8 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer):
current_word = fluid.layers.slice( current_word = fluid.layers.slice(
target_embedding, axes=[1], starts=[i], ends=[i + 1] target_embedding, axes=[1], starts=[i], ends=[i + 1]
) )
current_word = fluid.layers.reshape( current_word = paddle.reshape(
current_word, [-1, current_word.shape[2]], inplace=False current_word, [-1, current_word.shape[2]]
) )
context = self.simple_attention( context = self.simple_attention(
...@@ -407,17 +401,16 @@ class OCRAttention(fluid.dygraph.Layer): ...@@ -407,17 +401,16 @@ class OCRAttention(fluid.dygraph.Layer):
backward_first = fluid.layers.slice( backward_first = fluid.layers.slice(
gru_backward, axes=[1], starts=[0], ends=[1] gru_backward, axes=[1], starts=[0], ends=[1]
) )
backward_first = fluid.layers.reshape( backward_first = paddle.reshape(
backward_first, [-1, backward_first.shape[2]], inplace=False backward_first, [-1, backward_first.shape[2]]
) )
decoder_boot = self.fc(backward_first) decoder_boot = self.fc(backward_first)
label_in = fluid.layers.reshape(label_in, [-1], inplace=False) label_in = paddle.reshape(label_in, [-1])
trg_embedding = self.embedding(label_in) trg_embedding = self.embedding(label_in)
trg_embedding = fluid.layers.reshape( trg_embedding = paddle.reshape(
trg_embedding, trg_embedding,
[-1, Config.max_length, trg_embedding.shape[1]], [-1, Config.max_length, trg_embedding.shape[1]],
inplace=False,
) )
prediction = self.gru_decoder_with_attention( prediction = self.gru_decoder_with_attention(
...@@ -497,11 +490,9 @@ class TestDygraphOCRAttention(unittest.TestCase): ...@@ -497,11 +490,9 @@ class TestDygraphOCRAttention(unittest.TestCase):
label_out.stop_gradient = True label_out.stop_gradient = True
img = to_variable(image_np) img = to_variable(image_np)
dy_prediction = ocr_attention(img, label_in) dy_prediction = ocr_attention(img, label_in)
label_out = fluid.layers.reshape( label_out = paddle.reshape(label_out, [-1, 1])
label_out, [-1, 1], inplace=False dy_prediction = paddle.reshape(
) dy_prediction, [label_out.shape[0], -1]
dy_prediction = fluid.layers.reshape(
dy_prediction, [label_out.shape[0], -1], inplace=False
) )
loss = fluid.layers.cross_entropy( loss = fluid.layers.cross_entropy(
input=dy_prediction, label=label_out input=dy_prediction, label=label_out
...@@ -577,7 +568,7 @@ class TestDygraphOCRAttention(unittest.TestCase): ...@@ -577,7 +568,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
static_prediction = ocr_attention(images, static_label_in) static_prediction = ocr_attention(images, static_label_in)
static_prediction = fluid.layers.reshape( static_prediction = paddle.reshape(
static_prediction, shape=[-1, Config.num_classes + 2] static_prediction, shape=[-1, Config.num_classes + 2]
) )
......
...@@ -141,7 +141,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -141,7 +141,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
label = data[1] label = data[1]
label.stop_gradient = True label.stop_gradient = True
img = fluid.layers.reshape(img, shape=[batch_size, -1]) img = paddle.reshape(img, shape=[batch_size, -1])
cost = mlp(img) cost = mlp(img)
avg_loss = fluid.layers.reduce_mean(cost) avg_loss = fluid.layers.reduce_mean(cost)
dy_out = avg_loss.numpy() dy_out = avg_loss.numpy()
...@@ -180,7 +180,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -180,7 +180,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
name='pixel', shape=[1, 28, 28], dtype='float32' name='pixel', shape=[1, 28, 28], dtype='float32'
) )
label = fluid.layers.data(name='label', shape=[1], dtype='int64') label = fluid.layers.data(name='label', shape=[1], dtype='int64')
img = fluid.layers.reshape(img, shape=[batch_size, 784]) img = paddle.reshape(img, shape=[batch_size, 784])
cost = mlp(img) cost = mlp(img)
avg_loss = fluid.layers.reduce_mean(cost) avg_loss = fluid.layers.reduce_mean(cost)
optimizer.minimize(avg_loss) optimizer.minimize(avg_loss)
......
...@@ -139,7 +139,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -139,7 +139,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
label.stop_gradient = True label.stop_gradient = True
img = fluid.layers.reshape(img, shape=[batch_size, -1]) img = paddle.reshape(img, shape=[batch_size, -1])
cost = mlp(img) cost = mlp(img)
avg_loss = fluid.layers.reduce_mean(cost) avg_loss = fluid.layers.reduce_mean(cost)
dy_out = avg_loss.numpy() dy_out = avg_loss.numpy()
...@@ -189,7 +189,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -189,7 +189,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
name='pixel', shape=[1, 28, 28], dtype='float32' name='pixel', shape=[1, 28, 28], dtype='float32'
) )
label = fluid.layers.data(name='label', shape=[1], dtype='int64') label = fluid.layers.data(name='label', shape=[1], dtype='int64')
img = fluid.layers.reshape(img, shape=[batch_size, 784]) img = paddle.reshape(img, shape=[batch_size, 784])
cost = mlp(img) cost = mlp(img)
avg_loss = fluid.layers.reduce_mean(cost) avg_loss = fluid.layers.reduce_mean(cost)
optimizer.minimize(avg_loss) optimizer.minimize(avg_loss)
......
...@@ -85,12 +85,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -85,12 +85,10 @@ class SimpleLSTMRNN(fluid.Layer):
pre_cell = fluid.layers.slice( pre_cell = fluid.layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1] init_cell, axes=[0], starts=[i], ends=[i + 1]
) )
pre_hidden = fluid.layers.reshape( pre_hidden = paddle.reshape(
pre_hidden, shape=[-1, self._hidden_size] pre_hidden, shape=[-1, self._hidden_size]
) )
pre_cell = fluid.layers.reshape( pre_cell = paddle.reshape(pre_cell, shape=[-1, self._hidden_size])
pre_cell, shape=[-1, self._hidden_size]
)
self.hidden_array.append(pre_hidden) self.hidden_array.append(pre_hidden)
self.cell_array.append(pre_cell) self.cell_array.append(pre_cell)
...@@ -99,7 +97,7 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -99,7 +97,7 @@ class SimpleLSTMRNN(fluid.Layer):
self._input = fluid.layers.slice( self._input = fluid.layers.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1] input_embedding, axes=[1], starts=[index], ends=[index + 1]
) )
self._input = fluid.layers.reshape( self._input = paddle.reshape(
self._input, shape=[-1, self._hidden_size] self._input, shape=[-1, self._hidden_size]
) )
for k in range(self._num_layers): for k in range(self._num_layers):
...@@ -130,19 +128,17 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -130,19 +128,17 @@ class SimpleLSTMRNN(fluid.Layer):
dropout_implementation='upscale_in_train', dropout_implementation='upscale_in_train',
) )
res.append( res.append(
fluid.layers.reshape( paddle.reshape(self._input, shape=[1, -1, self._hidden_size])
self._input, shape=[1, -1, self._hidden_size]
)
) )
real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.concat(res, 0)
real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.concat(self.hidden_array, 1)
last_hidden = fluid.layers.reshape( last_hidden = paddle.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size] last_hidden, shape=[-1, self._num_layers, self._hidden_size]
) )
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.concat(self.cell_array, 1)
last_cell = fluid.layers.reshape( last_cell = paddle.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size] last_cell, shape=[-1, self._num_layers, self._hidden_size]
) )
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
...@@ -203,16 +199,16 @@ class PtbModel(fluid.Layer): ...@@ -203,16 +199,16 @@ class PtbModel(fluid.Layer):
) )
def forward(self, input, label, init_hidden, init_cell): def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape( init_h = paddle.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size] init_hidden, shape=[self.num_layers, -1, self.hidden_size]
) )
init_c = fluid.layers.reshape( init_c = paddle.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size] init_cell, shape=[self.num_layers, -1, self.hidden_size]
) )
x_emb = self.embedding(input) x_emb = self.embedding(input)
x_emb = fluid.layers.reshape( x_emb = paddle.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size] x_emb, shape=[-1, self.num_steps, self.hidden_size]
) )
if self.dropout is not None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
...@@ -224,18 +220,16 @@ class PtbModel(fluid.Layer): ...@@ -224,18 +220,16 @@ class PtbModel(fluid.Layer):
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(
x_emb, init_h, init_c x_emb, init_h, init_c
) )
rnn_out = fluid.layers.reshape( rnn_out = paddle.reshape(
rnn_out, shape=[-1, self.num_steps, self.hidden_size] rnn_out, shape=[-1, self.num_steps, self.hidden_size]
) )
projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias) projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
projection = fluid.layers.reshape( projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
projection, shape=[-1, self.vocab_size]
)
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False logits=projection, label=label, soft_label=False
) )
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss) loss = fluid.layers.reduce_sum(loss)
......
...@@ -36,7 +36,7 @@ class Policy(fluid.dygraph.Layer): ...@@ -36,7 +36,7 @@ class Policy(fluid.dygraph.Layer):
self.rewards = [] self.rewards = []
def forward(self, inputs): def forward(self, inputs):
x = fluid.layers.reshape(inputs, shape=[-1, 4]) x = paddle.reshape(inputs, shape=[-1, 4])
x = self.affine1(x) x = self.affine1(x)
x = fluid.layers.dropout(x, self.dropout_ratio) x = fluid.layers.dropout(x, self.dropout_ratio)
x = fluid.layers.relu(x) x = fluid.layers.relu(x)
......
...@@ -241,7 +241,7 @@ class ResNet(fluid.Layer): ...@@ -241,7 +241,7 @@ class ResNet(fluid.Layer):
for bottleneck_block in self.bottleneck_block_list: for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y) y = bottleneck_block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_output])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -80,12 +80,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -80,12 +80,10 @@ class SimpleLSTMRNN(fluid.Layer):
pre_cell = fluid.layers.slice( pre_cell = fluid.layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1] init_cell, axes=[0], starts=[i], ends=[i + 1]
) )
pre_hidden = fluid.layers.reshape( pre_hidden = paddle.reshape(
pre_hidden, shape=[-1, self._hidden_size] pre_hidden, shape=[-1, self._hidden_size]
) )
pre_cell = fluid.layers.reshape( pre_cell = paddle.reshape(pre_cell, shape=[-1, self._hidden_size])
pre_cell, shape=[-1, self._hidden_size]
)
self.hidden_array.append(pre_hidden) self.hidden_array.append(pre_hidden)
self.cell_array.append(pre_cell) self.cell_array.append(pre_cell)
...@@ -94,7 +92,7 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -94,7 +92,7 @@ class SimpleLSTMRNN(fluid.Layer):
self._input = fluid.layers.slice( self._input = fluid.layers.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1] input_embedding, axes=[1], starts=[index], ends=[index + 1]
) )
self._input = fluid.layers.reshape( self._input = paddle.reshape(
self._input, shape=[-1, self._hidden_size] self._input, shape=[-1, self._hidden_size]
) )
for k in range(self._num_layers): for k in range(self._num_layers):
...@@ -125,19 +123,17 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -125,19 +123,17 @@ class SimpleLSTMRNN(fluid.Layer):
dropout_implementation='upscale_in_train', dropout_implementation='upscale_in_train',
) )
res.append( res.append(
fluid.layers.reshape( paddle.reshape(self._input, shape=[1, -1, self._hidden_size])
self._input, shape=[1, -1, self._hidden_size]
)
) )
real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.concat(res, 0)
real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.concat(self.hidden_array, 1)
last_hidden = fluid.layers.reshape( last_hidden = paddle.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size] last_hidden, shape=[-1, self._num_layers, self._hidden_size]
) )
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.concat(self.cell_array, 1)
last_cell = fluid.layers.reshape( last_cell = paddle.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size] last_cell, shape=[-1, self._num_layers, self._hidden_size]
) )
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
...@@ -198,16 +194,16 @@ class PtbModel(fluid.Layer): ...@@ -198,16 +194,16 @@ class PtbModel(fluid.Layer):
) )
def forward(self, input, label, init_hidden, init_cell): def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape( init_h = paddle.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size] init_hidden, shape=[self.num_layers, -1, self.hidden_size]
) )
init_c = fluid.layers.reshape( init_c = paddle.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size] init_cell, shape=[self.num_layers, -1, self.hidden_size]
) )
x_emb = self.embedding(input) x_emb = self.embedding(input)
x_emb = fluid.layers.reshape( x_emb = paddle.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size] x_emb, shape=[-1, self.num_steps, self.hidden_size]
) )
if self.dropout is not None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
...@@ -219,19 +215,17 @@ class PtbModel(fluid.Layer): ...@@ -219,19 +215,17 @@ class PtbModel(fluid.Layer):
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(
x_emb, init_h, init_c x_emb, init_h, init_c
) )
rnn_out = fluid.layers.reshape( rnn_out = paddle.reshape(
rnn_out, shape=[-1, self.num_steps, self.hidden_size] rnn_out, shape=[-1, self.num_steps, self.hidden_size]
) )
projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias) projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
projection = fluid.layers.reshape( projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
projection, shape=[-1, self.vocab_size]
)
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False logits=projection, label=label, soft_label=False
) )
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss) loss = fluid.layers.reduce_sum(loss)
......
...@@ -82,12 +82,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -82,12 +82,10 @@ class SimpleLSTMRNN(fluid.Layer):
pre_cell = fluid.layers.slice( pre_cell = fluid.layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1] init_cell, axes=[0], starts=[i], ends=[i + 1]
) )
pre_hidden = fluid.layers.reshape( pre_hidden = paddle.reshape(
pre_hidden, shape=[-1, self._hidden_size] pre_hidden, shape=[-1, self._hidden_size]
) )
pre_cell = fluid.layers.reshape( pre_cell = paddle.reshape(pre_cell, shape=[-1, self._hidden_size])
pre_cell, shape=[-1, self._hidden_size]
)
self.hidden_array.append(pre_hidden) self.hidden_array.append(pre_hidden)
self.cell_array.append(pre_cell) self.cell_array.append(pre_cell)
...@@ -96,7 +94,7 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -96,7 +94,7 @@ class SimpleLSTMRNN(fluid.Layer):
self._input = fluid.layers.slice( self._input = fluid.layers.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1] input_embedding, axes=[1], starts=[index], ends=[index + 1]
) )
self._input = fluid.layers.reshape( self._input = paddle.reshape(
self._input, shape=[-1, self._hidden_size] self._input, shape=[-1, self._hidden_size]
) )
for k in range(self._num_layers): for k in range(self._num_layers):
...@@ -127,19 +125,17 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -127,19 +125,17 @@ class SimpleLSTMRNN(fluid.Layer):
dropout_implementation='upscale_in_train', dropout_implementation='upscale_in_train',
) )
res.append( res.append(
fluid.layers.reshape( paddle.reshape(self._input, shape=[1, -1, self._hidden_size])
self._input, shape=[1, -1, self._hidden_size]
)
) )
real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.concat(res, 0)
real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.concat(self.hidden_array, 1)
last_hidden = fluid.layers.reshape( last_hidden = paddle.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size] last_hidden, shape=[-1, self._num_layers, self._hidden_size]
) )
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.concat(self.cell_array, 1)
last_cell = fluid.layers.reshape( last_cell = paddle.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size] last_cell, shape=[-1, self._num_layers, self._hidden_size]
) )
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
...@@ -200,16 +196,16 @@ class PtbModel(fluid.Layer): ...@@ -200,16 +196,16 @@ class PtbModel(fluid.Layer):
) )
def forward(self, input, label, init_hidden, init_cell): def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape( init_h = paddle.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size] init_hidden, shape=[self.num_layers, -1, self.hidden_size]
) )
init_c = fluid.layers.reshape( init_c = paddle.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size] init_cell, shape=[self.num_layers, -1, self.hidden_size]
) )
x_emb = self.embedding(input) x_emb = self.embedding(input)
x_emb = fluid.layers.reshape( x_emb = paddle.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size] x_emb, shape=[-1, self.num_steps, self.hidden_size]
) )
if self.dropout is not None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
...@@ -221,19 +217,17 @@ class PtbModel(fluid.Layer): ...@@ -221,19 +217,17 @@ class PtbModel(fluid.Layer):
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(
x_emb, init_h, init_c x_emb, init_h, init_c
) )
rnn_out = fluid.layers.reshape( rnn_out = paddle.reshape(
rnn_out, shape=[-1, self.num_steps, self.hidden_size] rnn_out, shape=[-1, self.num_steps, self.hidden_size]
) )
projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias) projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
projection = fluid.layers.reshape( projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
projection, shape=[-1, self.vocab_size]
)
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False logits=projection, label=label, soft_label=False
) )
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss) loss = fluid.layers.reduce_sum(loss)
......
...@@ -123,7 +123,7 @@ class SqueezeExcitation(fluid.dygraph.Layer): ...@@ -123,7 +123,7 @@ class SqueezeExcitation(fluid.dygraph.Layer):
def forward(self, input): def forward(self, input):
y = self._pool(input) y = self._pool(input)
y = fluid.layers.reshape(y, shape=[-1, self._num_channels]) y = paddle.reshape(y, shape=[-1, self._num_channels])
y = self._squeeze(y) y = self._squeeze(y)
y = self._excitation(y) y = self._excitation(y)
y = fluid.layers.elementwise_mul(x=input, y=y, axis=0) y = fluid.layers.elementwise_mul(x=input, y=y, axis=0)
...@@ -318,7 +318,7 @@ class SeResNeXt(fluid.dygraph.Layer): ...@@ -318,7 +318,7 @@ class SeResNeXt(fluid.dygraph.Layer):
for bottleneck_block in self.bottleneck_block_list: for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y) y = bottleneck_block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_output])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -75,13 +75,11 @@ class SimpleNet(fluid.Layer): ...@@ -75,13 +75,11 @@ class SimpleNet(fluid.Layer):
projection = fluid.layers.matmul( projection = fluid.layers.matmul(
fc, fluid.layers.transpose(self.embedding.weight, perm=[1, 0]) fc, fluid.layers.transpose(self.embedding.weight, perm=[1, 0])
) )
projection = fluid.layers.reshape( projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
projection, shape=[-1, self.vocab_size]
)
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False logits=projection, label=label, soft_label=False
) )
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss) loss = fluid.layers.reduce_sum(loss)
......
...@@ -309,9 +309,7 @@ class Generator(fluid.dygraph.Layer): ...@@ -309,9 +309,7 @@ class Generator(fluid.dygraph.Layer):
def forward(self, input, label_trg): def forward(self, input, label_trg):
shape = input.shape shape = input.shape
label_trg_e = fluid.layers.reshape( label_trg_e = paddle.reshape(label_trg, [-1, label_trg.shape[1], 1, 1])
label_trg, [-1, label_trg.shape[1], 1, 1]
)
label_trg_e = fluid.layers.expand( label_trg_e = fluid.layers.expand(
x=label_trg_e, expand_times=[1, 1, shape[2], shape[3]] x=label_trg_e, expand_times=[1, 1, shape[2], shape[3]]
) )
...@@ -380,9 +378,7 @@ class Discriminator(fluid.dygraph.Layer): ...@@ -380,9 +378,7 @@ class Discriminator(fluid.dygraph.Layer):
def loss_cls(cls, label, cfg): def loss_cls(cls, label, cfg):
cls_shape = cls.shape cls_shape = cls.shape
cls = fluid.layers.reshape( cls = paddle.reshape(cls, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]])
cls, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]]
)
return ( return (
fluid.layers.reduce_sum( fluid.layers.reduce_sum(
fluid.layers.sigmoid_cross_entropy_with_logits(cls, label) fluid.layers.sigmoid_cross_entropy_with_logits(cls, label)
...@@ -432,7 +428,7 @@ def gradient_penalty(f, real, fake, no_grad_set, cfg): ...@@ -432,7 +428,7 @@ def gradient_penalty(f, real, fake, no_grad_set, cfg):
gradient = gradient[0] gradient = gradient[0]
grad_shape = gradient.shape grad_shape = gradient.shape
gradient = fluid.layers.reshape( gradient = paddle.reshape(
gradient, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]] gradient, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]]
) )
......
...@@ -476,16 +476,16 @@ class MultiHeadAttentionLayer(Layer): ...@@ -476,16 +476,16 @@ class MultiHeadAttentionLayer(Layer):
v = self._v_fc(values) v = self._v_fc(values)
# split head # split head
reshaped_q = fluid.layers.reshape( reshaped_q = paddle.reshape(
x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False x=q, shape=[0, 0, self._n_head, self._d_key]
) )
transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3]) transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
reshaped_k = fluid.layers.reshape( reshaped_k = paddle.reshape(
x=k, shape=[0, 0, self._n_head, self._d_key], inplace=False x=k, shape=[0, 0, self._n_head, self._d_key]
) )
transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3]) transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3])
reshaped_v = fluid.layers.reshape( reshaped_v = paddle.reshape(
x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False x=v, shape=[0, 0, self._n_head, self._d_value]
) )
transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3]) transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3])
...@@ -514,10 +514,9 @@ class MultiHeadAttentionLayer(Layer): ...@@ -514,10 +514,9 @@ class MultiHeadAttentionLayer(Layer):
if len(out.shape) != 4: if len(out.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.") raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = fluid.layers.transpose(out, perm=[0, 2, 1, 3]) trans_x = fluid.layers.transpose(out, perm=[0, 2, 1, 3])
final_out = fluid.layers.reshape( final_out = paddle.reshape(
x=trans_x, x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]], shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=False,
) )
# fc to output # fc to output
...@@ -994,8 +993,8 @@ class WrapDecoderLayer(Layer): ...@@ -994,8 +993,8 @@ class WrapDecoderLayer(Layer):
dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias
) )
dec_output_reshape = fluid.layers.reshape( dec_output_reshape = paddle.reshape(
dec_output, shape=[-1, dec_output.shape[-1]], inplace=False dec_output, shape=[-1, dec_output.shape[-1]]
) )
if self._weight_sharing: if self._weight_sharing:
......
...@@ -43,8 +43,8 @@ def fc_with_inplace_net(use_feed): ...@@ -43,8 +43,8 @@ def fc_with_inplace_net(use_feed):
x, y = _feed_data_helper() x, y = _feed_data_helper()
fc = fluid.layers.fc(input=x, size=20, act='relu') fc = fluid.layers.fc(input=x, size=20, act='relu')
fc = fluid.layers.fc(input=fc, size=10, act='relu') fc = fluid.layers.fc(input=fc, size=10, act='relu')
reshape = fluid.layers.reshape(x=fc, shape=[-1, 2, 5]) reshape = paddle.reshape(x=fc, shape=[-1, 2, 5])
reshape = fluid.layers.reshape(x=reshape, shape=[-1, 5, 2]) reshape = paddle.reshape(x=reshape, shape=[-1, 5, 2])
y_predict = fluid.layers.fc(input=reshape, size=10, act='softmax') y_predict = fluid.layers.fc(input=reshape, size=10, act='softmax')
cost = fluid.layers.cross_entropy(input=y_predict, label=y) cost = fluid.layers.cross_entropy(input=y_predict, label=y)
avg_cost = paddle.mean(cost) avg_cost = paddle.mean(cost)
......
...@@ -148,7 +148,7 @@ class TestExpandDoubleGradCheck(unittest.TestCase): ...@@ -148,7 +148,7 @@ class TestExpandDoubleGradCheck(unittest.TestCase):
x = layers.data('x', x_shape, False, dtype) x = layers.data('x', x_shape, False, dtype)
x.persistable = True x.persistable = True
out = layers.reshape(x, new_shape) out = paddle.reshape(x, new_shape)
x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
gradient_checker.double_grad_check( gradient_checker.double_grad_check(
......
...@@ -301,11 +301,6 @@ class TestReshapeAPI(unittest.TestCase): ...@@ -301,11 +301,6 @@ class TestReshapeAPI(unittest.TestCase):
def _executed_api(self): def _executed_api(self):
self.reshape = paddle.reshape self.reshape = paddle.reshape
def _set_fluid_api(self):
self.fill_constant = fluid.layers.fill_constant
self.data = paddle.static.data
self.reshape = fluid.layers.reshape
def _test_api(self): def _test_api(self):
paddle.enable_static() paddle.enable_static()
input = np.random.random([2, 25]).astype("float32") input = np.random.random([2, 25]).astype("float32")
...@@ -317,18 +312,16 @@ class TestReshapeAPI(unittest.TestCase): ...@@ -317,18 +312,16 @@ class TestReshapeAPI(unittest.TestCase):
actual_shape = self.data(name="shape", shape=[3], dtype="int32") actual_shape = self.data(name="shape", shape=[3], dtype="int32")
# situation 1: have shape( list, no tensor), no actual shape(Tensor) # situation 1: have shape( list, no tensor)
out_1 = self.reshape(x, shape) out_1 = self.reshape(x, shape)
# situation 2: have shape(list, no tensor), have actual shape(Tensor) # situation 2: have shape(list, no tensor)
out_2 = fluid.layers.reshape( out_2 = paddle.reshape(x, actual_shape)
x, shape=shape, actual_shape=actual_shape
)
# Situation 3: have shape(list, have tensor), no actual shape(Tensor) # Situation 3: have shape(list, have tensor)
out_3 = self.reshape(x, shape=[positive_five, 10]) out_3 = self.reshape(x, shape=[positive_five, 10])
# Situation 4: have shape(Tensor), no actual shape(Tensor) # Situation 4: have shape(Tensor)
out_4 = self.reshape(x, shape=actual_shape) out_4 = self.reshape(x, shape=actual_shape)
exe = paddle.static.Executor(place=paddle.CPUPlace()) exe = paddle.static.Executor(place=paddle.CPUPlace())
...@@ -347,10 +340,6 @@ class TestReshapeAPI(unittest.TestCase): ...@@ -347,10 +340,6 @@ class TestReshapeAPI(unittest.TestCase):
self._set_paddle_api() self._set_paddle_api()
self._test_api() self._test_api()
def test_fluid_api(self):
self._set_fluid_api()
self._test_api()
def test_imperative(self): def test_imperative(self):
self._set_paddle_api() self._set_paddle_api()
input = np.random.random([2, 25]).astype("float32") input = np.random.random([2, 25]).astype("float32")
...@@ -401,10 +390,6 @@ class TestReshapeOpError(unittest.TestCase): ...@@ -401,10 +390,6 @@ class TestReshapeOpError(unittest.TestCase):
self.data = paddle.static.data self.data = paddle.static.data
self.reshape = paddle.reshape self.reshape = paddle.reshape
def _set_fluid_api(self):
self.data = fluid.data
self.reshape = fluid.layers.reshape
def _test_errors(self): def _test_errors(self):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
# The x type of reshape_op must be Variable. # The x type of reshape_op must be Variable.
...@@ -439,12 +424,6 @@ class TestReshapeOpError(unittest.TestCase): ...@@ -439,12 +424,6 @@ class TestReshapeOpError(unittest.TestCase):
self.assertRaises(TypeError, test_shape_type) self.assertRaises(TypeError, test_shape_type)
# The argument actual_shape's type of reshape_op must be Variable or None.
def test_actual_shape_type():
self.reshape(x3, shape=[25, 2], actual_shape=1)
self.assertRaises(TypeError, test_actual_shape_type)
# The argument shape have more than one -1. # The argument shape have more than one -1.
def test_shape_1(): def test_shape_1():
self.reshape(x3, shape=[-1, -1, 5]) self.reshape(x3, shape=[-1, -1, 5])
...@@ -467,10 +446,6 @@ class TestReshapeOpError(unittest.TestCase): ...@@ -467,10 +446,6 @@ class TestReshapeOpError(unittest.TestCase):
self._set_paddle_api() self._set_paddle_api()
self._test_errors() self._test_errors()
def test_fluid_api_error(self):
self._set_fluid_api()
self._test_errors()
class TestDygraphReshapeAPI(unittest.TestCase): class TestDygraphReshapeAPI(unittest.TestCase):
def setUp(self): def setUp(self):
......
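The `test_reshape_op` hunk above removes the `actual_shape` keyword and the fluid-only test paths; with `paddle.reshape` a runtime shape is passed directly through `shape`, either as a Python list or as a Tensor. A static-graph sketch along the lines of the remaining test (names are illustrative):

```python
import paddle

paddle.enable_static()
x = paddle.static.data(name="x", shape=[2, 25], dtype="float32")
shape_tensor = paddle.static.data(name="shape", shape=[3], dtype="int32")

# Old: fluid.layers.reshape(x, shape=[...], actual_shape=shape_tensor)
# New: the shape Tensor goes straight into `shape`.
out = paddle.reshape(x, shape=shape_tensor)
```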
...@@ -92,12 +92,10 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -92,12 +92,10 @@ class SimpleLSTMRNN(fluid.Layer):
pre_cell = fluid.layers.slice( pre_cell = fluid.layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1] init_cell, axes=[0], starts=[i], ends=[i + 1]
) )
pre_hidden = fluid.layers.reshape( pre_hidden = paddle.reshape(
pre_hidden, shape=[-1, self._hidden_size] pre_hidden, shape=[-1, self._hidden_size]
) )
pre_cell = fluid.layers.reshape( pre_cell = paddle.reshape(pre_cell, shape=[-1, self._hidden_size])
pre_cell, shape=[-1, self._hidden_size]
)
self.hidden_array.append(pre_hidden) self.hidden_array.append(pre_hidden)
self.cell_array.append(pre_cell) self.cell_array.append(pre_cell)
...@@ -106,7 +104,7 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -106,7 +104,7 @@ class SimpleLSTMRNN(fluid.Layer):
self._input = fluid.layers.slice( self._input = fluid.layers.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1] input_embedding, axes=[1], starts=[index], ends=[index + 1]
) )
self._input = fluid.layers.reshape( self._input = paddle.reshape(
self._input, shape=[-1, self._hidden_size] self._input, shape=[-1, self._hidden_size]
) )
for k in range(self._num_layers): for k in range(self._num_layers):
...@@ -137,19 +135,17 @@ class SimpleLSTMRNN(fluid.Layer): ...@@ -137,19 +135,17 @@ class SimpleLSTMRNN(fluid.Layer):
dropout_implementation='upscale_in_train', dropout_implementation='upscale_in_train',
) )
res.append( res.append(
fluid.layers.reshape( paddle.reshape(self._input, shape=[1, -1, self._hidden_size])
self._input, shape=[1, -1, self._hidden_size]
)
) )
real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.concat(res, 0)
real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.concat(self.hidden_array, 1)
last_hidden = fluid.layers.reshape( last_hidden = paddle.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size] last_hidden, shape=[-1, self._num_layers, self._hidden_size]
) )
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.concat(self.cell_array, 1)
last_cell = fluid.layers.reshape( last_cell = paddle.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size] last_cell, shape=[-1, self._num_layers, self._hidden_size]
) )
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
...@@ -210,18 +206,18 @@ class PtbModel(fluid.Layer): ...@@ -210,18 +206,18 @@ class PtbModel(fluid.Layer):
) )
def forward(self, input, label, init_hidden, init_cell): def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape( init_h = paddle.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size] init_hidden, shape=[self.num_layers, -1, self.hidden_size]
) )
init_c = fluid.layers.reshape( init_c = paddle.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size] init_cell, shape=[self.num_layers, -1, self.hidden_size]
) )
# NPU 'top_k' kernel only supports `int32` dtype, so cast `input` from `int64` to `int32`. # NPU 'top_k' kernel only supports `int32` dtype, so cast `input` from `int64` to `int32`.
input = fluid.layers.cast(input, "int32") input = fluid.layers.cast(input, "int32")
x_emb = self.embedding(input) x_emb = self.embedding(input)
x_emb = fluid.layers.reshape( x_emb = paddle.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size] x_emb, shape=[-1, self.num_steps, self.hidden_size]
) )
if self.dropout is not None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
...@@ -234,18 +230,16 @@ class PtbModel(fluid.Layer): ...@@ -234,18 +230,16 @@ class PtbModel(fluid.Layer):
x_emb, init_h, init_c x_emb, init_h, init_c
) )
rnn_out = fluid.layers.reshape( rnn_out = paddle.reshape(
rnn_out, shape=[-1, self.num_steps, self.hidden_size] rnn_out, shape=[-1, self.num_steps, self.hidden_size]
) )
projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias) projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
projection = fluid.layers.reshape( projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
projection, shape=[-1, self.vocab_size]
)
loss = fluid.layers.softmax_with_cross_entropy( loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False logits=projection, label=label, soft_label=False
) )
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss) loss = fluid.layers.reduce_sum(loss)
......
...@@ -727,7 +727,7 @@ class TestVarBase(unittest.TestCase): ...@@ -727,7 +727,7 @@ class TestVarBase(unittest.TestCase):
var3 = var[0:1] var3 = var[0:1]
var4 = var[::-1] var4 = var[::-1]
var5 = var[1, 1:, 1:] var5 = var[1, 1:, 1:]
var_reshape = fluid.layers.reshape(var, [3, -1, 3]) var_reshape = paddle.reshape(var, [3, -1, 3])
var6 = var_reshape[:, :, -1] var6 = var_reshape[:, :, -1]
var7 = var[:, :, :-1] var7 = var[:, :, :-1]
var8 = var[:1, :1, :1] var8 = var[:1, :1, :1]
...@@ -820,7 +820,7 @@ class TestVarBase(unittest.TestCase): ...@@ -820,7 +820,7 @@ class TestVarBase(unittest.TestCase):
var3 = var[0:one] var3 = var[0:one]
var4 = var[::negative_one] var4 = var[::negative_one]
var5 = var[one, one:, one:] var5 = var[one, one:, one:]
var_reshape = fluid.layers.reshape(var, [3, negative_one, 3]) var_reshape = paddle.reshape(var, [3, negative_one, 3])
var6 = var_reshape[:, :, negative_one] var6 = var_reshape[:, :, negative_one]
var7 = var[:, :, :negative_one] var7 = var[:, :, :negative_one]
var8 = var[:one, :one, :1] var8 = var[:one, :one, :1]
......
...@@ -156,7 +156,7 @@ class TestVariable(unittest.TestCase): ...@@ -156,7 +156,7 @@ class TestVariable(unittest.TestCase):
var3 = var[0:1] var3 = var[0:1]
var4 = var[::-1] var4 = var[::-1]
var5 = var[1, 1:, 1:] var5 = var[1, 1:, 1:]
var_reshape = fluid.layers.reshape(var, [3, -1, 3]) var_reshape = paddle.reshape(var, [3, -1, 3])
var6 = var_reshape[:, :, -1] var6 = var_reshape[:, :, -1]
var7 = var[:, :, :-1] var7 = var[:, :, :-1]
var8 = var[:1, :1, :1] var8 = var[:1, :1, :1]
......
...@@ -92,7 +92,7 @@ class TestApiWhileLoop(unittest.TestCase): ...@@ -92,7 +92,7 @@ class TestApiWhileLoop(unittest.TestCase):
test_dict["test_key"] = i test_dict["test_key"] = i
test_dict["test_key"] += 1 test_dict["test_key"] += 1
test_list[0] = fluid.layers.reshape(test_list[0], [2, -1]) + 1 test_list[0] = paddle.reshape(test_list[0], [2, -1]) + 1
test_list_dict[0]["test_key"] += 1 test_list_dict[0]["test_key"] += 1
test_list_dict[0]["test_key"] = fluid.layers.relu( test_list_dict[0]["test_key"] = fluid.layers.relu(
......
...@@ -115,7 +115,7 @@ def multi_head_attention( ...@@ -115,7 +115,7 @@ def multi_head_attention(
hidden_size = x.shape[-1] hidden_size = x.shape[-1]
# FIXME(guosheng): Decouple the program desc with batch_size. # FIXME(guosheng): Decouple the program desc with batch_size.
reshaped = layers.reshape( reshaped = paddle.reshape(
x=x, shape=[batch_size, -1, n_head, hidden_size // n_head] x=x, shape=[batch_size, -1, n_head, hidden_size // n_head]
) )
...@@ -135,7 +135,7 @@ def multi_head_attention( ...@@ -135,7 +135,7 @@ def multi_head_attention(
trans_x = layers.transpose(x, perm=[0, 2, 1, 3]) trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# FIXME(guosheng): Decouple the program desc with batch_size. # FIXME(guosheng): Decouple the program desc with batch_size.
return layers.reshape( return paddle.reshape(
x=trans_x, x=trans_x,
shape=list( shape=list(
map(int, [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]]) map(int, [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]])
...@@ -281,7 +281,7 @@ def prepare_encoder( ...@@ -281,7 +281,7 @@ def prepare_encoder(
enc_input = src_word_emb + src_pos_enc enc_input = src_word_emb + src_pos_enc
# FIXME(guosheng): Decouple the program desc with batch_size. # FIXME(guosheng): Decouple the program desc with batch_size.
enc_input = layers.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim]) enc_input = paddle.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim])
return ( return (
layers.dropout(enc_input, dropout_prob=dropout, is_test=False) layers.dropout(enc_input, dropout_prob=dropout, is_test=False)
if dropout if dropout
...@@ -581,7 +581,7 @@ def transformer( ...@@ -581,7 +581,7 @@ def transformer(
# TODO(guosheng): Share the weight matrix between the embedding layers and # TODO(guosheng): Share the weight matrix between the embedding layers and
# the pre-softmax linear transformation. # the pre-softmax linear transformation.
predict = layers.reshape( predict = paddle.reshape(
x=layers.fc( x=layers.fc(
input=dec_output, input=dec_output,
size=trg_vocab_size, size=trg_vocab_size,
...@@ -590,8 +590,8 @@ def transformer( ...@@ -590,8 +590,8 @@ def transformer(
num_flatten_dims=2, num_flatten_dims=2,
), ),
shape=[-1, trg_vocab_size], shape=[-1, trg_vocab_size],
act="softmax",
) )
predict = paddle.nn.functional.softmax(predict)
cost = layers.cross_entropy(input=predict, label=gold) cost = layers.cross_entropy(input=predict, label=gold)
weighted_cost = cost * weights weighted_cost = cost * weights
......
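The transformer hunk above also shows that `paddle.reshape` has no `act` argument, so the fused `act="softmax"` is replaced by an explicit `paddle.nn.functional.softmax` call on the reshaped logits. A minimal dynamic-graph sketch with made-up sizes:

```python
import paddle
import paddle.nn.functional as F

trg_vocab_size = 10                                   # illustrative vocab size
dec_output = paddle.rand([4, 6, trg_vocab_size])      # stand-in for the fc output

predict = paddle.reshape(dec_output, shape=[-1, trg_vocab_size])
predict = F.softmax(predict)                          # was act="softmax" on the reshape
print(predict.shape)                                  # [24, 10]
```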