Unverified commit 0707c0af, authored by 201716010711, committed by GitHub

delete slice api (#48399)

Parent 57e097ac
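This commit removes the legacy ``fluid.layers.slice`` wrapper and redirects every call site to ``paddle.slice``, which takes the same ``axes``/``starts``/``ends`` arguments. A minimal before/after sketch of the migration (tensor name and shape are illustrative):

.. code-block:: python

    import paddle

    x = paddle.rand([4, 5, 6], dtype='float32')

    # Before this commit (API removed here):
    #     out = paddle.fluid.layers.slice(x, axes=[0, 1], starts=[1, 0], ends=[2, 3])
    # After this commit:
    out = paddle.slice(x, axes=[0, 1], starts=[1, 0], ends=[2, 3])
    print(out.shape)  # [1, 3, 6]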
@@ -36,7 +36,6 @@ from paddle.fluid import core
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.framework import Variable, convert_np_dtype_to_dtype_
from paddle.fluid.layers import slice
import paddle
import warnings
from paddle import _C_ops, _legacy_C_ops
@@ -1540,13 +1539,13 @@ def tdm_sampler(
for layer_sample_num in neg_samples_num_list:
end_offset = start_offset + layer_sample_num + positive_flag
layer_samples = slice(
layer_samples = paddle.slice(
out, axes=[1], starts=[start_offset], ends=[end_offset]
)
layer_labels = slice(
layer_labels = paddle.slice(
labels, axes=[1], starts=[start_offset], ends=[end_offset]
)
layer_mask = slice(
layer_mask = paddle.slice(
mask, axes=[1], starts=[start_offset], ends=[end_offset]
)
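Each loop iteration above carves a contiguous column range out of ``out``, ``labels`` and ``mask``. A standalone sketch of that pattern, with illustrative shapes and offsets:

.. code-block:: python

    import paddle

    out = paddle.arange(12, dtype='float32').reshape([2, 6])  # [batch, columns]
    start_offset, layer_sample_num, positive_flag = 0, 2, 1
    end_offset = start_offset + layer_sample_num + positive_flag
    layer_samples = paddle.slice(out, axes=[1], starts=[start_offset], ends=[end_offset])
    print(layer_samples.shape)  # [2, 3]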
@@ -29,7 +29,6 @@ from paddle.fluid.layers import (
from paddle.fluid.layers import (
assign,
fill_constant,
slice,
reduce_all,
reduce_any,
)
@@ -819,7 +818,7 @@ def _slice_tensor_array(array, start, end):
return null_array
def false_fn(array, start, end):
new_array = slice(array, starts=[start], ends=[end], axes=[0])
new_array = paddle.slice(array, starts=[start], ends=[end], axes=[0])
return new_array
new_array = cond(start == end, true_fn, lambda: false_fn(array, start, end))
@@ -1768,7 +1768,7 @@ def ssd_loss(
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape = paddle.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
@@ -110,7 +110,6 @@ __all__ = [
'gaussian_random',
'sampling_id',
'sum',
'slice',
'shape',
'clip',
'clip_by_norm',
@@ -6007,261 +6006,6 @@ def sum(x):
return paddle.add_n(x)
@templatedoc()
def slice(input, axes, starts, ends):
"""
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end positions for each axis in the list of axes, and uses this information
to slice the input data tensor. A negative value :math:`-i` passed to
``starts`` or ``ends`` denotes position :math:`n - i` counting from the start
of that axis, where :math:`n` is the size of the axis.
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it is clamped to n.
For slicing to the end of a dimension of unknown size, it is recommended
to pass in INT_MAX. The sizes of ``axes``, ``starts`` and ``ends`` must be equal.
Following examples will explain how slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000] # -1 means the slice stops one element before the end of dimension 0.
Then:
result = [ [2, 3, 4], ] # result = data[0:1, 1:4]
Args:
input (Tensor): A ``Tensor``. The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32``. Axes that ``starts`` and ``ends`` apply to.
starts (list|tuple|Tensor): The data type is ``int32``. If ``starts`` is a list or tuple, its elements
should be integers or Tensors with shape [1]. If ``starts`` is a Tensor, it should be a 1-D Tensor.
It represents the starting indices of the corresponding axes in ``axes``.
ends (list|tuple|Tensor): The data type is ``int32``. If ``ends`` is a list or tuple, its elements
should be integers or Tensors with shape [1]. If ``ends`` is a Tensor, it should be a 1-D Tensor.
It represents the ending indices of the corresponding axes in ``axes``.
Returns:
Tensor: A ``Tensor``. The data type is the same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Tensor.
TypeError: The type of ``ends`` must be list, tuple or Tensor.
Examples:
.. code-block:: python
import paddle
input = paddle.rand(shape=[4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
sliced_1 = paddle.slice(input, axes=axes, starts=starts, ends=ends)
# sliced_1 is input[0:3, 0:2, 2:4].
# example 2:
# attr starts is a list which contain tensor.
minus_3 = paddle.full([1], -3, "int32")
sliced_2 = paddle.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
# sliced_2 is input[0:3, 0:2, 2:4].
"""
if in_dygraph_mode():
attrs = ()
starts_tensor = None
ends_tensor = None
if isinstance(axes, (list, tuple)):
axes = list(axes)
if len(axes) == 0:
raise ValueError(
"Input axes should not be an empty list/tuple."
)
for i in range(len(axes)):
if axes[i] < 0:
axes[i] = max(0, axes[i] + len(input.shape))
else:
axes[i] = min(len(input.shape) - 1, axes[i])
else:
raise ValueError(
"Input axes must be a python list or tuple, but reveived {}".format(
type(axes)
)
)
infer_flags = list(1 for i in range(len(axes)))
tmp_tensor_type = core.eager.Tensor
if isinstance(starts, (list, tuple)):
starts = [
item.numpy().item(0)
if isinstance(item, tmp_tensor_type)
else item
for item in starts
]
elif isinstance(starts, tmp_tensor_type):
tensor_t = starts.numpy()
starts = [ele for ele in tensor_t]
if isinstance(ends, (list, tuple)):
ends = [
item.numpy().item(0)
if isinstance(item, tmp_tensor_type)
else item
for item in ends
]
attrs += ('ends', ends)
elif isinstance(ends, tmp_tensor_type):
tensor_t = ends.numpy()
ends = [ele for ele in tensor_t]
return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
else:
if _in_legacy_dygraph():
attrs = ()
starts_tensor = None
ends_tensor = None
if isinstance(axes, (list, tuple)):
axes = list(axes)
if len(axes) == 0:
raise ValueError(
"Input axes should not be an empty list/tuple."
)
for i in range(len(axes)):
if axes[i] < 0:
axes[i] = max(0, axes[i] + len(input.shape))
else:
axes[i] = min(len(input.shape) - 1, axes[i])
else:
raise ValueError(
"Input axes must be a python list or tuple, but reveived {}".format(
type(axes)
)
)
infer_flags = list(1 for i in range(len(axes)))
tmp_tensor_type = Variable
if isinstance(starts, (list, tuple)):
starts = [
item.numpy().item(0)
if isinstance(item, tmp_tensor_type)
else item
for item in starts
]
attrs += ('starts', starts)
elif isinstance(starts, tmp_tensor_type):
starts_tensor = starts
starts.stop_gradient = True
infer_flags = list(-1 for i in range(len(axes)))
if isinstance(ends, (list, tuple)):
ends = [
item.numpy().item(0)
if isinstance(item, tmp_tensor_type)
else item
for item in ends
]
attrs += ('ends', ends)
elif isinstance(ends, tmp_tensor_type):
ends_tensor = ends
ends_tensor.stop_gradient = True
infer_flags = list(-1 for i in range(len(axes)))
return _legacy_C_ops.slice(
input,
starts_tensor,
ends_tensor,
None,
None,
'axes',
axes,
'infer_flags',
infer_flags,
*attrs,
)
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
"Input starts must be an Variable, python list or tuple."
)
if not isinstance(ends, (list, tuple, Variable)):
raise ValueError(
"Input ends must be an Variable, python list or tuple."
)
helper = LayerHelper('slice', **locals())
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if utils._contain_var(starts):
inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if utils._contain_var(ends):
inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# infer_flags
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input')
)
helper.append_op(
type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out}
)
return out
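The indexing semantics documented above carry over to ``paddle.slice`` unchanged; a quick check of the docstring's Case 2:

.. code-block:: python

    import paddle

    data = paddle.to_tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
    # -1 stops one element before the end of axis 0; 1000 is clamped
    # to the size of axis 1, so this is data[0:1, 1:4].
    result = paddle.slice(data, axes=[0, 1], starts=[0, 1], ends=[-1, 1000])
    print(result.numpy())  # [[2 3 4]]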
def shape(input):
"""
:alias_main: paddle.shape
@@ -2089,7 +2089,7 @@ class TrainingHelper(DecodeHelper):
def _slice(x): # TODO: use Variable.__getitem__
axes = [0 if self.time_major else 1]
return paddle.squeeze(
nn.slice(
paddle.slice(
x, axes=axes, starts=[next_time], ends=[next_time + 1]
),
axis=axes,
@@ -291,7 +291,7 @@ class BertModelLayer(Layer):
#
# if not self.return_pooled_out:
# return enc_output
next_sent_feat = fluid.layers.slice(
next_sent_feat = paddle.slice(
input=enc_output, axes=[1], starts=[0], ends=[1]
)
next_sent_feat = self.pooled_fc(next_sent_feat)
@@ -412,10 +412,10 @@ def bmn_loss_func(
return loss
pred_bm_reg = paddle.squeeze(
fluid.layers.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1]
paddle.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1]
)
pred_bm_cls = paddle.squeeze(
fluid.layers.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axis=[1]
paddle.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axis=[1]
)
bm_mask = _get_mask(cfg)
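Selecting a single channel and dropping its axis, as ``bmn_loss_func`` does above, composes ``paddle.slice`` with ``paddle.squeeze``; a sketch with an illustrative shape:

.. code-block:: python

    import paddle

    pred_bm = paddle.rand([4, 2, 100, 100])  # [N, C, D, T], channel 0 = regression
    pred_bm_reg = paddle.squeeze(
        paddle.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1]
    )
    print(pred_bm_reg.shape)  # [4, 100, 100]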
@@ -85,9 +85,7 @@ class DynamicGRU(fluid.dygraph.Layer):
j = i
# input_ = inputs[:, j:j+1, :] # original code
input_ = fluid.layers.slice(
inputs, axes=[1], starts=[j], ends=[j + 1]
)
input_ = paddle.slice(inputs, axes=[1], starts=[j], ends=[j + 1])
input_ = paddle.reshape(input_, [-1, input_.shape[2]])
hidden, reset, gate = self.gru_unit(input_, hidden)
hidden_ = paddle.reshape(hidden, [-1, 1, hidden.shape[1]])
@@ -206,9 +206,7 @@ class Upsample(fluid.dygraph.Layer):
def forward(self, inputs):
# get dynamic upsample output shape
shape_nchw = fluid.layers.shape(inputs)
shape_hw = fluid.layers.slice(
shape_nchw, axes=[0], starts=[2], ends=[4]
)
shape_hw = paddle.slice(shape_nchw, axes=[0], starts=[2], ends=[4])
shape_hw.stop_gradient = True
in_shape = fluid.layers.cast(shape_hw, dtype='int32')
out_shape = in_shape * self.scale
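Slicing the runtime shape tensor to get the spatial dimensions, as in ``Upsample.forward``, also works with the public APIs alone; a dygraph sketch (the scale factor is illustrative):

.. code-block:: python

    import paddle

    inputs = paddle.rand([8, 3, 16, 16])
    shape_nchw = paddle.shape(inputs)  # 1-D int32 tensor: [8, 3, 16, 16]
    shape_hw = paddle.slice(shape_nchw, axes=[0], starts=[2], ends=[4])
    shape_hw.stop_gradient = True
    out_shape = shape_hw * 2  # scale factor 2 -> [32, 32]
    print(out_shape.numpy())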
@@ -51,7 +51,7 @@ class TestBase(IPUOpTest):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
)
out = paddle.fluid.layers.slice(x, **self.attrs)
out = paddle.slice(x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
@@ -105,9 +105,7 @@ class TestCase2(TestBase):
ends = paddle.static.data(
name=self.feed_list[2], shape=self.feed_shape[2], dtype='int32'
)
out = paddle.fluid.layers.slice(
x, starts=starts, ends=ends, **self.attrs
)
out = paddle.slice(x, starts=starts, ends=ends, **self.attrs)
self.fetch_list = [out.name]
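As this test exercises, ``starts`` and ``ends`` may themselves be 1-D int32 tensors rather than Python lists; a dygraph sketch:

.. code-block:: python

    import paddle

    x = paddle.rand([4, 5, 6])
    starts = paddle.to_tensor([0, 1], dtype='int32')
    ends = paddle.to_tensor([2, 3], dtype='int32')
    out = paddle.slice(x, axes=[0, 1], starts=starts, ends=ends)
    print(out.shape)  # [2, 2, 6]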
@@ -17,6 +17,7 @@ import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
@@ -49,9 +50,7 @@ class SlicePluginTRTDynamicTest(InferencePassTest):
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
slice_out = fluid.layers.slice(
data, axes=axes, starts=starts, ends=ends
)
slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)
self.feeds = {
"data": np.random.random((3, 3, 3, 3)).astype("float32"),
@@ -17,6 +17,7 @@ import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
@@ -43,9 +44,7 @@ class SlicePluginTRTTest(InferencePassTest):
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
slice_out = fluid.layers.slice(
data, axes=axes, starts=starts, ends=ends
)
slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)
out = fluid.layers.batch_norm(slice_out, is_test=True)
self.feeds = {
@@ -114,9 +113,7 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest):
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
slice_out = fluid.layers.slice(
data, axes=axes, starts=starts, ends=ends
)
slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)
cast_out = fluid.layers.cast(slice_out, 'float32')
out = fluid.layers.batch_norm(cast_out, is_test=True)
@@ -141,9 +138,7 @@ class StaticSlicePluginTRTTestInt32(SlicePluginTRTTest):
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
slice_out = fluid.layers.slice(
data, axes=axes, starts=starts, ends=ends
)
slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)
cast_out = fluid.layers.cast(slice_out, 'float32')
out = fluid.layers.batch_norm(cast_out, is_test=True)
@@ -143,10 +143,10 @@ def lm_model(
)
bias_arr.append(bias_1)
pre_hidden = layers.slice(
pre_hidden = paddle.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1]
)
pre_cell = layers.slice(
pre_cell = paddle.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1]
)
pre_hidden = paddle.reshape(pre_hidden, shape=[-1, hidden_size])
@@ -169,22 +169,22 @@ def lm_model(
gate_input = layers.matmul(x=nn, y=weight_1)
gate_input = layers.elementwise_add(gate_input, bias)
i = layers.slice(
i = paddle.slice(
gate_input, axes=[1], starts=[0], ends=[hidden_size]
)
j = layers.slice(
j = paddle.slice(
gate_input,
axes=[1],
starts=[hidden_size],
ends=[hidden_size * 2],
)
f = layers.slice(
f = paddle.slice(
gate_input,
axes=[1],
starts=[hidden_size * 2],
ends=[hidden_size * 3],
)
o = layers.slice(
o = paddle.slice(
gate_input,
axes=[1],
starts=[hidden_size * 3],
@@ -222,11 +222,11 @@ def lm_model(
c = rnnout[i * 2 + 1]
m.stop_gradient = True
c.stop_gradient = True
last_h = layers.slice(
last_h = paddle.slice(
m, axes=[0], starts=[num_steps - 1], ends=[num_steps]
)
last_hidden_array.append(last_h)
last_c = layers.slice(
last_c = paddle.slice(
c, axes=[0], starts=[num_steps - 1], ends=[num_steps]
)
last_cell_array.append(last_c)
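Grabbing the final time step, as ``last_h``/``last_c`` do above, slices a length-1 range on axis 0; in isolation (sizes are illustrative):

.. code-block:: python

    import paddle

    num_steps, batch, hidden = 5, 2, 4
    m = paddle.rand([num_steps, batch, hidden])
    last_h = paddle.slice(m, axes=[0], starts=[num_steps - 1], ends=[num_steps])
    print(last_h.shape)  # [1, 2, 4]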
@@ -264,10 +264,10 @@ def lm_model(
)
bias_arr.append(bias_1)
pre_hidden = layers.slice(
pre_hidden = paddle.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1]
)
pre_cell = layers.slice(
pre_cell = paddle.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1]
)
pre_hidden = paddle.reshape(pre_hidden, shape=[-1, hidden_size])
@@ -137,9 +137,7 @@ class SimpleRNN(fluid.Layer):
)
pre_hidden = init_hidden
for i in range(self.seq_len):
input = fluid.layers.slice(
inputs, axes=[1], starts=[i], ends=[i + 1]
)
input = paddle.slice(inputs, axes=[1], starts=[i], ends=[i + 1])
input = paddle.reshape(input, shape=[1, 3])
out_softmax, pre_hidden = self._cell(input, pre_hidden)
outs.append(out_softmax)
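The recurrent loops in these files all step through time the same way: slice one column, flatten it, feed the cell. A minimal sketch with illustrative sizes:

.. code-block:: python

    import paddle

    batch, seq_len, width = 1, 4, 3
    inputs = paddle.rand([batch, seq_len, width])
    for i in range(seq_len):
        step = paddle.slice(inputs, axes=[1], starts=[i], ends=[i + 1])
        step = paddle.reshape(step, shape=[batch, width])
        # feed `step` into the recurrent cell here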
@@ -192,9 +192,7 @@ class DynamicGRU(fluid.dygraph.Layer):
for i in range(inputs.shape[1]):
if self.is_reverse:
i = inputs.shape[1] - 1 - i
input_ = fluid.layers.slice(
inputs, axes=[1], starts=[i], ends=[i + 1]
)
input_ = paddle.slice(inputs, axes=[1], starts=[i], ends=[i + 1])
input_ = paddle.reshape(input_, [-1, input_.shape[2]])
hidden, reset, gate = self.gru_unit(input_, hidden)
hidden_ = paddle.reshape(hidden, [-1, 1, hidden.shape[1]])
@@ -356,7 +354,7 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer):
res = []
hidden_mem = decoder_boot
for i in range(target_embedding.shape[1]):
current_word = fluid.layers.slice(
current_word = paddle.slice(
target_embedding, axes=[1], starts=[i], ends=[i + 1]
)
current_word = paddle.reshape(
@@ -399,7 +397,7 @@ class OCRAttention(fluid.dygraph.Layer):
def forward(self, inputs, label_in):
gru_backward, encoded_vector, encoded_proj = self.encoder_net(inputs)
backward_first = fluid.layers.slice(
backward_first = paddle.slice(
gru_backward, axes=[1], starts=[0], ends=[1]
)
backward_first = paddle.reshape(
@@ -79,10 +79,10 @@ class SimpleLSTMRNN(fluid.Layer):
self.hidden_array = []
for i in range(self._num_layers):
pre_hidden = fluid.layers.slice(
pre_hidden = paddle.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1]
)
pre_cell = fluid.layers.slice(
pre_cell = paddle.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1]
)
pre_hidden = paddle.reshape(
@@ -94,7 +94,7 @@ class SimpleLSTMRNN(fluid.Layer):
res = []
for index in range(self._num_steps):
self._input = fluid.layers.slice(
self._input = paddle.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1]
)
self._input = paddle.reshape(
@@ -74,10 +74,10 @@ class SimpleLSTMRNN(fluid.Layer):
self.hidden_array = []
for i in range(self._num_layers):
pre_hidden = fluid.layers.slice(
pre_hidden = paddle.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1]
)
pre_cell = fluid.layers.slice(
pre_cell = paddle.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1]
)
pre_hidden = paddle.reshape(
@@ -89,7 +89,7 @@ class SimpleLSTMRNN(fluid.Layer):
res = []
for index in range(self._num_steps):
self._input = fluid.layers.slice(
self._input = paddle.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1]
)
self._input = paddle.reshape(
@@ -77,10 +77,10 @@ class SimpleLSTMRNN(fluid.Layer):
self.hidden_array = []
for i in range(self._num_layers):
pre_hidden = fluid.layers.slice(
pre_hidden = paddle.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1]
)
pre_cell = fluid.layers.slice(
pre_cell = paddle.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1]
)
pre_hidden = paddle.reshape(
@@ -92,7 +92,7 @@ class SimpleLSTMRNN(fluid.Layer):
res = []
for index in range(self._num_steps):
self._input = fluid.layers.slice(
self._input = paddle.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1]
)
self._input = paddle.reshape(
@@ -3584,7 +3584,7 @@ class TestBook(LayerTest):
name="input", shape=[3, 4, 5, 6], dtype='float32'
)
out = layers.slice(input, axes=axes, starts=starts, ends=ends)
out = paddle.slice(input, axes=axes, starts=starts, ends=ends)
return out
def make_scale_variable(self):
@@ -30,7 +30,7 @@ class TestSliceOpDoubleGradCheck(unittest.TestCase):
def func(self, place):
self.config()
out = fluid.layers.slice(
out = paddle.slice(
self.inputs, axes=self.axes, starts=self.starts, ends=self.ends
)
gradient_checker.double_grad_check(
@@ -85,10 +85,10 @@ class SimpleLSTMRNN(fluid.Layer):
self.hidden_array = []
for i in range(self._num_layers):
pre_hidden = fluid.layers.slice(
pre_hidden = paddle.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1]
)
pre_cell = fluid.layers.slice(
pre_cell = paddle.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1]
)
pre_hidden = paddle.reshape(
@@ -100,7 +100,7 @@ class SimpleLSTMRNN(fluid.Layer):
res = []
for index in range(self._num_steps):
self._input = fluid.layers.slice(
self._input = paddle.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1]
)
self._input = paddle.reshape(