Commit 0707c0af (unverified)
Authored on Nov 29, 2022 by 201716010711 and committed via GitHub on Nov 29, 2022.
delete slice api (#48399)
Parent: 57e097ac
Showing 21 changed files with 47 additions and 321 deletions (+47, -321).
Changed files:

python/paddle/fluid/contrib/layers/nn.py  (+3, -4)
python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py  (+1, -2)
python/paddle/fluid/layers/detection.py  (+1, -1)
python/paddle/fluid/layers/nn.py  (+0, -256)
python/paddle/fluid/layers/rnn.py  (+1, -1)
python/paddle/fluid/tests/unittests/dygraph_to_static/bert_dygraph_model.py  (+1, -1)
python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py  (+2, -2)
python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py  (+1, -3)
python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py  (+1, -3)
python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py  (+2, -4)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py  (+2, -3)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py  (+4, -9)
python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py  (+10, -10)
python/paddle/fluid/tests/unittests/test_imperative_basic.py  (+1, -3)
python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py  (+3, -5)
python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py  (+3, -3)
python/paddle/fluid/tests/unittests/test_imperative_save_load.py  (+3, -3)
python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py  (+3, -3)
python/paddle/fluid/tests/unittests/test_layers.py  (+1, -1)
python/paddle/fluid/tests/unittests/test_nn_grad.py  (+1, -1)
python/paddle/fluid/tests/unittests/test_static_save_load.py  (+3, -3)
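
Apart from python/paddle/fluid/layers/nn.py, where the fluid-level slice definition itself is deleted, the files above all receive the same mechanical change: each call to the fluid-level slice (fluid.layers.slice, layers.slice, nn.slice, or the bare slice imported from paddle.fluid.layers) is replaced by paddle.slice with the same axes/starts/ends arguments. A minimal before/after sketch of that migration (hypothetical tensor, not taken from the diff):

    import paddle

    x = paddle.arange(12, dtype='float32').reshape([3, 4])

    # Before this commit (API removed here):
    #   y = fluid.layers.slice(x, axes=[1], starts=[1], ends=[3])
    # After:
    y = paddle.slice(x, axes=[1], starts=[1], ends=[3])  # keeps columns 1 and 2
    print(y.shape)  # [3, 2]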

python/paddle/fluid/contrib/layers/nn.py
@@ -36,7 +36,6 @@ from paddle.fluid import core
 from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.framework import Variable, convert_np_dtype_to_dtype_
-from paddle.fluid.layers import slice
 import paddle
 import warnings
 from paddle import _C_ops, _legacy_C_ops
@@ -1540,13 +1539,13 @@ def tdm_sampler(
     for layer_sample_num in neg_samples_num_list:
         end_offset = start_offset + layer_sample_num + positive_flag
-        layer_samples = slice(
+        layer_samples = paddle.slice(
             out, axes=[1], starts=[start_offset], ends=[end_offset]
         )
-        layer_labels = slice(
+        layer_labels = paddle.slice(
             labels, axes=[1], starts=[start_offset], ends=[end_offset]
         )
-        layer_mask = slice(
+        layer_mask = paddle.slice(
             mask, axes=[1], starts=[start_offset], ends=[end_offset]
         )

python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py
@@ -29,7 +29,6 @@ from paddle.fluid.layers import (
 from paddle.fluid.layers import (
     assign,
     fill_constant,
-    slice,
     reduce_all,
     reduce_any,
 )
@@ -819,7 +818,7 @@ def _slice_tensor_array(array, start, end):
         return null_array

     def false_fn(array, start, end):
-        new_array = slice(array, starts=[start], ends=[end], axes=[0])
+        new_array = paddle.slice(array, starts=[start], ends=[end], axes=[0])
         return new_array

     new_array = cond(start == end, true_fn, lambda: false_fn(array, start, end))

python/paddle/fluid/layers/detection.py
@@ -1768,7 +1768,7 @@ def ssd_loss(
     target_label.stop_gradient = True
     conf_loss = softmax_with_cross_entropy(confidence, target_label)
     # 3. Mining hard examples
-    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
+    actual_shape = paddle.slice(conf_shape, axes=[0], starts=[0], ends=[2])
     actual_shape.stop_gradient = True
     # shape=(-1, 0) is set for compile-time, the correct shape is set by
     # actual_shape in runtime.

python/paddle/fluid/layers/nn.py
@@ -110,7 +110,6 @@ __all__ = [
     'gaussian_random',
     'sampling_id',
     'sum',
-    'slice',
     'shape',
     'clip',
     'clip_by_norm',
@@ -6007,261 +6006,6 @@ def sum(x):
     return paddle.add_n(x)

-@templatedoc()
-def slice(input, axes, starts, ends):
-    """
-    This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
-    https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
-    Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
-    end dimension for each axis in the list of axes and Slice uses this information
-    to slice the input data tensor. If a negative value is passed to
-    ``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
-    axis :math:`i-1` (here 0 is the initial position).
-    If the value passed to ``starts`` or ``ends`` is greater than n
-    (the number of elements in this dimension), it represents n.
-    For slicing to the end of a dimension with unknown size, it is recommended
-    to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` and ``ends``.
-    Following examples will explain how slice works:
-    .. code-block:: text
-        Case1:
-            Given:
-                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
-                axes = [0, 1]
-                starts = [1, 0]
-                ends = [2, 3]
-            Then:
-                result = [ [5, 6, 7], ]
-        Case2:
-            Given:
-                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
-                axes = [0, 1]
-                starts = [0, 1]
-                ends = [-1, 1000]       # -1 denotes the reverse 0th position of dimension 0.
-            Then:
-                result = [ [2, 3, 4], ] # result = data[0:1, 1:4]
-    Args:
-        input (Tensor): A ``Tensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
-        axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to .
-        starts (list|tuple|Tensor): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
-                it should be integers or Tensors with shape [1]. If ``starts`` is an Tensor, it should be an 1-D Tensor.
-                It represents starting indices of corresponding axis in ``axes``.
-        ends (list|tuple|Tensor): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
-                it should be integers or Tensors with shape [1]. If ``ends`` is an Tensor, it should be an 1-D Tensor .
-                It represents ending indices of corresponding axis in ``axes``.
-    Returns:
-        Tensor: A ``Tensor``. The data type is same as ``input``.
-    Raises:
-        TypeError: The type of ``starts`` must be list, tuple or Tensor.
-        TypeError: The type of ``ends`` must be list, tuple or Tensor.
-    Examples:
-        .. code-block:: python
-            import paddle
-            input = paddle.rand(shape=[4, 5, 6], dtype='float32')
-            # example 1:
-            # attr starts is a list which doesn't contain tensor.
-            axes = [0, 1, 2]
-            starts = [-3, 0, 2]
-            ends = [3, 2, 4]
-            sliced_1 = paddle.slice(input, axes=axes, starts=starts, ends=ends)
-            # sliced_1 is input[0:3, 0:2, 2:4].
-            # example 2:
-            # attr starts is a list which contain tensor.
-            minus_3 = paddle.full([1], -3, "int32")
-            sliced_2 = paddle.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
-            # sliced_2 is input[0:3, 0:2, 2:4].
-    """
-    if in_dygraph_mode():
-        attrs = ()
-        starts_tensor = None
-        ends_tensor = None
-
-        if isinstance(axes, (list, tuple)):
-            axes = list(axes)
-            if len(axes) == 0:
-                raise ValueError(
-                    "Input axes should not be an empty list/tuple."
-                )
-            for i in range(len(axes)):
-                if axes[i] < 0:
-                    axes[i] = max(0, axes[i] + len(input.shape))
-                else:
-                    axes[i] = min(len(input.shape) - 1, axes[i])
-        else:
-            raise ValueError(
-                "Input axes must be a python list or tuple, but reveived {}".format(
-                    type(axes)
-                )
-            )
-
-        infer_flags = list(1 for i in range(len(axes)))
-
-        tmp_tensor_type = core.eager.Tensor
-
-        if isinstance(starts, (list, tuple)):
-            starts = [
-                item.numpy().item(0)
-                if isinstance(item, tmp_tensor_type)
-                else item
-                for item in starts
-            ]
-        elif isinstance(starts, tmp_tensor_type):
-            tensor_t = starts.numpy()
-            starts = [ele for ele in tensor_t]
-
-        if isinstance(ends, (list, tuple)):
-            ends = [
-                item.numpy().item(0)
-                if isinstance(item, tmp_tensor_type)
-                else item
-                for item in ends
-            ]
-            attrs += ('ends', ends)
-        elif isinstance(ends, tmp_tensor_type):
-            tensor_t = ends.numpy()
-            ends = [ele for ele in tensor_t]
-
-        return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
-    else:
-        if _in_legacy_dygraph():
-            attrs = ()
-            starts_tensor = None
-            ends_tensor = None
-
-            if isinstance(axes, (list, tuple)):
-                axes = list(axes)
-                if len(axes) == 0:
-                    raise ValueError(
-                        "Input axes should not be an empty list/tuple."
-                    )
-                for i in range(len(axes)):
-                    if axes[i] < 0:
-                        axes[i] = max(0, axes[i] + len(input.shape))
-                    else:
-                        axes[i] = min(len(input.shape) - 1, axes[i])
-            else:
-                raise ValueError(
-                    "Input axes must be a python list or tuple, but reveived {}".format(
-                        type(axes)
-                    )
-                )
-            infer_flags = list(1 for i in range(len(axes)))
-
-            tmp_tensor_type = Variable
-
-            if isinstance(starts, (list, tuple)):
-                starts = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in starts
-                ]
-                attrs += ('starts', starts)
-            elif isinstance(starts, tmp_tensor_type):
-                starts_tensor = starts
-                starts.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-
-            if isinstance(ends, (list, tuple)):
-                ends = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in ends
-                ]
-                attrs += ('ends', ends)
-            elif isinstance(ends, tmp_tensor_type):
-                ends_tensor = ends
-                ends_tensor.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-
-            return _legacy_C_ops.slice(
-                input,
-                starts_tensor,
-                ends_tensor,
-                None,
-                None,
-                'axes',
-                axes,
-                'infer_flags',
-                infer_flags,
-                *attrs,
-            )
-
-    if not isinstance(starts, (list, tuple, Variable)):
-        raise ValueError(
-            "Input starts must be an Variable, python list or tuple."
-        )
-    if not isinstance(ends, (list, tuple, Variable)):
-        raise ValueError(
-            "Input ends must be an Variable, python list or tuple."
-        )
-
-    helper = LayerHelper('slice', **locals())
-
-    inputs = {'Input': input}
-    attrs = {'axes': axes}
-    infer_flags = list(1 for i in range(len(axes)))
-
-    # starts
-    if isinstance(starts, Variable):
-        starts.stop_gradient = True
-        inputs['StartsTensor'] = starts
-        infer_flags = list(-1 for i in range(len(axes)))
-    elif isinstance(starts, (list, tuple)):
-        attrs['starts'] = []
-        if utils._contain_var(starts):
-            inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
-            for i, dim in enumerate(starts):
-                if isinstance(dim, Variable):
-                    attrs['starts'].append(-1)
-                    infer_flags[i] = -1
-                else:
-                    attrs['starts'].append(dim)
-        else:
-            attrs['starts'] = starts
-
-    # ends
-    if isinstance(ends, Variable):
-        ends.stop_gradient = True
-        inputs['EndsTensor'] = ends
-        infer_flags = list(-1 for i in range(len(axes)))
-    elif isinstance(ends, (list, tuple)):
-        attrs['ends'] = []
-        if utils._contain_var(ends):
-            inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
-            for i, dim in enumerate(ends):
-                if isinstance(dim, Variable):
-                    attrs['ends'].append(-1)
-                    infer_flags[i] = -1
-                else:
-                    attrs['ends'].append(dim)
-        else:
-            attrs['ends'] = ends
-
-    # infer_flags
-    attrs['infer_flags'] = infer_flags
-    out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype('input')
-    )
-    helper.append_op(
-        type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out}
-    )
-
-    return out
-
-
 def shape(input):
     """
     :alias_main: paddle.shape
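
The roughly 255-line slice function removed above documents the semantics that paddle.slice already provides. A minimal sketch reproducing the two cases from the removed docstring with paddle.slice (assuming a current PaddlePaddle install):

    import paddle

    data = paddle.to_tensor([[1, 2, 3, 4], [5, 6, 7, 8]])

    # Case 1 from the removed docstring: rows 1:2 on axis 0, columns 0:3 on axis 1.
    case1 = paddle.slice(data, axes=[0, 1], starts=[1, 0], ends=[2, 3])
    print(case1.numpy())  # [[5 6 7]]

    # Case 2: a negative end counts back from the end of the axis, and an
    # oversized end (1000) is clipped to the dimension size.
    case2 = paddle.slice(data, axes=[0, 1], starts=[0, 1], ends=[-1, 1000])
    print(case2.numpy())  # [[2 3 4]]  == data[0:1, 1:4]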

python/paddle/fluid/layers/rnn.py
@@ -2089,7 +2089,7 @@ class TrainingHelper(DecodeHelper):
         def _slice(x):  # TODO: use Variable.__getitem__
             axes = [0 if self.time_major else 1]
             return paddle.squeeze(
-                nn.slice(
+                paddle.slice(
                     x, axes=axes, starts=[next_time], ends=[next_time + 1]
                 ),
                 axis=axes,
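
The TrainingHelper._slice change above uses the recurring slice-then-squeeze idiom for pulling a single time step out of a batched sequence. A small illustrative sketch with a hypothetical [batch, time, feature] tensor (time axis 1, i.e. time_major=False), not taken from the repository:

    import paddle

    seq = paddle.rand([8, 5, 16])  # hypothetical: batch 8, 5 time steps, 16 features
    next_time = 2

    step = paddle.squeeze(
        paddle.slice(seq, axes=[1], starts=[next_time], ends=[next_time + 1]),
        axis=[1],
    )
    print(step.shape)  # [8, 16]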

python/paddle/fluid/tests/unittests/dygraph_to_static/bert_dygraph_model.py
@@ -291,7 +291,7 @@ class BertModelLayer(Layer):
         #
         # if not self.return_pooled_out:
         #     return enc_output
-        next_sent_feat = fluid.layers.slice(
+        next_sent_feat = paddle.slice(
             input=enc_output, axes=[1], starts=[0], ends=[1]
         )
         next_sent_feat = self.pooled_fc(next_sent_feat)

python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
@@ -412,10 +412,10 @@ def bmn_loss_func(
         return loss

     pred_bm_reg = paddle.squeeze(
-        fluid.layers.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1]
+        paddle.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1]
     )
     pred_bm_cls = paddle.squeeze(
-        fluid.layers.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axis=[1]
+        paddle.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axis=[1]
     )

     bm_mask = _get_mask(cfg)

python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py
@@ -85,9 +85,7 @@ class DynamicGRU(fluid.dygraph.Layer):
                 j = i
             # input_ = inputs[:, j:j+1, :]  # original code
-            input_ = fluid.layers.slice(
-                inputs, axes=[1], starts=[j], ends=[j + 1]
-            )
+            input_ = paddle.slice(inputs, axes=[1], starts=[j], ends=[j + 1])
             input_ = paddle.reshape(input_, [-1, input_.shape[2]])
             hidden, reset, gate = self.gru_unit(input_, hidden)
             hidden_ = paddle.reshape(hidden, [-1, 1, hidden.shape[1]])

python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py
@@ -206,9 +206,7 @@ class Upsample(fluid.dygraph.Layer):
     def forward(self, inputs):
         # get dynamic upsample output shape
         shape_nchw = fluid.layers.shape(inputs)
-        shape_hw = fluid.layers.slice(
-            shape_nchw, axes=[0], starts=[2], ends=[4]
-        )
+        shape_hw = paddle.slice(shape_nchw, axes=[0], starts=[2], ends=[4])
         shape_hw.stop_gradient = True
         in_shape = fluid.layers.cast(shape_hw, dtype='int32')
         out_shape = in_shape * self.scale

python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py
@@ -51,7 +51,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.slice(x, **self.attrs)
+        out = paddle.slice(x, **self.attrs)
         self.fetch_list = [out.name]

     def run_model(self, exec_mode):
@@ -105,9 +105,7 @@ class TestCase2(TestBase):
         ends = paddle.static.data(
             name=self.feed_list[2], shape=self.feed_shape[2], dtype='int32'
         )
-        out = paddle.fluid.layers.slice(
-            x, starts=starts, ends=ends, **self.attrs
-        )
+        out = paddle.slice(x, starts=starts, ends=ends, **self.attrs)
         self.fetch_list = [out.name]

python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from inference_pass_test import InferencePassTest

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import AnalysisConfig
@@ -49,9 +50,7 @@ class SlicePluginTRTDynamicTest(InferencePassTest):
             axes = self.params_axes
             starts = self.params_starts
             ends = self.params_ends
-            slice_out = fluid.layers.slice(
-                data, axes=axes, starts=starts, ends=ends
-            )
+            slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)

         self.feeds = {
             "data": np.random.random((3, 3, 3, 3)).astype("float32"),

python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from inference_pass_test import InferencePassTest

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import AnalysisConfig
@@ -43,9 +44,7 @@ class SlicePluginTRTTest(InferencePassTest):
             axes = self.params_axes
             starts = self.params_starts
             ends = self.params_ends
-            slice_out = fluid.layers.slice(
-                data, axes=axes, starts=starts, ends=ends
-            )
+            slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)
             out = fluid.layers.batch_norm(slice_out, is_test=True)

         self.feeds = {
@@ -114,9 +113,7 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest):
            axes = self.params_axes
            starts = self.params_starts
            ends = self.params_ends
-            slice_out = fluid.layers.slice(
-                data, axes=axes, starts=starts, ends=ends
-            )
+            slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)
             cast_out = fluid.layers.cast(slice_out, 'float32')
             out = fluid.layers.batch_norm(cast_out, is_test=True)
@@ -141,9 +138,7 @@ class StaticSlicePluginTRTTestInt32(SlicePluginTRTTest):
            axes = self.params_axes
            starts = self.params_starts
            ends = self.params_ends
-            slice_out = fluid.layers.slice(
-                data, axes=axes, starts=starts, ends=ends
-            )
+            slice_out = paddle.slice(data, axes=axes, starts=starts, ends=ends)
             cast_out = fluid.layers.cast(slice_out, 'float32')
             out = fluid.layers.batch_norm(cast_out, is_test=True)

python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py
@@ -143,10 +143,10 @@ def lm_model(
            )
            bias_arr.append(bias_1)

-            pre_hidden = layers.slice(
+            pre_hidden = paddle.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1]
            )
-            pre_cell = layers.slice(
+            pre_cell = paddle.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_hidden = paddle.reshape(pre_hidden, shape=[-1, hidden_size])
@@ -169,22 +169,22 @@ def lm_model(
                gate_input = layers.matmul(x=nn, y=weight_1)

                gate_input = layers.elementwise_add(gate_input, bias)
-                i = layers.slice(
+                i = paddle.slice(
                    gate_input, axes=[1], starts=[0], ends=[hidden_size]
                )
-                j = layers.slice(
+                j = paddle.slice(
                    gate_input,
                    axes=[1],
                    starts=[hidden_size],
                    ends=[hidden_size * 2],
                )
-                f = layers.slice(
+                f = paddle.slice(
                    gate_input,
                    axes=[1],
                    starts=[hidden_size * 2],
                    ends=[hidden_size * 3],
                )
-                o = layers.slice(
+                o = paddle.slice(
                    gate_input,
                    axes=[1],
                    starts=[hidden_size * 3],
@@ -222,11 +222,11 @@ def lm_model(
            c = rnnout[i * 2 + 1]
            m.stop_gradient = True
            c.stop_gradient = True
-            last_h = layers.slice(
+            last_h = paddle.slice(
                m, axes=[0], starts=[num_steps - 1], ends=[num_steps]
            )
            last_hidden_array.append(last_h)
-            last_c = layers.slice(
+            last_c = paddle.slice(
                c, axes=[0], starts=[num_steps - 1], ends=[num_steps]
            )
            last_cell_array.append(last_c)
@@ -264,10 +264,10 @@ def lm_model(
            )
            bias_arr.append(bias_1)

-            pre_hidden = layers.slice(
+            pre_hidden = paddle.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1]
            )
-            pre_cell = layers.slice(
+            pre_cell = paddle.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_hidden = paddle.reshape(pre_hidden, shape=[-1, hidden_size])

python/paddle/fluid/tests/unittests/test_imperative_basic.py
@@ -137,9 +137,7 @@ class SimpleRNN(fluid.Layer):
         )
         pre_hidden = init_hidden
         for i in range(self.seq_len):
-            input = fluid.layers.slice(
-                inputs, axes=[1], starts=[i], ends=[i + 1]
-            )
+            input = paddle.slice(inputs, axes=[1], starts=[i], ends=[i + 1])
             input = paddle.reshape(input, shape=[1, 3])
             out_softmax, pre_hidden = self._cell(input, pre_hidden)
             outs.append(out_softmax)

python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py
@@ -192,9 +192,7 @@ class DynamicGRU(fluid.dygraph.Layer):
         for i in range(inputs.shape[1]):
             if self.is_reverse:
                 i = inputs.shape[1] - 1 - i
-            input_ = fluid.layers.slice(
-                inputs, axes=[1], starts=[i], ends=[i + 1]
-            )
+            input_ = paddle.slice(inputs, axes=[1], starts=[i], ends=[i + 1])
             input_ = paddle.reshape(input_, [-1, input_.shape[2]])
             hidden, reset, gate = self.gru_unit(input_, hidden)
             hidden_ = paddle.reshape(hidden, [-1, 1, hidden.shape[1]])
@@ -356,7 +354,7 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer):
         res = []
         hidden_mem = decoder_boot
         for i in range(target_embedding.shape[1]):
-            current_word = fluid.layers.slice(
+            current_word = paddle.slice(
                 target_embedding, axes=[1], starts=[i], ends=[i + 1]
             )
             current_word = paddle.reshape(
@@ -399,7 +397,7 @@ class OCRAttention(fluid.dygraph.Layer):
     def forward(self, inputs, label_in):
         gru_backward, encoded_vector, encoded_proj = self.encoder_net(inputs)
-        backward_first = fluid.layers.slice(
+        backward_first = paddle.slice(
             gru_backward, axes=[1], starts=[0], ends=[1]
         )
         backward_first = paddle.reshape(

python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
@@ -79,10 +79,10 @@ class SimpleLSTMRNN(fluid.Layer):
         self.hidden_array = []

         for i in range(self._num_layers):
-            pre_hidden = fluid.layers.slice(
+            pre_hidden = paddle.slice(
                 init_hidden, axes=[0], starts=[i], ends=[i + 1]
             )
-            pre_cell = fluid.layers.slice(
+            pre_cell = paddle.slice(
                 init_cell, axes=[0], starts=[i], ends=[i + 1]
             )
             pre_hidden = paddle.reshape(
@@ -94,7 +94,7 @@ class SimpleLSTMRNN(fluid.Layer):
         res = []
         for index in range(self._num_steps):
-            self._input = fluid.layers.slice(
+            self._input = paddle.slice(
                 input_embedding, axes=[1], starts=[index], ends=[index + 1]
             )
             self._input = paddle.reshape(

python/paddle/fluid/tests/unittests/test_imperative_save_load.py
@@ -74,10 +74,10 @@ class SimpleLSTMRNN(fluid.Layer):
         self.hidden_array = []

         for i in range(self._num_layers):
-            pre_hidden = fluid.layers.slice(
+            pre_hidden = paddle.slice(
                 init_hidden, axes=[0], starts=[i], ends=[i + 1]
             )
-            pre_cell = fluid.layers.slice(
+            pre_cell = paddle.slice(
                 init_cell, axes=[0], starts=[i], ends=[i + 1]
             )
             pre_hidden = paddle.reshape(
@@ -89,7 +89,7 @@ class SimpleLSTMRNN(fluid.Layer):
         res = []
         for index in range(self._num_steps):
-            self._input = fluid.layers.slice(
+            self._input = paddle.slice(
                 input_embedding, axes=[1], starts=[index], ends=[index + 1]
             )
             self._input = paddle.reshape(

python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py
@@ -77,10 +77,10 @@ class SimpleLSTMRNN(fluid.Layer):
         self.hidden_array = []

         for i in range(self._num_layers):
-            pre_hidden = fluid.layers.slice(
+            pre_hidden = paddle.slice(
                 init_hidden, axes=[0], starts=[i], ends=[i + 1]
             )
-            pre_cell = fluid.layers.slice(
+            pre_cell = paddle.slice(
                 init_cell, axes=[0], starts=[i], ends=[i + 1]
             )
             pre_hidden = paddle.reshape(
@@ -92,7 +92,7 @@ class SimpleLSTMRNN(fluid.Layer):
         res = []
         for index in range(self._num_steps):
-            self._input = fluid.layers.slice(
+            self._input = paddle.slice(
                 input_embedding, axes=[1], starts=[index], ends=[index + 1]
             )
             self._input = paddle.reshape(

python/paddle/fluid/tests/unittests/test_layers.py
@@ -3584,7 +3584,7 @@ class TestBook(LayerTest):
                 name="input", shape=[3, 4, 5, 6], dtype='float32'
             )
-            out = layers.slice(input, axes=axes, starts=starts, ends=ends)
+            out = paddle.slice(input, axes=axes, starts=starts, ends=ends)
             return out

     def make_scale_variable(self):

python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -30,7 +30,7 @@ class TestSliceOpDoubleGradCheck(unittest.TestCase):
     def func(self, place):
         self.config()
-        out = fluid.layers.slice(
+        out = paddle.slice(
             self.inputs, axes=self.axes, starts=self.starts, ends=self.ends
         )
         gradient_checker.double_grad_check(

python/paddle/fluid/tests/unittests/test_static_save_load.py
@@ -85,10 +85,10 @@ class SimpleLSTMRNN(fluid.Layer):
         self.hidden_array = []

         for i in range(self._num_layers):
-            pre_hidden = fluid.layers.slice(
+            pre_hidden = paddle.slice(
                 init_hidden, axes=[0], starts=[i], ends=[i + 1]
             )
-            pre_cell = fluid.layers.slice(
+            pre_cell = paddle.slice(
                 init_cell, axes=[0], starts=[i], ends=[i + 1]
             )
             pre_hidden = paddle.reshape(
@@ -100,7 +100,7 @@ class SimpleLSTMRNN(fluid.Layer):
         res = []
         for index in range(self._num_steps):
-            self._input = fluid.layers.slice(
+            self._input = paddle.slice(
                 input_embedding, axes=[1], starts=[index], ends=[index + 1]
             )
             self._input = paddle.reshape(