Commit 4d42f4fa
Authored Mar 10, 2022 by phlrain

update

Parent: 263b4773

Showing 11 changed files with 874 additions and 913 deletions (+874, -913)
paddle/fluid/operators/lu_op.h                        +10   -7
paddle/fluid/operators/set_value_op.h                  +6   -4
paddle/fluid/operators/slice_op.cc                     +9   -6
paddle/fluid/operators/slice_op.h                      +0   -1
paddle/phi/kernels/cpu/slice_grad_kernel.cc            +1   -2
paddle/phi/kernels/funcs/slice_utils.h                 +3   -0
paddle/phi/kernels/gpu/slice_grad_kernel.cu            +0  -33
paddle/phi/kernels/gpu/slice_kernel.cu.cc              +2   -1
paddle/phi/kernels/impl/slice_grad_kernel_impl.h     +123 -116
paddle/phi/kernels/impl/slice_kernel_impl.h            +4   -4
python/paddle/fluid/tests/unittests/test_slice_op.py +716 -739
paddle/fluid/operators/lu_op.h

@@ -41,9 +41,12 @@ void SetValueCompute(const framework::ExecutionContext& ctx,
   auto dtype = framework::TransToProtoVarType(in->dtype());
   auto in_dims = in->dims();
-  CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, starts, ends, &steps);
-  auto slice_dims = GetSliceDims(in_dims, axes, *starts, *ends, &steps);
-  auto decrease_slice_dims = GetDecreasedDims(slice_dims, decrease_axes);
+  phi::funcs::CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, starts, ends,
+                                                &steps);
+  auto slice_dims =
+      phi::funcs::GetSliceDims(in_dims, axes, *starts, *ends, &steps);
+  auto decrease_slice_dims =
+      phi::funcs::GetDecreasedDims(slice_dims, decrease_axes);
   auto slice_dims_for_assign = decrease_slice_dims;
   if (!none_axes.empty()) {
...
@@ -281,10 +284,10 @@ void SliceCompute(const framework::ExecutionContext& ctx,
     }
   }
-  CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends);
-  slice_dims =
-      GetSliceDims<int64_t>(in_dims, axes, starts, ends, nullptr, nullptr);
-  out_dims = GetDecreasedDims(slice_dims, decrease_axis);
+  phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends);
+  slice_dims = phi::funcs::GetSliceDims<int64_t>(in_dims, axes, starts, ends,
+                                                 nullptr, nullptr);
+  out_dims = phi::funcs::GetDecreasedDims(slice_dims, decrease_axis);
   // 2.2 Get output
   auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
...
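Both hunks make the same mechanical change: the slice helpers formerly pulled in from paddle/fluid/operators/slice_utils.h now live in the phi::funcs namespace, so each call site gains a phi::funcs:: qualifier while the arguments stay untouched. For readers unfamiliar with these helpers, the following is a minimal standalone sketch of the kind of normalization CheckAndUpdateSliceAttrs / GetSliceDims perform (simplified semantics for illustration only, not Paddle's implementation; steps, infer_flags, and error checking are omitted):

#include <algorithm>
#include <cstdint>
#include <vector>

// Resolve negative starts/ends Python-style, clamp them to the axis
// extent, and record the resulting size of each sliced axis.
std::vector<int64_t> NormalizeSliceAttrs(const std::vector<int64_t>& in_dims,
                                         const std::vector<int64_t>& axes,
                                         std::vector<int64_t>* starts,
                                         std::vector<int64_t>* ends) {
  std::vector<int64_t> slice_dims = in_dims;
  for (size_t i = 0; i < axes.size(); ++i) {
    const int64_t dim = in_dims[axes[i]];
    int64_t s = (*starts)[i] < 0 ? (*starts)[i] + dim : (*starts)[i];
    int64_t e = (*ends)[i] < 0 ? (*ends)[i] + dim : (*ends)[i];
    s = std::max<int64_t>(0, std::min(s, dim));
    e = std::max<int64_t>(0, std::min(e, dim));
    (*starts)[i] = s;
    (*ends)[i] = e;
    slice_dims[axes[i]] = std::max<int64_t>(0, e - s);  // sliced extent
  }
  return slice_dims;
}

With in_dims = {3, 4, 5, 6}, axes = {0, 1, 2}, starts = {-3, 0, 2} and ends = {3, 100, -1}, this yields slice dims {3, 4, 2, 6} — the Python slice input[-3:3, 0:100, 2:-1, :] that the unit tests further down exercise.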
paddle/fluid/operators/set_value_op.h

@@ -25,10 +25,10 @@
 #include "paddle/fluid/operators/assign_value_op.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 #include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
-#include "paddle/fluid/operators/slice_utils.h"
 #include "paddle/fluid/operators/strided_slice_op.h"
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/kernels/funcs/slice_utils.h"

 namespace paddle {
 namespace operators {
...
@@ -188,9 +188,11 @@ class SetValueKernel : public framework::OpKernel<T> {
     }

     auto in_dims = in->dims();
-    CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, &steps);
-    auto slice_dims = GetSliceDims(in_dims, axes, starts, ends, &steps);
-    auto decrease_slice_dims = GetDecreasedDims(slice_dims, decrease_axes);
+    phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, &steps);
+    auto slice_dims =
+        phi::funcs::GetSliceDims(in_dims, axes, starts, ends, &steps);
+    auto decrease_slice_dims =
+        phi::funcs::GetDecreasedDims(slice_dims, decrease_axes);

     auto slice_dims_for_assign = decrease_slice_dims;
     if (!none_axes.empty()) {
...
paddle/fluid/operators/slice_op.cc

@@ -17,6 +17,7 @@ limitations under the License. */
 #include <memory>
 #include <string>
 #include <vector>
+#include "paddle/phi/kernels/funcs/slice_utils.h"

 namespace paddle {
 namespace operators {
...
@@ -101,15 +102,17 @@ class SliceOp : public framework::OperatorWithKernel {
             "The size of ends must be equal to the size of axes."));
       }

-      CheckAndUpdateSliceAttrs<int>(in_dims, axes, &starts, &ends, nullptr,
-                                    &infer_flags);
-      auto slice_dims =
-          GetSliceDims<int>(in_dims, axes, starts, ends, nullptr, &infer_flags);
+      phi::funcs::CheckAndUpdateSliceAttrs<int>(in_dims, axes, &starts, &ends,
+                                                nullptr, &infer_flags);
+      auto slice_dims =
+          phi::funcs::GetSliceDims<int>(in_dims, axes, starts, ends,
+                                        nullptr, &infer_flags);
       if (ctx->IsRuntime()) {
-        out_dims = GetDecreasedDims<int>(slice_dims, decrease_axis, &infer_flags);
+        out_dims = phi::funcs::GetDecreasedDims<int>(slice_dims, decrease_axis,
+                                                     &infer_flags);
       } else {
-        out_dims = GetDecreasedDims<int>(slice_dims, decrease_axis, nullptr);
+        out_dims = phi::funcs::GetDecreasedDims<int>(slice_dims, decrease_axis,
+                                                     nullptr);
       }
     }
     ctx->SetOutputDim("Out", out_dims);
...
paddle/fluid/operators/slice_op.h

@@ -18,7 +18,6 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
-#include "paddle/fluid/operators/slice_utils.h"
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
...
paddle/phi/kernels/cpu/slice_grad_kernel.cc

@@ -29,5 +29,4 @@ PD_REGISTER_KERNEL(slice_grad,
                    double,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>,
-                   phi::dtype::bfloat16,
-                   phi::dtype::float16) {}
+                   phi::dtype::bfloat16) {}
paddle/phi/kernels/funcs/slice_utils.h

@@ -19,6 +19,8 @@ limitations under the License. */
 namespace phi {
+namespace funcs {
+
 template <typename T = int64_t>
 inline void CheckAndUpdateSliceAttrs(const DDim in_dims,
                                      const std::vector<T>& axes,
...
@@ -161,4 +163,5 @@ inline DDim GetDecreasedDims(const DDim slice_dims,
   return decreased_dims;
 }

+}  // namespace funcs
 }  // namespace phi
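Adding the inner funcs namespace explains the two spellings used across this commit: files under paddle/fluid (lu_op.h, set_value_op.h, slice_op.cc) must write the full phi::funcs::GetSliceDims, while slice_kernel_impl.h below, whose code already sits inside namespace phi, only needs funcs::GetSliceDims. A self-contained toy example of the lookup rules (hypothetical function name):

#include <iostream>

namespace phi {
namespace funcs {
inline int GetAnswer() { return 42; }  // stand-in for the slice helpers
}  // namespace funcs

// Inside namespace phi, the enclosing namespace is already in scope,
// so the shorter qualification is enough.
inline int CallFromInsidePhi() { return funcs::GetAnswer(); }
}  // namespace phi

// Outside phi (e.g. in paddle::operators), the full path is required.
inline int CallFromOutsidePhi() { return phi::funcs::GetAnswer(); }

int main() {
  std::cout << CallFromInsidePhi() << ' ' << CallFromOutsidePhi() << '\n';
  return 0;
}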
paddle/phi/kernels/gpu/slice_grad_kernel.cu (deleted, 100644 → 0)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/impl/slice_grad_kernel_impl.h"
#include "paddle/phi/kernels/slice_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
PD_REGISTER_KERNEL(slice_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::SliceGradRawKernel,
                   bool,
                   int,
                   int64_t,
                   float,
                   double,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>,
                   phi::dtype::bfloat16,
                   phi::dtype::float16) {}
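The registration block in the deleted file enumerates every dtype the kernel was compiled for; one templated kernel body is instantiated per listed type. As a rough, hypothetical illustration of that fan-out pattern (a toy registry, not Paddle's PD_REGISTER_KERNEL machinery):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Stand-in for a per-dtype kernel body.
template <typename T>
void SliceGradKernel() {}

std::map<std::string, int>& Registry() {
  static std::map<std::string, int> reg;
  return reg;
}

// Instantiate the kernel template once per dtype in the pack and record
// how many instantiations the kernel name covers, mimicking how a dtype
// list such as (bool, int, int64_t, ..., float16) fans out.
template <typename... Ts>
void RegisterKernel(const std::string& name) {
  (SliceGradKernel<Ts>(), ...);
  Registry()[name] = sizeof...(Ts);
}

int main() {
  RegisterKernel<bool, int, int64_t, float, double>("slice_grad");
  std::cout << "slice_grad covers " << Registry()["slice_grad"]
            << " dtypes\n";
  return 0;
}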
paddle/phi/kernels/gpu/slice_kernel.cu.cc

@@ -29,4 +29,5 @@ PD_REGISTER_KERNEL(slice,
                    double,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::bfloat16,
+                   phi::dtype::float16) {}
paddle/phi/kernels/impl/slice_grad_kernel_impl.h

@@ -66,136 +66,143 @@ void EigenPaddingCompute(
(The 116 removed lines are this same rank-reduction branch commented out line by line, with its reshaped launches written as LaunchEigenPadding<T, Context, D>; the 123 added lines re-enable the branch, calling LaunchEigenPadding<T, Context> instead. The resulting code:)

    // if dimension less than 3, cannot reduce dimension
    LaunchEigenPadding<T, Context, D>(
        context, d_input, in_dims, d_out, out_dims, paddings);
  } else {  // else we can reduce dimension
    // count not-zero padding number, and record the dimension
    int need_pad_num = 0, pad_dim = -1;
    for (size_t i = 0; i < D; i++) {
      if (paddings[i].first != 0 || paddings[i].second != 0) {
        need_pad_num++;
        pad_dim = i;
      }
    }

    if (need_pad_num == 1) {
      // only need padding one dimension, we can reduce dimension.
      // only the padding dimension is available for us.
      // How to reduce dimension(5 to 3 for example):
      //    before(D=5):
      //        in_dims:        [x1, x2, x3, x4, x5]
      //        padding.first:  [0,  0,  a,  0,  0]
      //        padding.second: [0,  0,  b,  0,  0]
      //                            |  |
      //                            V  V
      //    after(D=3):
      //        reshaped_in_dims:        [x1*x2, x3, x4*x5]
      //        reshaped_padding.first:  [0,     a,  0]
      //        reshaped_padding.second: [0,     b,  0]

      if (pad_dim == D - 1) {
        // only last dimension need padding,
        // reshape the dimension of tensor in 2: [preceding, padding]
        std::vector<int64_t> in_tore_shape(2, 1), out_tore_shape(2, 1);
        Eigen::array<std::pair<int64_t, int64_t>, 2> reshaped_padding;

        // first dimension is the accumulate of preceding dimension
        for (int i = 0; i < pad_dim; i++) {
          in_tore_shape[0] *= in_dims[i];
          out_tore_shape[0] *= out_dims[i];
        }

        // second dimension is the padding dimension
        in_tore_shape[1] = in_dims[pad_dim];
        out_tore_shape[1] = out_dims[pad_dim];

        // convert array from std::vector to DDim
        DDim reshaped_in_dims = make_ddim(in_tore_shape);
        DDim reshaped_out_dims = make_ddim(out_tore_shape);

        // after reshape: the first dimension do not need padding,
        // set padding[0] zero
        reshaped_padding[0].first = reshaped_padding[0].second = 0;
        // the second dimension is the previous padding dimension
        reshaped_padding[1].first = paddings[pad_dim].first;
        reshaped_padding[1].second = paddings[pad_dim].second;

        LaunchEigenPadding<T, Context>(context,
                                       d_input,
                                       reshaped_in_dims,
                                       d_out,
                                       reshaped_out_dims,
                                       reshaped_padding);
      } else if (pad_dim == 0) {
        // only first dimension need padding,
        // reshape the dimension of tensor in 2: [padding, succeeding]
        // similar to (D - 1)
        std::vector<int64_t> in_tore_shape(2, 1), out_tore_shape(2, 1);
        Eigen::array<std::pair<int64_t, int64_t>, 2> reshaped_padding;

        // first dimension is the padding dimension
        in_tore_shape[0] = in_dims[pad_dim];
        out_tore_shape[0] = out_dims[pad_dim];

        // sencond dimension is the accumulate of succeeding dimension
        for (size_t i = pad_dim + 1; i < D; i++) {
          in_tore_shape[1] *= in_dims[i];
          out_tore_shape[1] *= out_dims[i];
        }

        // convert array from std::vector to DDim
        DDim reshaped_in_dims = make_ddim(in_tore_shape);
        DDim reshaped_out_dims = make_ddim(out_tore_shape);

        // after reshape:
        // the first dimension is the previous padding dimension
        reshaped_padding[0].first = paddings[pad_dim].first;
        reshaped_padding[0].second = paddings[pad_dim].second;
        // the second dimension do not need padding, set padding[1] zero
        reshaped_padding[1].first = reshaped_padding[1].second = 0;

        LaunchEigenPadding<T, Context>(context,
                                       d_input,
                                       reshaped_in_dims,
                                       d_out,
                                       reshaped_out_dims,
                                       reshaped_padding);
      } else {
        // other dimension need padding
        // reshape the dimension of tensor in 3:
        // [preceding, padding, succeeding]
        std::vector<int64_t> in_tore_shape(3, 1), out_tore_shape(3, 1);
        Eigen::array<std::pair<int64_t, int64_t>, 3> reshaped_padding;

        // first dimension is the accumulate of preceding dimension
        for (int i = 0; i < pad_dim; i++) {
          in_tore_shape[0] *= in_dims[i];
          out_tore_shape[0] *= out_dims[i];
        }

        // second dimension is the padding dimension
        in_tore_shape[1] = in_dims[pad_dim];
        out_tore_shape[1] = out_dims[pad_dim];

        // third dimension is the accumulate of succeeding dimension
        for (size_t i = pad_dim + 1; i < D; i++) {
          in_tore_shape[2] *= in_dims[i];
          out_tore_shape[2] *= out_dims[i];
        }

        // convert array from std::vector to DDim
        DDim reshaped_in_dims = make_ddim(in_tore_shape);
        DDim reshaped_out_dims = make_ddim(out_tore_shape);

        // after reshape:
        // the first dimension do not need padding, set padding[0] zero
        reshaped_padding[0].first = reshaped_padding[2].second = 0;
        // the second dimension is the previous padding dimension
        reshaped_padding[1].first = paddings[pad_dim].first;
        reshaped_padding[1].second = paddings[pad_dim].second;
        // the third dimension do not need padding, set padding[2] zero
        reshaped_padding[2].first = reshaped_padding[2].second = 0;

        LaunchEigenPadding<T, Context>(context,
                                       d_input,
                                       reshaped_in_dims,
                                       d_out,
                                       reshaped_out_dims,
                                       reshaped_padding);
      }
    } else {
      // need padding at many dimension, cannot reduce dimension
      LaunchEigenPadding<T, Context>(
          context, d_input, in_dims, d_out, out_dims, paddings);
    }
  }
}

template <typename T, typename Context, size_t D>
...
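The branch re-enabled above rests on one observation: when only a single axis actually receives padding, an arbitrary-rank tensor can be viewed, without moving any data, as a rank-2 or rank-3 tensor [preceding, padding, succeeding], so a fixed low-rank Eigen padding call can serve any D. A standalone sketch of just the shape computation from the comments above (illustration only, not the kernel itself):

#include <cstdint>
#include <iostream>
#include <vector>

// Collapse every axis before the padded one into a single leading axis and
// every axis after it into a single trailing axis, e.g. {x1, x2, x3, x4, x5}
// padded on axis 2 becomes {x1*x2, x3, x4*x5}.
std::vector<int64_t> CollapseAroundAxis(const std::vector<int64_t>& dims,
                                        size_t pad_dim) {
  std::vector<int64_t> out(3, 1);
  for (size_t i = 0; i < pad_dim; ++i) out[0] *= dims[i];
  out[1] = dims[pad_dim];
  for (size_t i = pad_dim + 1; i < dims.size(); ++i) out[2] *= dims[i];
  return out;
}

int main() {
  const auto r = CollapseAroundAxis({2, 3, 5, 4, 7}, 2);
  std::cout << r[0] << " " << r[1] << " " << r[2] << "\n";  // prints: 6 5 28
  return 0;
}

The original padding amounts then apply to the middle axis of the collapsed shape, with zero padding on the collapsed outer axes, which is exactly what the reshaped_padding assignments in the kernel set up.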
paddle/phi/kernels/impl/slice_kernel_impl.h

@@ -60,10 +60,10 @@ void SliceCompute(const Context& ctx,
     }
   }

-  CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, &starts, &ends);
-  slice_dims =
-      GetSliceDims<int64_t>(in_dims, axes, starts, ends, nullptr, nullptr);
-  out_dims = GetDecreasedDims<int64_t>(slice_dims, decrease_axis);
+  funcs::CheckAndUpdateSliceAttrs<int64_t>(in_dims, axes, &starts, &ends);
+  slice_dims = funcs::GetSliceDims<int64_t>(
+      in_dims, axes, starts, ends, nullptr, nullptr);
+  out_dims = funcs::GetDecreasedDims<int64_t>(slice_dims, decrease_axis);

   // 2.2 Get output
   auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
...
python/paddle/fluid/tests/unittests/test_slice_op.py
浏览文件 @
4d42f4fa
...
@@ -55,745 +55,722 @@ class TestSliceOp(OpTest):
...
@@ -55,745 +55,722 @@ class TestSliceOp(OpTest):
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
class
TestCase1
(
TestSliceOp
):
# class TestCase1(TestSliceOp):
def
config
(
self
):
# def config(self):
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
starts
=
[
-
3
,
0
,
2
]
# self.starts = [-3, 0, 2]
self
.
ends
=
[
3
,
100
,
-
1
]
# self.ends = [3, 100, -1]
self
.
axes
=
[
0
,
1
,
2
]
# self.axes = [0, 1, 2]
self
.
infer_flags
=
[
1
,
1
,
1
]
# self.infer_flags = [1, 1, 1]
self
.
out
=
self
.
input
[
-
3
:
3
,
0
:
100
,
2
:
-
1
,
:]
# self.out = self.input[-3:3, 0:100, 2:-1, :]
# class TestCase2(TestSliceOp):
class
TestCase2
(
TestSliceOp
):
# def config(self):
def
config
(
self
):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# self.starts = [-3, 0, 2]
self
.
starts
=
[
-
3
,
0
,
2
]
# self.ends = [3, 100, -1]
self
.
ends
=
[
3
,
100
,
-
1
]
# self.axes = [0, 1, 3]
self
.
axes
=
[
0
,
1
,
3
]
# self.infer_flags = [1, 1, 1]
self
.
infer_flags
=
[
1
,
1
,
1
]
# self.out = self.input[-3:3, 0:100, :, 2:-1]
self
.
out
=
self
.
input
[
-
3
:
3
,
0
:
100
,
:,
2
:
-
1
]
# # 1.2 with attr(decrease)
# class TestSliceOp_decs_dim(OpTest):
# 1.2 with attr(decrease)
# def setUp(self):
class
TestSliceOp_decs_dim
(
OpTest
):
# self.op_type = "slice"
def
setUp
(
self
):
# self.config()
self
.
op_type
=
"slice"
# self.inputs = {'Input': self.input}
self
.
config
()
# self.outputs = {'Out': self.out}
self
.
inputs
=
{
'Input'
:
self
.
input
}
# self.attrs = {
self
.
outputs
=
{
'Out'
:
self
.
out
}
# 'axes': self.axes,
self
.
attrs
=
{
# 'starts': self.starts,
'axes'
:
self
.
axes
,
# 'ends': self.ends,
'starts'
:
self
.
starts
,
# 'infer_flags': self.infer_flags,
'ends'
:
self
.
ends
,
# 'decrease_axis': self.decrease_axis,
'infer_flags'
:
self
.
infer_flags
,
# }
'decrease_axis'
:
self
.
decrease_axis
,
}
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
def
config
(
self
):
# self.starts = [1, 0, 2]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# self.ends = [2, 3, 4]
self
.
starts
=
[
1
,
0
,
2
]
# self.axes = [0, 1, 2]
self
.
ends
=
[
2
,
3
,
4
]
# self.decrease_axis = [0]
self
.
axes
=
[
0
,
1
,
2
]
# self.infer_flags = [1, 1, 1]
self
.
decrease_axis
=
[
0
]
# self.out = self.input[1, 0:3, 2:4, :]
self
.
infer_flags
=
[
1
,
1
,
1
]
self
.
out
=
self
.
input
[
1
,
0
:
3
,
2
:
4
,
:]
# def test_check_output(self):
# self.check_output()
def
test_check_output
(
self
):
self
.
check_output
()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
def
test_check_grad_normal
(
self
):
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
class
TestSliceOp_decs_dim_2
(
TestSliceOp_decs_dim
):
# self.starts = [1, 0, 2]
def
config
(
self
):
# self.ends = [2, 1, 4]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# self.axes = [0, 1, 2]
self
.
starts
=
[
1
,
0
,
2
]
# self.decrease_axis = [0, 1]
self
.
ends
=
[
2
,
1
,
4
]
# self.infer_flags = [1, 1, 1]
self
.
axes
=
[
0
,
1
,
2
]
# self.out = self.input[1, 0, 2:4, :]
self
.
decrease_axis
=
[
0
,
1
]
self
.
infer_flags
=
[
1
,
1
,
1
]
# class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
self
.
out
=
self
.
input
[
1
,
0
,
2
:
4
,
:]
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-1, 0, 2]
class
TestSliceOp_decs_dim_3
(
TestSliceOp_decs_dim
):
# self.ends = [1000000, 1, 4]
def
config
(
self
):
# self.axes = [0, 1, 2]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# self.decrease_axis = [0, 1]
self
.
starts
=
[
-
1
,
0
,
2
]
# self.infer_flags = [1, 1, 1]
self
.
ends
=
[
1000000
,
1
,
4
]
# self.out = self.input[-1, 0, 2:4, :]
self
.
axes
=
[
0
,
1
,
2
]
self
.
decrease_axis
=
[
0
,
1
]
# class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
self
.
infer_flags
=
[
1
,
1
,
1
]
# def config(self):
self
.
out
=
self
.
input
[
-
1
,
0
,
2
:
4
,
:]
# self.input = np.random.random([3, 4, 5, 7]).astype("float64")
# self.starts = [0, 1, 2, 3]
# self.ends = [1, 2, 3, 4]
class
TestSliceOp_decs_dim_4
(
TestSliceOp_decs_dim
):
# self.axes = [0, 1, 2, 3]
def
config
(
self
):
# self.decrease_axis = [0, 1, 2, 3]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
7
]).
astype
(
"float64"
)
# self.infer_flags = [1, 1, 1]
self
.
starts
=
[
0
,
1
,
2
,
3
]
# self.out = self.input[0, 1, 2, 3:4]
self
.
ends
=
[
1
,
2
,
3
,
4
]
self
.
axes
=
[
0
,
1
,
2
,
3
]
# class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
self
.
decrease_axis
=
[
0
,
1
,
2
,
3
]
# def config(self):
self
.
infer_flags
=
[
1
,
1
,
1
]
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
out
=
self
.
input
[
0
,
1
,
2
,
3
:
4
]
# self.starts = [-1]
# self.ends = [1000000]
# self.axes = [3]
class
TestSliceOp_decs_dim_5
(
TestSliceOp_decs_dim
):
# self.decrease_axis = [3]
def
config
(
self
):
# self.infer_flags = [1, 1, 1]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# self.out = self.input[:, :, :, -1]
self
.
starts
=
[
-
1
]
self
.
ends
=
[
1000000
]
# class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
self
.
axes
=
[
3
]
# def config(self):
self
.
decrease_axis
=
[
3
]
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
infer_flags
=
[
1
,
1
,
1
]
# self.starts = [0, 1, 2, 3]
self
.
out
=
self
.
input
[:,
:,
:,
-
1
]
# self.ends = [1, 2, 3, 4]
# self.axes = [0, 1, 2, 3]
# self.decrease_axis = [0, 1, 2, 3]
class
TestSliceOp_decs_dim_6
(
TestSliceOp_decs_dim
):
# self.infer_flags = [1, 1, 1]
def
config
(
self
):
# self.out = self.input[0, 1, 2, 3:4]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
self
.
starts
=
[
0
,
1
,
2
,
3
]
# # Situation 2: starts(list, have tensor), ends(list, no tensor)
self
.
ends
=
[
1
,
2
,
3
,
4
]
# # without attr(decrease)
self
.
axes
=
[
0
,
1
,
2
,
3
]
# class TestSliceOp_starts_ListTensor(OpTest):
self
.
decrease_axis
=
[
0
,
1
,
2
,
3
]
# def setUp(self):
self
.
infer_flags
=
[
1
,
1
,
1
]
# self.op_type = "slice"
self
.
out
=
self
.
input
[
0
,
1
,
2
,
3
:
4
]
# self.config()
# starts_tensor = []
# Situation 2: starts(list, have tensor), ends(list, no tensor)
# for index, ele in enumerate(self.starts):
# without attr(decrease)
# starts_tensor.append(("x" + str(index), np.ones(
class
TestSliceOp_starts_ListTensor
(
OpTest
):
# (1)).astype('int64') * ele))
def
setUp
(
self
):
self
.
op_type
=
"slice"
# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
self
.
config
()
# self.outputs = {'Out': self.out}
# self.attrs = {
starts_tensor
=
[]
# 'axes': self.axes,
for
index
,
ele
in
enumerate
(
self
.
starts
):
# 'starts': self.starts_infer,
starts_tensor
.
append
((
"x"
+
str
(
index
),
np
.
ones
(
# 'ends': self.ends,
(
1
)).
astype
(
'int64'
)
*
ele
))
# 'infer_flags': self.infer_flags
# }
self
.
inputs
=
{
'Input'
:
self
.
input
,
'StartsTensorList'
:
starts_tensor
}
self
.
outputs
=
{
'Out'
:
self
.
out
}
# def config(self):
self
.
attrs
=
{
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
'axes'
:
self
.
axes
,
# self.starts = [1, 0, 2]
'starts'
:
self
.
starts_infer
,
# self.ends = [3, 3, 4]
'ends'
:
self
.
ends
,
# self.axes = [0, 1, 2]
'infer_flags'
:
self
.
infer_flags
# self.infer_flags = [-1, 1, -1]
}
# self.out = self.input[1:3, 0:3, 2:4, :]
def
config
(
self
):
# self.starts_infer = [-1, 0, -1]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
self
.
starts
=
[
1
,
0
,
2
]
# def test_check_output(self):
self
.
ends
=
[
3
,
3
,
4
]
# self.check_output()
self
.
axes
=
[
0
,
1
,
2
]
self
.
infer_flags
=
[
-
1
,
1
,
-
1
]
# def test_check_grad_normal(self):
self
.
out
=
self
.
input
[
1
:
3
,
0
:
3
,
2
:
4
,
:]
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
self
.
starts_infer
=
[
-
1
,
0
,
-
1
]
# # Situation 2: starts(list, have tensor), ends(list, no tensor)
# # with attr(decrease)
def
test_check_output
(
self
):
# class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
self
.
check_output
()
# def setUp(self):
# self.op_type = "slice"
def
test_check_grad_normal
(
self
):
# self.config()
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# starts_tensor = []
# for index, ele in enumerate(self.starts):
# Situation 2: starts(list, have tensor), ends(list, no tensor)
# starts_tensor.append(("x" + str(index), np.ones(
# with attr(decrease)
# (1)).astype('int32') * ele))
class
TestSliceOp_decs_dim_starts_ListTensor
(
OpTest
):
def
setUp
(
self
):
# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
self
.
op_type
=
"slice"
self
.
config
()
# self.outputs = {'Out': self.out}
# self.attrs = {
starts_tensor
=
[]
# 'axes': self.axes,
for
index
,
ele
in
enumerate
(
self
.
starts
):
# 'starts': self.starts_infer,
starts_tensor
.
append
((
"x"
+
str
(
index
),
np
.
ones
(
# 'ends': self.ends,
(
1
)).
astype
(
'int32'
)
*
ele
))
# 'infer_flags': self.infer_flags,
# 'decrease_axis': self.decrease_axis,
self
.
inputs
=
{
'Input'
:
self
.
input
,
'StartsTensorList'
:
starts_tensor
}
# }
self
.
outputs
=
{
'Out'
:
self
.
out
}
# def config(self):
self
.
attrs
=
{
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
'axes'
:
self
.
axes
,
# self.starts = [1, 0, 2]
'starts'
:
self
.
starts_infer
,
# self.ends = [2, 3, 4]
'ends'
:
self
.
ends
,
# self.axes = [0, 1, 2]
'infer_flags'
:
self
.
infer_flags
,
# self.decrease_axis = [0]
'decrease_axis'
:
self
.
decrease_axis
,
# self.infer_flags = [1, -1, 1]
}
# self.out = self.input[1, 0:3, 2:4, :]
def
config
(
self
):
# self.starts_infer = [1, -1, 2]
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
self
.
starts
=
[
1
,
0
,
2
]
# def test_check_output(self):
self
.
ends
=
[
2
,
3
,
4
]
# self.check_output()
self
.
axes
=
[
0
,
1
,
2
]
self
.
decrease_axis
=
[
0
]
# def test_check_grad_normal(self):
self
.
infer_flags
=
[
1
,
-
1
,
1
]
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
self
.
out
=
self
.
input
[
1
,
0
:
3
,
2
:
4
,
:]
# class TestSliceOp_decs_dim_5_starts_ListTensor(
self
.
starts_infer
=
[
1
,
-
1
,
2
]
# TestSliceOp_decs_dim_starts_ListTensor):
# def config(self):
def
test_check_output
(
self
):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
check_output
()
# self.starts = [-1]
# self.ends = [1000000]
def
test_check_grad_normal
(
self
):
# self.axes = [3]
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# self.decrease_axis = [3]
# self.infer_flags = [-1]
# self.out = self.input[:, :, :, -1]
class
TestSliceOp_decs_dim_5_starts_ListTensor
(
TestSliceOp_decs_dim_starts_ListTensor
):
# self.starts_infer = [-1]
def
config
(
self
):
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# # Situation 3: starts(tensor), ends(list, no tensor)
self
.
starts
=
[
-
1
]
# # with attr(decrease)
self
.
ends
=
[
1000000
]
# class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
self
.
axes
=
[
3
]
# def setUp(self):
self
.
decrease_axis
=
[
3
]
# self.op_type = "slice"
self
.
infer_flags
=
[
-
1
]
# self.config()
self
.
out
=
self
.
input
[:,
:,
:,
-
1
]
# self.inputs = {
# 'Input': self.input,
self
.
starts_infer
=
[
-
1
]
# "StartsTensor": np.array(
# self.starts, dtype="int32")
# }
# Situation 3: starts(tensor), ends(list, no tensor)
# self.outputs = {'Out': self.out}
# with attr(decrease)
# self.attrs = {
class
TestSliceOp_decs_dim_starts_OneTensor
(
OpTest
):
# 'axes': self.axes,
def
setUp
(
self
):
# #'starts': self.starts,
self
.
op_type
=
"slice"
# 'ends': self.ends,
self
.
config
()
# 'infer_flags': self.infer_flags,
self
.
inputs
=
{
# 'decrease_axis': self.decrease_axis,
'Input'
:
self
.
input
,
# }
"StartsTensor"
:
np
.
array
(
self
.
starts
,
dtype
=
"int32"
)
# def config(self):
}
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
outputs
=
{
'Out'
:
self
.
out
}
# self.starts = [1, 0, 2]
self
.
attrs
=
{
# self.ends = [2, 3, 4]
'axes'
:
self
.
axes
,
# self.axes = [0, 1, 2]
#'starts': self.starts,
# self.decrease_axis = [0]
'ends'
:
self
.
ends
,
# self.infer_flags = [-1, -1, -1]
'infer_flags'
:
self
.
infer_flags
,
# self.out = self.input[1, 0:3, 2:4, :]
'decrease_axis'
:
self
.
decrease_axis
,
}
# def test_check_output(self):
# self.check_output()
def
config
(
self
):
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# def test_check_grad_normal(self):
self
.
starts
=
[
1
,
0
,
2
]
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
self
.
ends
=
[
2
,
3
,
4
]
self
.
axes
=
[
0
,
1
,
2
]
# # Situation 4: starts(tensor), ends(tensor)
self
.
decrease_axis
=
[
0
]
# # without attr(decrease)
self
.
infer_flags
=
[
-
1
,
-
1
,
-
1
]
# class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
self
.
out
=
self
.
input
[
1
,
0
:
3
,
2
:
4
,
:]
# def setUp(self):
# self.op_type = "slice"
def
test_check_output
(
self
):
# self.config()
self
.
check_output
()
# self.inputs = {
def
test_check_grad_normal
(
self
):
# 'Input': self.input,
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# "StartsTensor": np.array(
# self.starts, dtype="int64"),
# "EndsTensor": np.array(
# Situation 4: starts(tensor), ends(tensor)
# self.ends, dtype="int32")
# without attr(decrease)
# }
class
TestSliceOp_starts_OneTensor_ends_OneTensor
(
OpTest
):
# self.outputs = {'Out': self.out}
def
setUp
(
self
):
# self.attrs = {
self
.
op_type
=
"slice"
# 'axes': self.axes,
self
.
config
()
# #'starts': self.starts,
# #'ends': self.ends_infer,
self
.
inputs
=
{
# 'infer_flags': self.infer_flags
'Input'
:
self
.
input
,
# }
"StartsTensor"
:
np
.
array
(
self
.
starts
,
dtype
=
"int64"
),
# def config(self):
"EndsTensor"
:
np
.
array
(
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
ends
,
dtype
=
"int32"
)
# self.starts = [1, 0, 2]
}
# self.ends = [3, 3, 4]
self
.
outputs
=
{
'Out'
:
self
.
out
}
# self.axes = [0, 1, 2]
self
.
attrs
=
{
# self.infer_flags = [-1, -1, -1]
'axes'
:
self
.
axes
,
# self.out = self.input[1:3, 0:3, 2:4, :]
#'starts': self.starts,
#'ends': self.ends_infer,
# def test_check_output(self):
'infer_flags'
:
self
.
infer_flags
# self.check_output()
}
# def test_check_grad_normal(self):
def
config
(
self
):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
self
.
starts
=
[
1
,
0
,
2
]
# # Situation 5: starts(tensor), ends(tensor)
self
.
ends
=
[
3
,
3
,
4
]
# # with attr(decrease)
self
.
axes
=
[
0
,
1
,
2
]
# class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
self
.
infer_flags
=
[
-
1
,
-
1
,
-
1
]
# def setUp(self):
self
.
out
=
self
.
input
[
1
:
3
,
0
:
3
,
2
:
4
,
:]
# self.op_type = "slice"
# self.config()
def
test_check_output
(
self
):
# self.inputs = {
self
.
check_output
()
# 'Input': self.input,
# "StartsTensor": np.array(
def
test_check_grad_normal
(
self
):
# self.starts, dtype="int32"),
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# "EndsTensor": np.array(
# self.ends, dtype="int32")
# }
# Situation 5: starts(tensor), ends(tensor)
# self.outputs = {'Out': self.out}
# with attr(decrease)
# self.attrs = {
class
TestSliceOp_decs_dim_starts_and_ends_OneTensor
(
OpTest
):
# 'axes': self.axes,
def
setUp
(
self
):
# #'starts': self.starts,
self
.
op_type
=
"slice"
# #'ends': self.ends,
self
.
config
()
# 'infer_flags': self.infer_flags,
self
.
inputs
=
{
# 'decrease_axis': self.decrease_axis,
'Input'
:
self
.
input
,
# }
"StartsTensor"
:
np
.
array
(
self
.
starts
,
dtype
=
"int32"
),
# def config(self):
"EndsTensor"
:
np
.
array
(
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
ends
,
dtype
=
"int32"
)
# self.starts = [1, 0, 2]
}
# self.ends = [2, 1, 4]
self
.
outputs
=
{
'Out'
:
self
.
out
}
# self.axes = [0, 1, 2]
self
.
attrs
=
{
# self.decrease_axis = [0, 1]
'axes'
:
self
.
axes
,
# self.infer_flags = [-1, -1, -1]
#'starts': self.starts,
# self.out = self.input[1, 0, 2:4, :]
#'ends': self.ends,
'infer_flags'
:
self
.
infer_flags
,
# def test_check_output(self):
'decrease_axis'
:
self
.
decrease_axis
,
# self.check_output()
}
# def test_check_grad_normal(self):
def
config
(
self
):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
self
.
starts
=
[
1
,
0
,
2
]
# # Situation 6: starts(tensor), ends(list, have tensor)
self
.
ends
=
[
2
,
1
,
4
]
# # without attr(decrease)
self
.
axes
=
[
0
,
1
,
2
]
# class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
self
.
decrease_axis
=
[
0
,
1
]
# def setUp(self):
self
.
infer_flags
=
[
-
1
,
-
1
,
-
1
]
# self.op_type = "slice"
self
.
out
=
self
.
input
[
1
,
0
,
2
:
4
,
:]
# self.config()
def
test_check_output
(
self
):
# ends_tensor = []
self
.
check_output
()
# for index, ele in enumerate(self.ends):
# ends_tensor.append(("y" + str(index), np.ones(
def
test_check_grad_normal
(
self
):
# (1)).astype('int32') * ele))
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# self.inputs = {
# 'Input': self.input,
# Situation 6: starts(tensor), ends(list, have tensor)
# "StartsTensor": np.array(
# without attr(decrease)
# self.starts, dtype="int32"),
class
TestSliceOp_starts_OneTensor_ends_ListTensor
(
OpTest
):
# 'EndsTensorList': ends_tensor
def
setUp
(
self
):
# }
self
.
op_type
=
"slice"
# self.outputs = {'Out': self.out}
self
.
config
()
# self.attrs = {
# 'axes': self.axes,
ends_tensor
=
[]
# #'starts': self.starts,
for
index
,
ele
in
enumerate
(
self
.
ends
):
# 'ends': self.ends_infer,
ends_tensor
.
append
((
"y"
+
str
(
index
),
np
.
ones
(
# 'infer_flags': self.infer_flags
(
1
)).
astype
(
'int32'
)
*
ele
))
# }
self
.
inputs
=
{
# def config(self):
'Input'
:
self
.
input
,
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
"StartsTensor"
:
np
.
array
(
# self.starts = [1, 0, 2]
self
.
starts
,
dtype
=
"int32"
),
# self.ends = [3, 3, 4]
'EndsTensorList'
:
ends_tensor
# self.axes = [0, 1, 2]
}
# self.infer_flags = [-1, -1, -1]
self
.
outputs
=
{
'Out'
:
self
.
out
}
# self.out = self.input[1:3, 0:3, 2:4, :]
self
.
attrs
=
{
'axes'
:
self
.
axes
,
# self.ends_infer = [-1, 3, 4]
#'starts': self.starts,
'ends'
:
self
.
ends_infer
,
# def test_check_output(self):
'infer_flags'
:
self
.
infer_flags
# self.check_output()
}
# def test_check_grad_normal(self):
def
config
(
self
):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
self
.
starts
=
[
1
,
0
,
2
]
# # Test CUDA float16
self
.
ends
=
[
3
,
3
,
4
]
# @unittest.skipIf(not core.is_compiled_with_cuda(),
self
.
axes
=
[
0
,
1
,
2
]
# "core is not compiled with CUDA")
self
.
infer_flags
=
[
-
1
,
-
1
,
-
1
]
# class TestFP16(OpTest):
self
.
out
=
self
.
input
[
1
:
3
,
0
:
3
,
2
:
4
,
:]
# def setUp(self):
# self.op_type = "slice"
self
.
ends_infer
=
[
-
1
,
3
,
4
]
# self.config()
# self.inputs = {'Input': self.input}
def
test_check_output
(
self
):
# self.outputs = {'Out': self.out}
self
.
check_output
()
# self.attrs = {
# 'axes': self.axes,
def
test_check_grad_normal
(
self
):
# 'starts': self.starts,
self
.
check_grad
([
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# 'ends': self.ends,
# 'infer_flags': self.infer_flags
# }
# Test CUDA float16
@
unittest
.
skipIf
(
not
core
.
is_compiled_with_cuda
(),
# def config(self):
"core is not compiled with CUDA"
)
# self.dtype = "float16"
class
TestFP16
(
OpTest
):
# self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
def
setUp
(
self
):
# self.starts = [-3, 0, 2]
self
.
op_type
=
"slice"
# self.ends = [3, 100, -1]
self
.
config
()
# self.axes = [0, 1, 3]
self
.
inputs
=
{
'Input'
:
self
.
input
}
# self.out = self.input[-3:3, 0:100, :, 2:-1]
self
.
outputs
=
{
'Out'
:
self
.
out
}
# self.infer_flags = [1, 1, 1]
self
.
attrs
=
{
'axes'
:
self
.
axes
,
# def test_check_output(self):
'starts'
:
self
.
starts
,
# place = core.CUDAPlace(0)
'ends'
:
self
.
ends
,
# if core.is_float16_supported(place):
'infer_flags'
:
self
.
infer_flags
# self.check_output_with_place(place, atol=1e-5)
}
# def test_check_grad_normal(self):
def
config
(
self
):
# place = core.CUDAPlace(0)
self
.
dtype
=
"float16"
# if core.is_float16_supported(place):
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
self
.
dtype
)
# self.check_grad_with_place(
self
.
starts
=
[
-
3
,
0
,
2
]
# place, ['Input'], 'Out', max_relative_error=0.006)
self
.
ends
=
[
3
,
100
,
-
1
]
self
.
axes
=
[
0
,
1
,
3
]
# @unittest.skipIf(not core.is_compiled_with_cuda(),
self
.
out
=
self
.
input
[
-
3
:
3
,
0
:
100
,
:,
2
:
-
1
]
# "core is not compiled with CUDA")
self
.
infer_flags
=
[
1
,
1
,
1
]
# class TestFP16_2(OpTest):
# def setUp(self):
def
test_check_output
(
self
):
# self.op_type = "slice"
place
=
core
.
CUDAPlace
(
0
)
# self.config()
if
core
.
is_float16_supported
(
place
):
# self.inputs = {'Input': self.input}
self
.
check_output_with_place
(
place
,
atol
=
1e-5
)
# self.outputs = {'Out': self.out}
# self.attrs = {
def
test_check_grad_normal
(
self
):
# 'axes': self.axes,
place
=
core
.
CUDAPlace
(
0
)
# 'starts': self.starts,
if
core
.
is_float16_supported
(
place
):
# 'ends': self.ends,
self
.
check_grad_with_place
(
# 'infer_flags': self.infer_flags
place
,
[
'Input'
],
'Out'
,
max_relative_error
=
0.006
)
# }
# def config(self):
@
unittest
.
skipIf
(
not
core
.
is_compiled_with_cuda
(),
# self.dtype = "float16"
"core is not compiled with CUDA"
)
# self.input = np.random.random([3, 4, 10]).astype(self.dtype)
class
TestFP16_2
(
OpTest
):
# self.starts = [0]
def
setUp
(
self
):
# self.ends = [1]
self
.
op_type
=
"slice"
# self.axes = [1]
self
.
config
()
# self.out = self.input[:, 0:1, :]
self
.
inputs
=
{
'Input'
:
self
.
input
}
# self.infer_flags = [1]
self
.
outputs
=
{
'Out'
:
self
.
out
}
self
.
attrs
=
{
# def test_check_output(self):
'axes'
:
self
.
axes
,
# place = core.CUDAPlace(0)
'starts'
:
self
.
starts
,
# if core.is_float16_supported(place):
'ends'
:
self
.
ends
,
# self.check_output_with_place(place, atol=1e-5)
'infer_flags'
:
self
.
infer_flags
}
# def test_check_grad_normal(self):
# place = core.CUDAPlace(0)
def
config
(
self
):
# if core.is_float16_supported(place):
self
.
dtype
=
"float16"
# self.check_grad_with_place(
self
.
input
=
np
.
random
.
random
([
3
,
4
,
10
]).
astype
(
self
.
dtype
)
# place, ['Input'],
self
.
starts
=
[
0
]
# 'Out',
self
.
ends
=
[
1
]
# max_relative_error=0.006,
self
.
axes
=
[
1
]
# numeric_grad_delta=0.5)
self
.
out
=
self
.
input
[:,
0
:
1
,
:]
self
.
infer_flags
=
[
1
]
# class TestBF16(OpTest):
# def setUp(self):
def
test_check_output
(
self
):
# self.op_type = "slice"
place
=
core
.
CUDAPlace
(
0
)
# self.config()
if
core
.
is_float16_supported
(
place
):
# self.inputs = {'Input': convert_float_to_uint16(self.input)}
self
.
check_output_with_place
(
place
,
atol
=
1e-5
)
# self.outputs = {'Out': convert_float_to_uint16(self.out)}
# self.attrs = {
def
test_check_grad_normal
(
self
):
# 'axes': self.axes,
place
=
core
.
CUDAPlace
(
0
)
# 'starts': self.starts,
if
core
.
is_float16_supported
(
place
):
# 'ends': self.ends,
self
.
check_grad_with_place
(
# 'infer_flags': self.infer_flags
place
,
[
'Input'
],
# }
'Out'
,
max_relative_error
=
0.006
,
# def config(self):
numeric_grad_delta
=
0.5
)
# self.dtype = np.uint16
# self.input = np.random.random([3, 4, 5, 6]).astype(np.float32)
# self.starts = [-3, 0, 2]
class
TestBF16
(
OpTest
):
# self.ends = [3, 100, -1]
def
setUp
(
self
):
# self.axes = [0, 1, 3]
self
.
op_type
=
"slice"
# self.out = self.input[-3:3, 0:100, :, 2:-1]
self
.
config
()
# self.infer_flags = [1, 1, 1]
self
.
inputs
=
{
'Input'
:
convert_float_to_uint16
(
self
.
input
)}
self
.
outputs
=
{
'Out'
:
convert_float_to_uint16
(
self
.
out
)}
# def test_check_output(self):
self
.
attrs
=
{
# self.check_output()
'axes'
:
self
.
axes
,
'starts'
:
self
.
starts
,
# def test_check_grad_normal(self):
'ends'
:
self
.
ends
,
# self.check_grad(['Input'], 'Out')
'infer_flags'
:
self
.
infer_flags
}
# # Test python API
# class TestSliceAPI(unittest.TestCase):
def
config
(
self
):
# def test_1(self):
self
.
dtype
=
np
.
uint16
# input = np.random.random([3, 4, 5, 6]).astype("float64")
self
.
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
np
.
float32
)
# minus_1 = fluid.layers.fill_constant([1], "int32", -1)
self
.
starts
=
[
-
3
,
0
,
2
]
# minus_3 = fluid.layers.fill_constant([1], "int64", -3)
self
.
ends
=
[
3
,
100
,
-
1
]
# starts = fluid.layers.data(
self
.
axes
=
[
0
,
1
,
3
]
# name='starts', shape=[1, 3], append_batch_size=False)
self
.
out
=
self
.
input
[
-
3
:
3
,
0
:
100
,
:,
2
:
-
1
]
# ends = fluid.layers.data(
self
.
infer_flags
=
[
1
,
1
,
1
]
# name='ends', shape=[3], append_batch_size=False)
def
test_check_output
(
self
):
# x = fluid.layers.data(
self
.
check_output
()
# name="x",
# shape=[3, 4, 5, 6],
def
test_check_grad_normal
(
self
):
# append_batch_size=False,
self
.
check_grad
([
'Input'
],
'Out'
)
# dtype="float64")
# # value_int64 is greater than 2147483647 which is the max of int32
# Test python API
# value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)
class
TestSliceAPI
(
unittest
.
TestCase
):
def
test_1
(
self
):
# out_1 = fluid.layers.slice(
input
=
np
.
random
.
random
([
3
,
4
,
5
,
6
]).
astype
(
"float64"
)
# x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1])
minus_1
=
fluid
.
layers
.
fill_constant
([
1
],
"int32"
,
-
1
)
# out_2 = fluid.layers.slice(
minus_3
=
fluid
.
layers
.
fill_constant
([
1
],
"int64"
,
-
3
)
# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1])
starts
=
fluid
.
layers
.
data
(
# out_3 = fluid.layers.slice(
name
=
'starts'
,
shape
=
[
1
,
3
],
append_batch_size
=
False
)
# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1])
ends
=
fluid
.
layers
.
data
(
# out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)
name
=
'ends'
,
shape
=
[
3
],
append_batch_size
=
False
)
# out_5 = x[-3:3, 0:100, 2:-1]
x
=
fluid
.
layers
.
data
(
# out_6 = x[minus_3:3, 0:100, :, 2:-1]
name
=
"x"
,
# out_7 = x[minus_1, 0:100, :, 2:minus_1]
shape
=
[
3
,
4
,
5
,
6
],
append_batch_size
=
False
,
# exe = fluid.Executor(place=fluid.CPUPlace())
dtype
=
"float64"
)
# res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
# fluid.default_main_program(),
# value_int64 is greater than 2147483647 which is the max of int32
# feed={
value_int64
=
fluid
.
layers
.
fill_constant
([
1
],
"int64"
,
2147483648
)
# "x": input,
# 'starts': np.array([-3, 0, 2]).astype("int32"),
out_1
=
fluid
.
layers
.
slice
(
# 'ends': np.array([3, 100, -1]).astype("int32")
x
,
axes
=
[
0
,
1
,
2
],
starts
=
[
-
3
,
0
,
2
],
ends
=
[
value_int64
,
100
,
-
1
])
# },
out_2
=
fluid
.
layers
.
slice
(
# fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7])
x
,
axes
=
[
0
,
1
,
3
],
starts
=
[
minus_3
,
0
,
2
],
ends
=
[
3
,
100
,
-
1
])
out_3
=
fluid
.
layers
.
slice
(
# assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
x
,
axes
=
[
0
,
1
,
3
],
starts
=
[
minus_3
,
0
,
2
],
ends
=
[
3
,
100
,
minus_1
])
# assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
out_4
=
fluid
.
layers
.
slice
(
x
,
axes
=
[
0
,
1
,
2
],
starts
=
starts
,
ends
=
ends
)
# assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
# assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
out_5
=
x
[
-
3
:
3
,
0
:
100
,
2
:
-
1
]
# assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
out_6
=
x
[
minus_3
:
3
,
0
:
100
,
:,
2
:
-
1
]
# assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
out_7
=
x
[
minus_1
,
0
:
100
,
:,
2
:
minus_1
]
# assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])
exe
=
fluid
.
Executor
(
place
=
fluid
.
CPUPlace
())
# class TestSliceApiWithTensor(unittest.TestCase):
res_1
,
res_2
,
res_3
,
res_4
,
res_5
,
res_6
,
res_7
=
exe
.
run
(
# def test_starts_ends_is_tensor(self):
fluid
.
default_main_program
(),
# with paddle.fluid.dygraph.guard():
feed
=
{
# a = paddle.rand(shape=[4, 5, 6], dtype='float32')
"x"
:
input
,
# axes = [0, 1, 2]
'starts'
:
np
.
array
([
-
3
,
0
,
2
]).
astype
(
"int32"
),
# starts = [-3, 0, 2]
'ends'
:
np
.
array
([
3
,
100
,
-
1
]).
astype
(
"int32"
)
# ends = [3, 2, 4]
},
# a_1 = paddle.slice(
fetch_list
=
[
out_1
,
out_2
,
out_3
,
out_4
,
out_5
,
out_6
,
out_7
])
# a,
# axes=axes,
assert
np
.
array_equal
(
res_1
,
input
[
-
3
:
3
,
0
:
100
,
2
:
-
1
,
:])
# starts=paddle.to_tensor(
assert
np
.
array_equal
(
res_2
,
input
[
-
3
:
3
,
0
:
100
,
:,
2
:
-
1
])
# starts, dtype='int32'),
assert
np
.
array_equal
(
res_3
,
input
[
-
3
:
3
,
0
:
100
,
:,
2
:
-
1
])
# ends=paddle.to_tensor(
assert
np
.
array_equal
(
res_4
,
input
[
-
3
:
3
,
0
:
100
,
2
:
-
1
,
:])
# ends, dtype='int32'))
assert
np
.
array_equal
(
res_5
,
input
[
-
3
:
3
,
0
:
100
,
2
:
-
1
,
:])
# a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)
assert
np
.
array_equal
(
res_6
,
input
[
-
3
:
3
,
0
:
100
,
:,
2
:
-
1
])
assert
np
.
array_equal
(
res_7
,
input
[
-
1
,
0
:
100
,
:,
2
:
-
1
])
# self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy()))
# def test_bool_tensor(self):
class
TestSliceApiWithTensor
(
unittest
.
TestCase
):
# with paddle.fluid.dygraph.guard():
def
test_starts_ends_is_tensor
(
self
):
# array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool')
with
paddle
.
fluid
.
dygraph
.
guard
():
# tt = paddle.to_tensor(array)
a
=
paddle
.
rand
(
shape
=
[
4
,
5
,
6
],
dtype
=
'float32'
)
# tt.stop_gradient = False
axes
=
[
0
,
1
,
2
]
starts
=
[
-
3
,
0
,
2
]
# starts = [0, 1, 2]
ends
=
[
3
,
2
,
4
]
# ends = [3, 5, 4]
a_1
=
paddle
.
slice
(
# axes = [0, 1, 2]
a
,
axes
=
axes
,
# y_paddle = paddle.slice(tt, axes, starts, ends)
starts
=
paddle
.
to_tensor
(
# y_np = tt[0:3, 1:5, 2:4]
starts
,
dtype
=
'int32'
),
ends
=
paddle
.
to_tensor
(
# self.assertTrue(paddle.bool == y_paddle.dtype)
ends
,
dtype
=
'int32'
))
# self.assertTrue(np.array_equal(y_paddle.numpy(), y_np))
a_2
=
paddle
.
slice
(
a
,
axes
=
axes
,
starts
=
starts
,
ends
=
ends
)
# class TestSliceApiWithLoDTensorArray(unittest.TestCase):
self
.
assertTrue
(
np
.
array_equal
(
a_1
.
numpy
(),
a_2
.
numpy
()))
# def setUp(self):
# self.shape = (3, 4)
def
test_bool_tensor
(
self
):
# self.data = np.random.random(size=self.shape).astype('float32')
with
paddle
.
fluid
.
dygraph
.
guard
():
# self.idx = 0
array
=
(
np
.
arange
(
60
).
reshape
([
3
,
4
,
5
])
%
3
).
astype
(
'bool'
)
# self.start = 0
tt
=
paddle
.
to_tensor
(
array
)
# self.end = 2
tt
.
stop_gradient
=
False
# self.axis = 1
starts
=
[
0
,
1
,
2
]
# self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
ends
=
[
3
,
5
,
4
]
# ) else fluid.CPUPlace()
axes
=
[
0
,
1
,
2
]
# self.exe = fluid.Executor(self.place)
y_paddle
=
paddle
.
slice
(
tt
,
axes
,
starts
,
ends
)
# def set_program_and_run(self, main_program, case_num):
y_np
=
tt
[
0
:
3
,
1
:
5
,
2
:
4
]
# with fluid.program_guard(main_program):
# x = [
self
.
assertTrue
(
paddle
.
bool
==
y_paddle
.
dtype
)
# fluid.data(
self
.
assertTrue
(
np
.
array_equal
(
y_paddle
.
numpy
(),
y_np
))
# name='x0', shape=self.shape, dtype="float32"), fluid.data(
# name='x1', shape=self.shape, dtype="float32"),
# fluid.data(
class
TestSliceApiWithLoDTensorArray
(
unittest
.
TestCase
):
# name='x2', shape=self.shape, dtype="float32")
def
setUp
(
self
):
# ]
self
.
shape
=
(
3
,
4
)
self
.
data
=
np
.
random
.
random
(
size
=
self
.
shape
).
astype
(
'float32'
)
# for each_x in x:
self
.
idx
=
0
# each_x.stop_gradient = False
self
.
start
=
0
self
.
end
=
2
# arr = layers.create_array(dtype="float32")
self
.
axis
=
1
# for i in range(3):
# idx = layers.array_length(arr)
self
.
place
=
fluid
.
CUDAPlace
(
0
)
if
fluid
.
is_compiled_with_cuda
(
# arr = layers.array_write(x=x[i], i=idx, array=arr)
)
else
fluid
.
CPUPlace
()
self
.
exe
=
fluid
.
Executor
(
self
.
place
)
# if case_num == 1:
# self.sliced_arr = output = arr[0]
def
set_program_and_run
(
self
,
main_program
,
case_num
):
with
fluid
.
program_guard
(
main_program
):
# elif case_num == 2:
x
=
[
# end = fluid.layers.array_length(
fluid
.
data
(
# arr) - 1 # dtype of end is int64
name
=
'x0'
,
shape
=
self
.
shape
,
dtype
=
"float32"
),
fluid
.
data
(
# self.sliced_arr = slice_arr = arr[self.start:end]
name
=
'x1'
,
shape
=
self
.
shape
,
dtype
=
"float32"
),
# output, _ = fluid.layers.tensor_array_to_tensor(
fluid
.
data
(
# slice_arr, axis=self.axis, use_stack=True)
name
=
'x2'
,
shape
=
self
.
shape
,
dtype
=
"float32"
)
# elif case_num == 3:
]
# value_int64 = fluid.layers.fill_constant([1], "int64",
# 2147483648)
for
each_x
in
x
:
# self.sliced_arr = slice_arr = arr[self.start:value_int64]
each_x
.
stop_gradient
=
False
# output, _ = fluid.layers.tensor_array_to_tensor(
# slice_arr, axis=self.axis, use_stack=True)
arr
=
layers
.
create_array
(
dtype
=
"float32"
)
for
i
in
range
(
3
):
# loss = fluid.layers.reduce_sum(output)
idx
=
layers
.
array_length
(
arr
)
# fluid.backward.append_backward(loss)
arr
=
layers
.
array_write
(
x
=
x
[
i
],
i
=
idx
,
array
=
arr
)
# g_vars = list(
# map(main_program.global_block().var,
            if case_num == 1:
                self.sliced_arr = output = arr[0]
            elif case_num == 2:
                end = fluid.layers.array_length(
                    arr) - 1  # dtype of end is int64
                self.sliced_arr = slice_arr = arr[self.start:end]
                output, _ = fluid.layers.tensor_array_to_tensor(
                    slice_arr, axis=self.axis, use_stack=True)
            elif case_num == 3:
                value_int64 = fluid.layers.fill_constant([1], "int64",
                                                         2147483648)
                self.sliced_arr = slice_arr = arr[self.start:value_int64]
                output, _ = fluid.layers.tensor_array_to_tensor(
                    slice_arr, axis=self.axis, use_stack=True)

            loss = fluid.layers.reduce_sum(output)
            fluid.backward.append_backward(loss)
            g_vars = list(
                map(main_program.global_block().var,
                    [each_x.name + "@GRAD" for each_x in x]))
            self.out, self.g_x0, self.g_x1, self.g_x2 = \
                self.exe.run(main_program,
                             feed={'x0': self.data,
                                   'x1': self.data,
                                   'x2': self.data},
                             fetch_list=[output] + g_vars)
    def test_case_1(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 1)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(np.array_equal(self.out, self.data))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))
    def test_case_2(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 2)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(
            np.array_equal(
                self.out, np.stack(
                    [self.data, self.data], axis=self.axis)))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))
    def test_case_3(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 3)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(
            np.array_equal(
                self.out,
                np.stack(
                    [self.data, self.data, self.data], axis=self.axis)))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data)))
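
# For reference (shapes only), with self.data of shape (3, 4) and axis=1:
#     d = np.zeros((3, 4))
#     np.stack([d, d], axis=1).shape      # (3, 2, 4) -- what case 2 fetches
#     np.stack([d, d, d], axis=1).shape   # (3, 3, 4) -- what case 3 fetches
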
class TestImperativeVarBaseGetItem(unittest.TestCase):
    def test_getitem_with_long(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = fluid.dygraph.to_variable(data)
            sliced = var[:, 10:, :var.shape[1]]  # var.shape[1] is 80L here
            self.assertEqual(sliced.shape, [2, 70, 80])

            sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]]
            self.assertEqual(sliced.shape, [2, 78, 78])
    def test_getitem_with_float(self):
        def test_float_in_slice_item():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[:, 1.1:, :var.shape[1]]

        self.assertRaises(Exception, test_float_in_slice_item)

        def test_float_in_index():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[1.1]

        self.assertRaises(Exception, test_float_in_index)
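        # Both helpers above are expected to raise: a float such as 1.1 is
        # rejected whether it appears as a slice bound or as a plain index,
        # rather than being silently truncated to an int.
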
class TestInferShape(unittest.TestCase):
    def test(self):
        x = paddle.ones(shape=[3, 4, 5])
        x.desc.set_shape([3, -1, 5])
        self.assertEqual(x.shape, (3, -1, 5))

        out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3])
        self.assertEqual(out0.shape, (3, 3, 5))
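        # Note: even with the second dimension unknown (-1), slicing axis 1
        # to the fixed range [0, 3) lets shape inference produce the static
        # shape (3, 3, 5).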
    def test_axis_less_than_zero(self):

        # Using paddle.disable_static will make other unittests fail.
        with fluid.dygraph.guard():
            x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4])
            x = paddle.to_tensor(x_arr)

            pp_slice = paddle.slice(x, [100, ], [0], [1])
            np_slice = x_arr[:, :, 0:1]
            self.assertTrue(np.array_equal(pp_slice, np_slice))

            pp_slice = paddle.slice(x, (-100, ), [0], [1])
            np_slice = x_arr[0:1]
            self.assertTrue(np.array_equal(pp_slice, np_slice))

            x_arr = np.array([], dtype=np.float32)
            x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0)))

            starts = paddle.to_tensor(
                np.reshape(
                    np.array(
                        [], dtype=np.int32), (0, )))
            ends = paddle.to_tensor(
                np.reshape(
                    np.array(
                        [], dtype=np.int32), (0, )))

            with self.assertRaises(ValueError):
                paddle.slice(x, [-1000000], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, [1000000], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, [], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, 0, starts, ends)
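        # Judging by the expectations above, a positive out-of-range axis
        # (100) resolves to the last axis and a negative one (-100) to axis
        # 0, while the zero-size-tensor cases with extreme, empty, or
        # non-sequence axes arguments are all expected to raise ValueError.
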
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestImperativeCUDAPinnedInput(unittest.TestCase):
    def test_input_cuda_pinned_var(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = core.VarBase(
                value=data,
                name='',
                persistable=False,
                place=fluid.CUDAPinnedPlace(),
                zero_copy=False)
            sliced = var[:, 10:, :var.shape[1]]
            self.assertEqual(sliced.shape, [2, 70, 80])
if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()