Repository: Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle)

Commit 586671ea
Authored on Mar 11, 2022 by phlrain
Parent: d35f5882

    fix error

Showing 9 changed files with 1,086 additions and 751 deletions.
paddle/fluid/operators/slice_op.h                      +161    -2
paddle/phi/kernels/gpu/slice_grad_kernel.cu.cc           +2    -1
paddle/phi/kernels/impl/slice_grad_kernel_impl.h        +22   -15
paddle/phi/kernels/impl/slice_kernel_impl.h              +5    -2
paddle/phi/kernels/slice_grad_kernel.h                   +4    -2
paddle/phi/kernels/slice_kernel.h                        +3    -2
paddle/phi/ops/compat/slice_sig.cc                     +146   -10
paddle/pten/kernels/slice_kernel.h                       +3    -2
python/paddle/fluid/tests/unittests/test_slice_op.py   +740  -715
paddle/fluid/operators/slice_op.h

@@ -28,10 +28,103 @@ using Variable = framework::Variable;
(This hunk replaces the previously empty Compute() stub of SliceKernel with a
full implementation that handles LoDTensorArray inputs.)

using LoDTensorArray = framework::LoDTensorArray;
using DDim = framework::DDim;

inline void DealTensorArray(const framework::ExecutionContext& ctx,
                            const std::vector<int64_t>& starts,
                            const std::vector<int64_t>& ends,
                            bool out_is_array) {
  auto in_array = ctx.Input<LoDTensorArray>("Input");
  // If the input is LoDTensorArray, the rank of input is 1.
  int64_t in_size = in_array->size();
  int64_t start = starts[0] < 0 ? (starts[0] + in_size) : starts[0];
  int64_t end = ends[0] < 0 ? (ends[0] + in_size) : ends[0];

  start = std::max(start, static_cast<int64_t>(0));
  end = std::max(end, static_cast<int64_t>(0));
  end = std::min(end, in_size);

  if (starts[0] == -1 && end == 0) {
    end = start + 1;
  }

  PADDLE_ENFORCE_GT(end, start,
                    platform::errors::InvalidArgument(
                        "Attr(ends) should be greater than attr(starts) in "
                        "slice op. But received end = %d, start = %d.",
                        ends[0], starts[0]));
  int64_t out_size = end - start;

  if (out_is_array) {
    auto out_array = ctx.Output<LoDTensorArray>("Out");
    out_array->resize(out_size);

    for (int i = 0; i < out_size; ++i) {
      auto* out_tensor = &out_array->at(i);
      auto in_tensor = in_array->at(i + start);
      out_tensor->set_lod(in_tensor.lod());
      if (in_tensor.memory_size() > 0) {
        paddle::framework::TensorCopy(in_tensor, ctx.GetPlace(), out_tensor);
      } else {
        VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so "
                    "nothing has been written to output array["
                 << i << "].";
      }
    }
  } else {
    auto out = ctx.Output<Tensor>("Out");
    auto in_tensor = in_array->at(start);
    paddle::framework::TensorCopy(in_tensor, ctx.GetPlace(), out);
  }
}

template <typename DeviceContext, typename T>
class SliceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const Variable* input_var = ctx.InputVar("Input");
    Variable* out_var = ctx.OutputVar("Out");
    bool input_is_array = input_var->IsType<LoDTensorArray>();
    bool out_is_array = out_var->IsType<LoDTensorArray>();

    auto axes_int = ctx.Attr<std::vector<int>>("axes");
    auto starts_int = ctx.Attr<std::vector<int>>("starts");
    auto ends_int = ctx.Attr<std::vector<int>>("ends");
    std::vector<int64_t> axes(axes_int.begin(), axes_int.end());
    std::vector<int64_t> starts(starts_int.begin(), starts_int.end());
    std::vector<int64_t> ends(ends_int.begin(), ends_int.end());

    auto decrease_axis = ctx.Attr<std::vector<int>>("decrease_axis");
    auto infer_flags = ctx.Attr<std::vector<int>>("infer_flags");

    // Step 1: Get the accurate attribute value of starts and ends
    auto starts_tensor_list = ctx.MultiInput<Tensor>("StartsTensorList");
    if (ctx.HasInput("StartsTensor")) {
      starts = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("StartsTensor"));
    } else if (starts_tensor_list.size() > 0) {
      starts = GetDataFromTensorList<int64_t>(starts_tensor_list);
    }

    auto ends_tensor_list = ctx.MultiInput<Tensor>("EndsTensorList");
    if (ctx.HasInput("EndsTensor")) {
      ends = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("EndsTensor"));
    } else if (ends_tensor_list.size() > 0) {
      ends = GetDataFromTensorList<int64_t>(ends_tensor_list);
    }

    PADDLE_ENFORCE_EQ(
        starts.size(), axes.size(),
        platform::errors::InvalidArgument(
            "The size of starts must be equal to the size of axes."));
    PADDLE_ENFORCE_EQ(
        ends.size(), axes.size(),
        platform::errors::InvalidArgument(
            "The size of ends must be equal to the size of axes."));

    // Step 2: Compute output
    if (input_is_array) {
      DealTensorArray(ctx, starts, ends, out_is_array);
      return;
    }
  }

 private:
};

@@ -39,7 +132,73 @@ class SliceKernel : public framework::OpKernel<T> {
(This hunk likewise replaces the previously empty Compute() stub of
SliceGradKernel.)

template <typename DeviceContext, typename T>
class SliceGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto axes = ctx.Attr<std::vector<int>>("axes");
    auto starts_int = ctx.Attr<std::vector<int>>("starts");
    auto ends_int = ctx.Attr<std::vector<int>>("ends");
    std::vector<int64_t> starts(starts_int.begin(), starts_int.end());
    std::vector<int64_t> ends(ends_int.begin(), ends_int.end());

    // Get the accurate attribute value of starts and ends
    auto starts_tensor_list = ctx.MultiInput<Tensor>("StartsTensorList");
    if (ctx.HasInput("StartsTensor")) {
      starts = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("StartsTensor"));
    } else if (starts_tensor_list.size() > 0) {
      starts = GetDataFromTensorList<int64_t>(starts_tensor_list);
    }

    auto ends_tensor_list = ctx.MultiInput<Tensor>("EndsTensorList");
    if (ctx.HasInput("EndsTensor")) {
      ends = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("EndsTensor"));
    } else if (ends_tensor_list.size() > 0) {
      ends = GetDataFromTensorList<int64_t>(ends_tensor_list);
    }

    Variable* d_input_var = ctx.OutputVar(framework::GradVarName("Input"));
    const Variable* d_out_var = ctx.InputVar(framework::GradVarName("Out"));
    bool d_input_is_array = d_input_var->IsType<LoDTensorArray>();
    bool d_out_is_array = d_out_var->IsType<LoDTensorArray>();

    if (d_input_is_array) {
      auto* input_array = ctx.Input<LoDTensorArray>("Input");
      auto* d_in_arr =
          ctx.Output<LoDTensorArray>(framework::GradVarName("Input"));

      int64_t d_in_size = input_array->size();
      d_in_arr->resize(d_in_size);
      // If the input is LoDTensorArray, the rank of input is 1.
      // So only use the 0th element of starts.
      int64_t start = starts[0] < 0 ? (starts[0] + d_in_size) : starts[0];
      start = std::max(start, static_cast<int64_t>(0));

      // set zero
      platform::DeviceContextPool& pool =
          platform::DeviceContextPool::Instance();
      auto& dev_ctx = *pool.Get(ctx.GetPlace());
      phi::funcs::SetConstant<DeviceContext, T> functor;
      for (int i = 0; i < d_in_size; ++i) {
        auto dim = input_array->at(i).dims();
        d_in_arr->at(i).Resize(dim);
        d_in_arr->at(i).mutable_data<T>(ctx.GetPlace());
        functor(reinterpret_cast<const DeviceContext&>(dev_ctx),
                &d_in_arr->at(i), static_cast<T>(0));
      }

      if (d_out_is_array) {
        auto* d_out_arr =
            ctx.Input<LoDTensorArray>(framework::GradVarName("Out"));
        int d_out_size = d_out_arr->size();
        for (int i = 0; i < d_out_size; ++i) {
          paddle::framework::TensorCopy(d_out_arr->at(i), ctx.GetPlace(),
                                        &(d_in_arr->at(start + i)));
        }
      } else {
        auto* d_out = ctx.Input<Tensor>(framework::GradVarName("Out"));
        paddle::framework::TensorCopy(*d_out, ctx.GetPlace(),
                                      &(d_in_arr->at(start)));
      }
      return;
    }
  }

 private:
};
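For orientation, here is a minimal sketch (not part of the commit, plain Python
with illustrative names) of the index normalization DealTensorArray applies
before copying tensors out of the array:

# Illustrative sketch (not Paddle code) of the start/end normalization in
# DealTensorArray: negative indices wrap around, values are clamped to the
# array size, and a raw start of -1 with a clamped end of 0 is treated as
# "take the last element".
def normalize_slice(raw_start, raw_end, size):
    start = raw_start + size if raw_start < 0 else raw_start
    end = raw_end + size if raw_end < 0 else raw_end
    start = max(start, 0)
    end = min(max(end, 0), size)
    if raw_start == -1 and end == 0:
        end = start + 1
    assert end > start, "Attr(ends) should be greater than attr(starts)"
    return start, end


if __name__ == "__main__":
    print(normalize_slice(-3, -1, 5))      # (2, 4): elements 2 and 3
    print(normalize_slice(-1, 0, 5))       # (4, 5): the last-element special case
    print(normalize_slice(1, 1000000, 3))  # (1, 3): end clamped to the array size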
paddle/phi/kernels/gpu/slice_grad_kernel.cu.cc

@@ -29,4 +29,5 @@ PD_REGISTER_KERNEL(slice_grad,
                    double,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::bfloat16,
+                   phi::dtype::float16) {}
paddle/phi/kernels/impl/slice_grad_kernel_impl.h

@@ -30,6 +30,8 @@ void LaunchEigenPadding(
     const DDim& out_dims,
     const Eigen::array<std::pair<int64_t, int64_t>, D>& paddings) {
   auto& place = *context.template eigen_device();
+  LOG(ERROR) << D << "\t" << in_dims;
+  LOG(ERROR) << out_dims;
   auto d_in_t =
       EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(*d_input,
                                                                   in_dims);
   auto d_out_t =
       EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(

@@ -150,12 +152,12 @@ void EigenPaddingCompute(
       // the second dimension do not need padding, set padding[1] zero
       reshaped_padding[1].first = reshaped_padding[1].second = 0;
-      LaunchEigenPadding<T, Context>(context, d_input, reshaped_in_dims, d_out,
-                                     reshaped_out_dims, reshaped_padding);
+      LaunchEigenPadding<T, Context, 2>(context, d_input, reshaped_in_dims,
+                                        d_out, reshaped_out_dims,
+                                        reshaped_padding);
     } else {
       // other dimension need padding
       // reshape the dimension of tensor in 3:

@@ -190,12 +192,13 @@ void EigenPaddingCompute(
       // the third dimension do not need padding, set padding[2] zero
       reshaped_padding[2].first = reshaped_padding[2].second = 0;
-      LaunchEigenPadding<T, Context>(context, d_input, reshaped_in_dims, d_out,
-                                     reshaped_out_dims, reshaped_padding);
+      LOG(ERROR) << "run here";
+      LaunchEigenPadding<T, Context, 3>(context, d_input, reshaped_in_dims,
+                                        d_out, reshaped_out_dims,
+                                        reshaped_padding);
     }
   } else {
     // need padding at many dimension, cannot reduce dimension

@@ -270,14 +273,18 @@ void SliceGradCompute(const Context& ctx,
 template <typename T, typename Context>
 void SliceGradRawKernel(const Context& ctx,
                         const DenseTensor& input,
                         const DenseTensor& out_grad,
                         const std::vector<int64_t>& axes,
-                        const std::vector<int64_t>& starts,
-                        const std::vector<int64_t>& ends,
+                        const ScalarArray& starts_arr,
+                        const ScalarArray& ends_arr,
                         const std::vector<int64_t>& infer_flags,
                         const std::vector<int64_t>& decrease_axis,
                         DenseTensor* input_grad) {
-  size_t rank = out_grad.dims().size();
+  size_t rank = input.dims().size();
+
+  auto& starts = starts_arr.GetData();
+  auto& ends = ends_arr.GetData();

   switch (rank) {
     case 1:
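For context on the rank change in SliceGradRawKernel (rank is now taken from
input instead of out_grad): when decrease_axis is in play, the forward output,
and therefore the incoming gradient, has fewer dimensions than the input, so
the padding setup has to be sized by the input's rank. A small NumPy
illustration of that rank mismatch (not part of the commit):

import numpy as np

# Slicing x[1, 0:3, :] keeps a single index on axis 0, which "decrease_axis"
# then squeezes away, so the forward output (and the upstream gradient) is
# rank 2 while the input is rank 3.
x = np.random.rand(3, 4, 5).astype("float32")
out = x[1, 0:3, :]
print(x.ndim, out.ndim)  # 3 2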
paddle/phi/kernels/impl/slice_kernel_impl.h

@@ -110,13 +110,16 @@ template <typename T, typename Context>
 void SliceRawKernel(const Context& ctx,
                     const DenseTensor& input,
                     const std::vector<int64_t>& axes,
-                    const std::vector<int64_t>& starts,
-                    const std::vector<int64_t>& ends,
+                    const ScalarArray& starts_arr,
+                    const ScalarArray& ends_arr,
                     const std::vector<int64_t>& infer_flags,
                     const std::vector<int64_t>& decrease_axis,
                     DenseTensor* out) {
   int rank = input.dims().size();
+
+  auto& starts = starts_arr.GetData();
+  auto& ends = ends_arr.GetData();

   switch (rank) {
     case 1:
       SliceCompute<T, Context, 1>(
paddle/phi/kernels/slice_grad_kernel.h

@@ -14,16 +14,18 @@
 #pragma once

+#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {

 template <typename T, typename Context>
 void SliceGradRawKernel(const Context& ctx,
                         const DenseTensor& input,
                         const DenseTensor& out_grad,
                         const std::vector<int64_t>& axes,
-                        const std::vector<int64_t>& starts,
-                        const std::vector<int64_t>& ends,
+                        const ScalarArray& starts,
+                        const ScalarArray& ends,
                         const std::vector<int64_t>& infer_flags,
                         const std::vector<int64_t>& decrease_axis,
                         DenseTensor* input_grad);
paddle/phi/kernels/slice_kernel.h

@@ -14,6 +14,7 @@
 #pragma once

+#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {

@@ -22,8 +23,8 @@ template <typename T, typename Context>
 void SliceRawKernel(const Context& ctx,
                     const DenseTensor& input,
                     const std::vector<int64_t>& axes,
-                    const std::vector<int64_t>& starts,
-                    const std::vector<int64_t>& ends,
+                    const ScalarArray& starts,
+                    const ScalarArray& ends,
                     const std::vector<int64_t>& infer_flags,
                     const std::vector<int64_t>& decrease_axis,
                     DenseTensor* out);
paddle/phi/ops/compat/slice_sig.cc

@@ -17,19 +17,155 @@
(Both mapping functions previously returned a single fixed signature: "slice"
took {"Input"} with the attributes {"axes", "starts", "ends", "infer_flags",
"decrease_axis"} and produced {"Out"}, and "slice_grad" took
{GradVarName("Out")} with the same attributes and produced
{GradVarName("Input")}. Those single returns are removed and replaced by the
branching below.)

namespace phi {

KernelSignature SliceOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.HasInput("StartsTensor")) {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "StartsTensor", "EndsTensor", "infer_flags", "decrease_axis"},
          {"Out"});
    } else if (ctx.InputSize("EndsTensorList") > 0) {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "StartsTensor", "EndsTensorList", "infer_flags",
           "decrease_axis"},
          {"Out"});
    } else {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "StartsTensor", "ends", "infer_flags", "decrease_axis"},
          {"Out"});
    }
  } else if (ctx.InputSize("StartsTensorList") > 0) {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "StartsTensorList", "EndsTensor", "infer_flags",
           "decrease_axis"},
          {"Out"});
    } else if (ctx.InputSize("EndsTensorList") > 0) {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "StartsTensorList", "EndsTensorList", "infer_flags",
           "decrease_axis"},
          {"Out"});
    } else {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "StartsTensorList", "ends", "infer_flags", "decrease_axis"},
          {"Out"});
    }
  } else {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "starts", "EndsTensor", "infer_flags", "decrease_axis"},
          {"Out"});
    } else if (ctx.InputSize("EndsTensorList") > 0) {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "starts", "EndsTensorList", "infer_flags", "decrease_axis"},
          {"Out"});
    } else {
      return KernelSignature(
          "slice", {"Input"},
          {"axes", "starts", "ends", "infer_flags", "decrease_axis"},
          {"Out"});
    }
  }
}

KernelSignature SliceGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.HasInput("StartsTensor")) {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "StartsTensor", "EndsTensor", "infer_flags", "decrease_axis"},
          {GradVarName("Input")});
    } else if (ctx.InputSize("EndsTensorList") > 0) {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "StartsTensor", "EndsTensorList", "infer_flags",
           "decrease_axis"},
          {GradVarName("Input")});
    } else {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "StartsTensor", "ends", "infer_flags", "decrease_axis"},
          {GradVarName("Input")});
    }
  } else if (ctx.InputSize("StartsTensorList") > 0) {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "StartsTensorList", "EndsTensor", "infer_flags",
           "decrease_axis"},
          {GradVarName("Input")});
    } else if (ctx.InputSize("EndsTensorList") > 0) {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "StartsTensorList", "EndsTensorList", "infer_flags",
           "decrease_axis"},
          {GradVarName("Input")});
    } else {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "StartsTensorList", "ends", "infer_flags", "decrease_axis"},
          {GradVarName("Input")});
    }
  } else {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "starts", "EndsTensor", "infer_flags", "decrease_axis"},
          {GradVarName("Input")});
    } else if (ctx.InputSize("EndsTensorList") > 0) {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "starts", "EndsTensorList", "infer_flags", "decrease_axis"},
          {GradVarName("Input")});
    } else {
      return KernelSignature(
          "slice_grad", {"Input", GradVarName("Out")},
          {"axes", "starts", "ends", "infer_flags", "decrease_axis"},
          {GradVarName("Input")});
    }
  }
}

}  // namespace phi
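The branching above reflects that, at the Python level, starts and ends may
arrive as plain lists (carried as the "starts"/"ends" attributes), as a single
Tensor ("StartsTensor"/"EndsTensor"), or as a list of Tensors
("StartsTensorList"/"EndsTensorList"), and each combination maps to a different
kernel signature. A rough usage sketch, assuming a Paddle build of roughly this
era is installed; both calls are expected to give the same result:

import paddle

x = paddle.arange(24, dtype="float32").reshape([2, 3, 4])

# starts/ends as Python lists: the op carries them as the "starts"/"ends"
# attributes, so the last branch of SliceOpArgumentMapping applies.
y1 = paddle.slice(x, axes=[0, 1], starts=[0, 1], ends=[1, 3])

# starts/ends as Tensors: the op receives them as the "StartsTensor"/
# "EndsTensor" inputs, so the first branch applies instead.
starts_t = paddle.to_tensor([0, 1], dtype="int32")
ends_t = paddle.to_tensor([1, 3], dtype="int32")
y2 = paddle.slice(x, axes=[0, 1], starts=starts_t, ends=ends_t)

print(bool((y1 == y2).all()))  # True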
paddle/pten/kernels/slice_kernel.h

@@ -14,6 +14,7 @@
 #pragma once

+#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {

@@ -22,8 +23,8 @@ template <typename T, typename Context>
 void SliceRawKernel(const Context& ctx,
                     const DenseTensor& input,
                     const std::vector<int64_t>& axes,
-                    const std::vector<int64_t>& starts,
-                    const std::vector<int64_t>& ends,
+                    const ScalarArray& starts,
+                    const ScalarArray& ends,
                     const std::vector<int64_t>& infer_flags,
                     const std::vector<int64_t>& decrease_axis,
                     DenseTensor* out);
python/paddle/fluid/tests/unittests/test_slice_op.py

@@ -55,721 +55,746 @@ class TestSliceOp(OpTest):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)

(The removed lines of this hunk are the commented-out test classes reproduced
below; the added lines, summarized after the comment block, restore the same
tests as active code.)
# class TestCase1(TestSliceOp):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-3, 0, 2]
# self.ends = [3, 100, -1]
# self.axes = [0, 1, 2]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[-3:3, 0:100, 2:-1, :]
# class TestCase2(TestSliceOp):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-3, 0, 2]
# self.ends = [3, 100, -1]
# self.axes = [0, 1, 3]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[-3:3, 0:100, :, 2:-1]
# # 1.2 with attr(decrease)
# class TestSliceOp_decs_dim(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# self.inputs = {'Input': self.input}
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# 'starts': self.starts,
# 'ends': self.ends,
# 'infer_flags': self.infer_flags,
# 'decrease_axis': self.decrease_axis,
# }
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [2, 3, 4]
# self.axes = [0, 1, 2]
# self.decrease_axis = [0]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[1, 0:3, 2:4, :]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [2, 1, 4]
# self.axes = [0, 1, 2]
# self.decrease_axis = [0, 1]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[1, 0, 2:4, :]
# class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-1, 0, 2]
# self.ends = [1000000, 1, 4]
# self.axes = [0, 1, 2]
# self.decrease_axis = [0, 1]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[-1, 0, 2:4, :]
# class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
# def config(self):
# self.input = np.random.random([3, 4, 5, 7]).astype("float64")
# self.starts = [0, 1, 2, 3]
# self.ends = [1, 2, 3, 4]
# self.axes = [0, 1, 2, 3]
# self.decrease_axis = [0, 1, 2, 3]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[0, 1, 2, 3:4]
# class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-1]
# self.ends = [1000000]
# self.axes = [3]
# self.decrease_axis = [3]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[:, :, :, -1]
# class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [0, 1, 2, 3]
# self.ends = [1, 2, 3, 4]
# self.axes = [0, 1, 2, 3]
# self.decrease_axis = [0, 1, 2, 3]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[0, 1, 2, 3:4]
# # Situation 2: starts(list, have tensor), ends(list, no tensor)
# # without attr(decrease)
# class TestSliceOp_starts_ListTensor(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# starts_tensor = []
# for index, ele in enumerate(self.starts):
# starts_tensor.append(("x" + str(index), np.ones(
# (1)).astype('int64') * ele))
# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# 'starts': self.starts_infer,
# 'ends': self.ends,
# 'infer_flags': self.infer_flags
# }
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [3, 3, 4]
# self.axes = [0, 1, 2]
# self.infer_flags = [-1, 1, -1]
# self.out = self.input[1:3, 0:3, 2:4, :]
# self.starts_infer = [-1, 0, -1]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# # Situation 2: starts(list, have tensor), ends(list, no tensor)
# # with attr(decrease)
# class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# starts_tensor = []
# for index, ele in enumerate(self.starts):
# starts_tensor.append(("x" + str(index), np.ones(
# (1)).astype('int32') * ele))
# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# 'starts': self.starts_infer,
# 'ends': self.ends,
# 'infer_flags': self.infer_flags,
# 'decrease_axis': self.decrease_axis,
# }
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [2, 3, 4]
# self.axes = [0, 1, 2]
# self.decrease_axis = [0]
# self.infer_flags = [1, -1, 1]
# self.out = self.input[1, 0:3, 2:4, :]
# self.starts_infer = [1, -1, 2]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# class TestSliceOp_decs_dim_5_starts_ListTensor(
# TestSliceOp_decs_dim_starts_ListTensor):
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-1]
# self.ends = [1000000]
# self.axes = [3]
# self.decrease_axis = [3]
# self.infer_flags = [-1]
# self.out = self.input[:, :, :, -1]
# self.starts_infer = [-1]
# # Situation 3: starts(tensor), ends(list, no tensor)
# # with attr(decrease)
# class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# self.inputs = {
# 'Input': self.input,
# "StartsTensor": np.array(
# self.starts, dtype="int32")
# }
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# #'starts': self.starts,
# 'ends': self.ends,
# 'infer_flags': self.infer_flags,
# 'decrease_axis': self.decrease_axis,
# }
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [2, 3, 4]
# self.axes = [0, 1, 2]
# self.decrease_axis = [0]
# self.infer_flags = [-1, -1, -1]
# self.out = self.input[1, 0:3, 2:4, :]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# # Situation 4: starts(tensor), ends(tensor)
# # without attr(decrease)
# class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# self.inputs = {
# 'Input': self.input,
# "StartsTensor": np.array(
# self.starts, dtype="int64"),
# "EndsTensor": np.array(
# self.ends, dtype="int32")
# }
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# #'starts': self.starts,
# #'ends': self.ends_infer,
# 'infer_flags': self.infer_flags
# }
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [3, 3, 4]
# self.axes = [0, 1, 2]
# self.infer_flags = [-1, -1, -1]
# self.out = self.input[1:3, 0:3, 2:4, :]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# # Situation 5: starts(tensor), ends(tensor)
# # with attr(decrease)
# class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# self.inputs = {
# 'Input': self.input,
# "StartsTensor": np.array(
# self.starts, dtype="int32"),
# "EndsTensor": np.array(
# self.ends, dtype="int32")
# }
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# #'starts': self.starts,
# #'ends': self.ends,
# 'infer_flags': self.infer_flags,
# 'decrease_axis': self.decrease_axis,
# }
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [2, 1, 4]
# self.axes = [0, 1, 2]
# self.decrease_axis = [0, 1]
# self.infer_flags = [-1, -1, -1]
# self.out = self.input[1, 0, 2:4, :]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# # Situation 6: starts(tensor), ends(list, have tensor)
# # without attr(decrease)
# class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# ends_tensor = []
# for index, ele in enumerate(self.ends):
# ends_tensor.append(("y" + str(index), np.ones(
# (1)).astype('int32') * ele))
# self.inputs = {
# 'Input': self.input,
# "StartsTensor": np.array(
# self.starts, dtype="int32"),
# 'EndsTensorList': ends_tensor
# }
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# #'starts': self.starts,
# 'ends': self.ends_infer,
# 'infer_flags': self.infer_flags
# }
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [3, 3, 4]
# self.axes = [0, 1, 2]
# self.infer_flags = [-1, -1, -1]
# self.out = self.input[1:3, 0:3, 2:4, :]
# self.ends_infer = [-1, 3, 4]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# # Test CUDA float16
# @unittest.skipIf(not core.is_compiled_with_cuda(),
# "core is not compiled with CUDA")
# class TestFP16(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# self.inputs = {'Input': self.input}
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# 'starts': self.starts,
# 'ends': self.ends,
# 'infer_flags': self.infer_flags
# }
# def config(self):
# self.dtype = "float16"
# self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
# self.starts = [-3, 0, 2]
# self.ends = [3, 100, -1]
# self.axes = [0, 1, 3]
# self.out = self.input[-3:3, 0:100, :, 2:-1]
# self.infer_flags = [1, 1, 1]
# def test_check_output(self):
# place = core.CUDAPlace(0)
# if core.is_float16_supported(place):
# self.check_output_with_place(place, atol=1e-5)
# def test_check_grad_normal(self):
# place = core.CUDAPlace(0)
# if core.is_float16_supported(place):
# self.check_grad_with_place(
# place, ['Input'], 'Out', max_relative_error=0.006)
# @unittest.skipIf(not core.is_compiled_with_cuda(),
# "core is not compiled with CUDA")
# class TestFP16_2(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# self.inputs = {'Input': self.input}
# self.outputs = {'Out': self.out}
# self.attrs = {
# 'axes': self.axes,
# 'starts': self.starts,
# 'ends': self.ends,
# 'infer_flags': self.infer_flags
# }
# def config(self):
# self.dtype = "float16"
# self.input = np.random.random([3, 4, 10]).astype(self.dtype)
# self.starts = [0]
# self.ends = [1]
# self.axes = [1]
# self.out = self.input[:, 0:1, :]
# self.infer_flags = [1]
# def test_check_output(self):
# place = core.CUDAPlace(0)
# if core.is_float16_supported(place):
# self.check_output_with_place(place, atol=1e-5)
# def test_check_grad_normal(self):
# place = core.CUDAPlace(0)
# if core.is_float16_supported(place):
# self.check_grad_with_place(
# place, ['Input'],
# 'Out',
# max_relative_error=0.006,
# numeric_grad_delta=0.5)
# class TestBF16(OpTest):
# def setUp(self):
# self.op_type = "slice"
# self.config()
# self.inputs = {'Input': convert_float_to_uint16(self.input)}
# self.outputs = {'Out': convert_float_to_uint16(self.out)}
# self.attrs = {
# 'axes': self.axes,
# 'starts': self.starts,
# 'ends': self.ends,
# 'infer_flags': self.infer_flags
# }
# def config(self):
# self.dtype = np.uint16
# self.input = np.random.random([3, 4, 5, 6]).astype(np.float32)
# self.starts = [-3, 0, 2]
# self.ends = [3, 100, -1]
# self.axes = [0, 1, 3]
# self.out = self.input[-3:3, 0:100, :, 2:-1]
# self.infer_flags = [1, 1, 1]
# def test_check_output(self):
# self.check_output()
# def test_check_grad_normal(self):
# self.check_grad(['Input'], 'Out')
# # Test python API
# class TestSliceAPI(unittest.TestCase):
# def test_1(self):
# input = np.random.random([3, 4, 5, 6]).astype("float64")
# minus_1 = fluid.layers.fill_constant([1], "int32", -1)
# minus_3 = fluid.layers.fill_constant([1], "int64", -3)
# starts = fluid.layers.data(
# name='starts', shape=[1, 3], append_batch_size=False)
# ends = fluid.layers.data(
# name='ends', shape=[3], append_batch_size=False)
# x = fluid.layers.data(
# name="x",
# shape=[3, 4, 5, 6],
# append_batch_size=False,
# dtype="float64")
# # value_int64 is greater than 2147483647 which is the max of int32
# value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)
# out_1 = fluid.layers.slice(
# x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1])
# out_2 = fluid.layers.slice(
# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1])
# out_3 = fluid.layers.slice(
# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1])
# out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)
# out_5 = x[-3:3, 0:100, 2:-1]
# out_6 = x[minus_3:3, 0:100, :, 2:-1]
# out_7 = x[minus_1, 0:100, :, 2:minus_1]
# exe = fluid.Executor(place=fluid.CPUPlace())
# res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
# fluid.default_main_program(),
# feed={
# "x": input,
# 'starts': np.array([-3, 0, 2]).astype("int32"),
# 'ends': np.array([3, 100, -1]).astype("int32")
# },
# fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7])
# assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
# assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
# assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
# assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
# assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
# assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
# assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])
# class TestSliceApiWithTensor(unittest.TestCase):
# def test_starts_ends_is_tensor(self):
# with paddle.fluid.dygraph.guard():
# a = paddle.rand(shape=[4, 5, 6], dtype='float32')
# axes = [0, 1, 2]
# starts = [-3, 0, 2]
# ends = [3, 2, 4]
# a_1 = paddle.slice(
# a,
# axes=axes,
# starts=paddle.to_tensor(
# starts, dtype='int32'),
# ends=paddle.to_tensor(
# ends, dtype='int32'))
# a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)
# self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy()))
# def test_bool_tensor(self):
# with paddle.fluid.dygraph.guard():
# array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool')
# tt = paddle.to_tensor(array)
# tt.stop_gradient = False
# starts = [0, 1, 2]
# ends = [3, 5, 4]
# axes = [0, 1, 2]
# y_paddle = paddle.slice(tt, axes, starts, ends)
# y_np = tt[0:3, 1:5, 2:4]
# self.assertTrue(paddle.bool == y_paddle.dtype)
# self.assertTrue(np.array_equal(y_paddle.numpy(), y_np))
# class TestSliceApiWithLoDTensorArray(unittest.TestCase):
# def setUp(self):
# self.shape = (3, 4)
# self.data = np.random.random(size=self.shape).astype('float32')
# self.idx = 0
# self.start = 0
# self.end = 2
# self.axis = 1
# self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
# ) else fluid.CPUPlace()
# self.exe = fluid.Executor(self.place)
# def set_program_and_run(self, main_program, case_num):
# with fluid.program_guard(main_program):
# x = [
# fluid.data(
# name='x0', shape=self.shape, dtype="float32"), fluid.data(
# name='x1', shape=self.shape, dtype="float32"),
# fluid.data(
# name='x2', shape=self.shape, dtype="float32")
# ]
# for each_x in x:
# each_x.stop_gradient = False
# arr = layers.create_array(dtype="float32")
# for i in range(3):
# idx = layers.array_length(arr)
# arr = layers.array_write(x=x[i], i=idx, array=arr)
# if case_num == 1:
# self.sliced_arr = output = arr[0]
# elif case_num == 2:
# end = fluid.layers.array_length(
# arr) - 1 # dtype of end is int64
# self.sliced_arr = slice_arr = arr[self.start:end]
# output, _ = fluid.layers.tensor_array_to_tensor(
# slice_arr, axis=self.axis, use_stack=True)
# elif case_num == 3:
# value_int64 = fluid.layers.fill_constant([1], "int64",
# 2147483648)
# self.sliced_arr = slice_arr = arr[self.start:value_int64]
# output, _ = fluid.layers.tensor_array_to_tensor(
# slice_arr, axis=self.axis, use_stack=True)
# loss = fluid.layers.reduce_sum(output)
# fluid.backward.append_backward(loss)
# g_vars = list(
# map(main_program.global_block().var,
# [each_x.name + "@GRAD" for each_x in x]))
# self.out, self.g_x0, self.g_x1, self.g_x2 = \
# self.exe.run(main_program,
# feed = {'x0': self.data,
# 'x1': self.data,
# 'x2': self.data},
# fetch_list=[output] + g_vars)
# def test_case_1(self):
# main_program = fluid.Program()
# self.set_program_and_run(main_program, 1)
# self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR)
# self.assertEqual(self.sliced_arr.shape, self.shape)
# self.assertTrue(np.array_equal(self.out, self.data))
# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
# self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data)))
# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))
# def test_case_2(self):
# main_program = fluid.Program()
# self.set_program_and_run(main_program, 2)
# self.assertTrue(
# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
# self.assertEqual(self.sliced_arr.shape, self.shape)
# self.assertTrue(
# np.array_equal(
# self.out, np.stack(
# [self.data, self.data], axis=self.axis)))
# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))
# def test_case_3(self):
# main_program = fluid.Program()
# self.set_program_and_run(main_program, 3)
# self.assertTrue(
# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
# self.assertEqual(self.sliced_arr.shape, self.shape)
# self.assertTrue(
# np.array_equal(
# self.out,
# np.stack(
# [self.data, self.data, self.data], axis=self.axis)))
# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
# self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data)))
# class TestImperativeVarBaseGetItem(unittest.TestCase):
# def test_getitem_with_long(self):
# with fluid.dygraph.guard():
# data = np.random.random((2, 80, 16128)).astype('float32')
# var = fluid.dygraph.to_variable(data)
# sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here
# self.assertEqual(sliced.shape, [2, 70, 80])
# sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]]
# self.assertEqual(sliced.shape, [2, 78, 78])
# def test_getitem_with_float(self):
# def test_float_in_slice_item():
# with fluid.dygraph.guard():
# data = np.random.random((2, 80, 16128)).astype('float32')
# var = fluid.dygraph.to_variable(data)
# sliced = var[:, 1.1:, :var.shape[1]]
# self.assertRaises(Exception, test_float_in_slice_item)
# def test_float_in_index():
# with fluid.dygraph.guard():
# data = np.random.random((2, 80, 16128)).astype('float32')
# var = fluid.dygraph.to_variable(data)
# sliced = var[1.1]
# self.assertRaises(Exception, test_float_in_index)
# class TestInferShape(unittest.TestCase):
# def test(self):
# x = paddle.ones(shape=[3, 4, 5])
# x.desc.set_shape([3, -1, 5])
# self.assertEqual(x.shape, (3, -1, 5))
# out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3])
# self.assertEqual(out0.shape, (3, 3, 5))
# def test_axis_less_than_zero(self):
# # Using paddle.disable_static will make other unittests fail.
# with fluid.dygraph.guard():
# x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4])
# x = paddle.to_tensor(x_arr)
# pp_slice = paddle.slice(x, [100, ], [0], [1])
# np_slice = x_arr[:, :, 0:1]
# self.assertTrue(np.array_equal(pp_slice, np_slice))
# pp_slice = paddle.slice(x, (-100, ), [0], [1])
# np_slice = x_arr[0:1]
# self.assertTrue(np.array_equal(pp_slice, np_slice))
# x_arr = np.array([], dtype=np.float32)
# x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0)))
# starts = paddle.to_tensor(
# np.reshape(
# np.array(
# [], dtype=np.int32), (0, )))
# ends = paddle.to_tensor(
# np.reshape(
# np.array(
# [], dtype=np.int32), (0, )))
# with self.assertRaises(ValueError):
# paddle.slice(x, [-1000000], starts, ends)
# with self.assertRaises(ValueError):
# paddle.slice(x, [1000000], starts, ends)
# with self.assertRaises(ValueError):
# paddle.slice(x, [], starts, ends)
# with self.assertRaises(ValueError):
# paddle.slice(x, 0, starts, ends)
# @unittest.skipIf(not core.is_compiled_with_cuda(),
# "core is not compiled with CUDA")
# class TestImperativeCUDAPinnedInput(unittest.TestCase):
# def test_input_cuda_pinned_var(self):
# with fluid.dygraph.guard():
# data = np.random.random((2, 80, 16128)).astype('float32')
# var = core.VarBase(
# value=data,
# name='',
# persistable=False,
# place=fluid.CUDAPinnedPlace(),
# zero_copy=False)
# sliced = var[:, 10:, :var.shape[1]]
# self.assertEqual(sliced.shape, [2, 70, 80])
(The added lines of this hunk restore, as active code, the test classes shown
commented out above: TestCase1, TestCase2, TestSliceOp_decs_dim and its
variants 2-6, the StartsTensorList / StartsTensor / EndsTensor / EndsTensorList
combinations, TestFP16, TestFP16_2, TestBF16, TestSliceAPI,
TestSliceApiWithTensor, TestSliceApiWithLoDTensorArray,
TestImperativeVarBaseGetItem, TestInferShape and TestImperativeCUDAPinnedInput.
They match the commented copies except in TestSliceOp_decs_dim, where
test_check_output stays commented out and a debug print is added:)

class TestSliceOp_decs_dim(OpTest):
    # setUp() and config() are identical to the commented-out copy above

    # def test_check_output(self):
    #     self.check_output()

    def test_check_grad_normal(self):
        print(self.input.size)
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)

(The visible part of the hunk ends with:)

if __name__ == '__main__':
    paddle.enable_static()