Unverified commit ca41e552
Authored on May 15, 2020 by lilong12; committed by GitHub on May 15, 2020
[Cherry-pick 1.8] Improving error reporting messages for ops #24438 (#24534)
* improve error reporting messages
Parent: 56eead24

Showing 12 changed files with 324 additions and 144 deletions (+324 / -144)
paddle/fluid/operators/crop_op.cc (+14, -10)
paddle/fluid/operators/crop_op.h (+43, -12)
paddle/fluid/operators/crop_tensor_op.cc (+28, -23)
paddle/fluid/operators/crop_tensor_op.h (+78, -32)
paddle/fluid/operators/expand_op.cc (+31, -17)
paddle/fluid/operators/expand_op.h (+41, -19)
paddle/fluid/operators/merge_selected_rows_op.cc (+12, -10)
paddle/fluid/operators/shard_index_op.cc (+10, -6)
paddle/fluid/operators/shard_index_op.cu (+23, -4)
paddle/fluid/operators/shard_index_op.h (+33, -7)
python/paddle/fluid/layers/nn.py (+3, -4)
python/paddle/fluid/tests/unittests/test_layers.py (+8, -0)
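Every file in this change follows the same pattern: bare PADDLE_ENFORCE / PADDLE_THROW calls with terse, fixed message strings are replaced by dedicated input/output checks (OP_INOUT_CHECK) and by comparison macros (PADDLE_ENFORCE_EQ/GE/LE/GT/LT/NE) whose messages are built with platform::errors::InvalidArgument and report both the expected and the received values. A minimal sketch of the before/after shape of one such check is shown below; it assumes the usual operator headers and an InferShape context named ctx with shape and x_dim in scope, and is illustrative rather than a verbatim excerpt of any single file.

    // Sketch only. Before: a bare check with a fixed message and no offending value.
    //   PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of CropOp should not be null.");
    // After: a dedicated input check plus a typed, formatted error message.
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Crop");
    PADDLE_ENFORCE_EQ(
        int64_t(shape.size()), x_dim.size(),
        platform::errors::InvalidArgument(
            "The number of elements (%d) of CropOp's 'shape' attribute should be "
            "equal to the number of dimensions (%d) of the Input(X).",
            shape.size(), x_dim.size()));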
paddle/fluid/operators/crop_op.cc
@@ -27,16 +27,18 @@ class CropOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of CropOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of CropOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Crop");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Crop");
     auto x_dim = ctx->GetInputDim("X");
     if (!ctx->HasInput("Y")) {
       auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
       PADDLE_ENFORCE_EQ(
           int64_t(shape.size()), x_dim.size(),
-          "Shape size should be equal to dimension size of input tensor.");
+          platform::errors::InvalidArgument(
+              "The number of elements (%d) of CropOp's "
+              "'shape' attribute should be equal to the number of dimensions "
+              "(%d) of the Input(X).",
+              shape.size(), x_dim.size()));
       std::vector<int64_t> tensor_shape(shape.size());
       for (size_t i = 0; i < shape.size(); ++i) {
         tensor_shape[i] = static_cast<int64_t>(shape[i]);

@@ -45,8 +47,10 @@ class CropOp : public framework::OperatorWithKernel {
     } else {
       auto y_dim = ctx->GetInputDim("Y");
       PADDLE_ENFORCE_EQ(framework::arity(x_dim), framework::arity(y_dim),
-                        "Tensor rank of both CropOp's "
-                        "inputs must be same.");
+                        platform::errors::InvalidArgument(
+                            "The number of dimensions (%d) of CropOp's input(X)"
+                            " must be equal to that (%d) of input(Y).",
+                            framework::arity(x_dim), framework::arity(y_dim)));
       ctx->SetOutputDim("Out", y_dim);
     }
   }

@@ -163,9 +167,9 @@ class CropOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CropGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "CropGrad");
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
     if (ctx->HasOutput(x_grad_name)) {
paddle/fluid/operators/crop_op.h
@@ -31,14 +31,23 @@ static std::vector<int> GetOffsets(const framework::ExecutionContext& ctx) {
   std::vector<int> res;
   int rank = ctx.Input<Tensor>("X")->dims().size();
   if (ctx.HasInput("Offsets")) {
-    PADDLE_ENFORCE(ctx.Attr<std::vector<int>>("offsets").empty(),
-                   "Input 'Offsets' and attribute 'offsets' should not be used "
-                   "at the same time.");
+    PADDLE_ENFORCE_EQ(ctx.Attr<std::vector<int>>("offsets").empty(), true,
+                      platform::errors::InvalidArgument(
+                          "Input 'Offsets' and attribute 'offsets' "
+                          "should not be used at the same time for CropOp."));
     const auto* offsets_tensor = ctx.Input<Tensor>("Offsets");
-    PADDLE_ENFORCE_EQ(offsets_tensor->dims().size(), 1);
-    PADDLE_ENFORCE_EQ(
-        rank, offsets_tensor->dims()[0],
-        "Offsets size should be equal to dimension size of input tensor.");
+    PADDLE_ENFORCE_EQ(offsets_tensor->dims().size(), 1,
+                      platform::errors::InvalidArgument(
+                          "The number of dimensions of input 'Offsets' for "
+                          "CropOp must be 1, but the value received is %d.",
+                          offsets_tensor->dims().size()));
+    PADDLE_ENFORCE_EQ(rank, offsets_tensor->dims()[0],
+                      platform::errors::InvalidArgument(
+                          "The number of elements (%d) for "
+                          "input 'Offsets' must be equal to "
+                          "the number of dimensions (%d) "
+                          "of the input tensor.",
+                          offsets_tensor->dims()[0], rank));
     const int* offsets_data;
     framework::Tensor cpu_tmp_tensor;
     if (platform::is_cpu_place(offsets_tensor->place())) {

@@ -53,7 +62,11 @@ static std::vector<int> GetOffsets(const framework::ExecutionContext& ctx) {
     res = ctx.Attr<std::vector<int>>("offsets");
-    PADDLE_ENFORCE_EQ(
-        rank, static_cast<int>(res.size()),
-        "Offsets size should be equal to dimension size of input tensor.");
+    PADDLE_ENFORCE_EQ(rank, static_cast<int>(res.size()),
+                      platform::errors::InvalidArgument(
+                          "The number of elements (%d) for "
+                          "input 'Offsets' must be equal to "
+                          "the number of dimensions (%d) "
+                          "of the input tensor.",
+                          res.size(), rank));
   }
   return res;
 }

@@ -92,6 +105,18 @@ class CropKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     int rank = context.Input<Tensor>("X")->dims().size();
+    PADDLE_ENFORCE_GE(rank, 1,
+                      platform::errors::InvalidArgument(
+                          "The number of dimensions of the Input(X) for CropOp must be "
+                          "greater than or equal to 1, but the value received is %d.",
+                          rank));
+    PADDLE_ENFORCE_LE(rank, 6,
+                      platform::errors::InvalidArgument(
+                          "The number of dimensions of the Input(X) for CropOp must be "
+                          "less than or equal to 6, but the value received is %d.",
+                          rank));
     switch (rank) {
       case 1:
         CropFunction<DeviceContext, T, 1>(context);

@@ -111,9 +136,6 @@ class CropKernel : public framework::OpKernel<T> {
       case 6:
         CropFunction<DeviceContext, T, 6>(context);
         break;
-      default:
-        PADDLE_THROW(
-            "CropOp only support tensors with no more than 6 dimensions.");
     }
   }
 };

@@ -145,6 +167,18 @@ class CropGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     size_t rank =
         context.Input<Tensor>(framework::GradVarName("Out"))->dims().size();
+    PADDLE_ENFORCE_GE(rank, 1,
+                      platform::errors::InvalidArgument(
+                          "The number of dimensions of the input 'Out@GRAD' for "
+                          "CropGrad must be greater than or equal "
+                          "to 1, but the value received is %d.",
+                          rank));
+    PADDLE_ENFORCE_LE(rank, 6,
+                      platform::errors::InvalidArgument(
+                          "The number of dimensions of the input 'Out@GRAD' for "
+                          "CropGrad must be less than or equal "
+                          "to 6, but the value received is %d.",
+                          rank));
     switch (rank) {
       case 1:
         CropGradFunction<DeviceContext, T, 1>(context);

@@ -164,9 +198,6 @@ class CropGradKernel : public framework::OpKernel<T> {
       case 6:
         CropGradFunction<DeviceContext, T, 6>(context);
         break;
-      default:
-        PADDLE_THROW(
-            "CropOp only support tensors with no more than 6 dimensions.");
     }
   }
 };
paddle/fluid/operators/crop_tensor_op.cc
@@ -27,10 +27,8 @@ class CropTensorOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of Op(crop_tensor) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of Op(crop_tensor) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CropTensor");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "CropTensor");
     auto x_dim = ctx->GetInputDim("X");
     auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     auto offsets = ctx->Attrs().Get<std::vector<int>>("offsets");

@@ -39,9 +37,11 @@ class CropTensorOp : public framework::OperatorWithKernel {
       auto inputs_name = ctx->Inputs("ShapeTensor");
-      PADDLE_ENFORCE_GT(inputs_name.size(), 0,
-                        "Input(ShapeTensor)'size of Op(crop_tensor) can't be zero. "
-                        "Please check the Attr(shape)'s size of "
-                        "Op(fluid.layers.crop_tensor).");
+      PADDLE_ENFORCE_GT(inputs_name.size(), 0,
+                        platform::errors::InvalidArgument(
+                            "The number of elements of the input 'ShapeTensor' for "
+                            "CropTensor must be greater than zero, "
+                            "but the value received is %d.",
+                            inputs_name.size()));
       auto out_dims = std::vector<int>(inputs_name.size(), -1);
       for (size_t i = 0; i < shape.size(); ++i) {
         if (shape[i] > 0) {

@@ -59,16 +59,18 @@ class CropTensorOp : public framework::OperatorWithKernel {
     if (ctx->HasInput("Shape")) {
       auto shape_dim = ctx->GetInputDim("Shape");
-      PADDLE_ENFORCE_EQ(
-          shape_dim.size(), 1,
-          "Input(Shape)'s dimension size of Op(crop_tensor) must be 1. "
-          "Please check the Attr(shape)'s dimension size of "
-          "Op(fluid.layers.crop_tensor).");
-      PADDLE_ENFORCE_EQ(shape_dim[0], x_dim.size(),
-                        "Input(Shape)'s size of Op(crop_tensor) must be equal "
-                        "to dimension size of input tensor. "
-                        "Please check the Attr(shape)'s size of "
-                        "Op(fluid.layers.crop_tensor).");
+      PADDLE_ENFORCE_EQ(shape_dim.size(), 1,
+                        platform::errors::InvalidArgument(
+                            "The number of dimensions of the input "
+                            "'Shape' for CropTensor must be 1, "
+                            "but the value received is %d.",
+                            shape_dim.size()));
+      PADDLE_ENFORCE_EQ(shape_dim[0], x_dim.size(),
+                        platform::errors::InvalidArgument(
+                            "The number of elements (%d) of the input 'Shape' "
+                            "for CropTensor must be equal to the number of"
+                            " dimensions (%d) of the input.",
+                            shape_dim[0], x_dim.size()));
       if (ctx->IsRuntime()) {
         // If true, set the shape of Output(Out) according to Input(Shape) in
         // CropTensorKernel with ExecutionContext. Also check LoD in

@@ -80,9 +82,13 @@ class CropTensorOp : public framework::OperatorWithKernel {
       }
       return;
     }
-    PADDLE_ENFORCE_EQ(int64_t(shape.size()), x_dim.size(),
-                      "Attr(shape)'size of Op(crop_tensor) should be equal to "
-                      "dimension size of input tensor.");
+    PADDLE_ENFORCE_EQ(
+        int64_t(shape.size()), x_dim.size(),
+        platform::errors::InvalidArgument(
+            "The number of elements (%d) of attribute 'shape' for "
+            "CropTensor must be equal to the number of "
+            "dimensions (%d) of the input.",
+            shape.size(), x_dim.size()));
     std::vector<int64_t> out_shape(shape.size(), -1);
     for (size_t i = 0; i < shape.size(); ++i) {
       if (shape[i] > 0) {

@@ -242,10 +248,9 @@ class CropTensorOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of Op(crop_tensor) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) of Op(crop_tensor) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CropTensorGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "CropTensorGrad");
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
     if (ctx->HasOutput(x_grad_name)) {
paddle/fluid/operators/crop_tensor_op.h
@@ -35,7 +35,10 @@ inline std::vector<int> get_new_data(
     auto tensor = list_new_tensor[i];
-    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
-                      "The tensor's shape in list of Op(crop_tensor) should be [1].");
+    PADDLE_ENFORCE_EQ(
+        tensor->dims(), framework::make_ddim({1}),
+        platform::errors::InvalidArgument(
+            "The tensor's shape in list of Op(crop_tensor) should be [1], "
+            "but the value received is %d.",
+            tensor->dims()));
     if (platform::is_gpu_place(tensor->place())) {
       framework::Tensor temp;
       TensorCopySync(*tensor, platform::CPUPlace(), &temp);

@@ -56,18 +59,23 @@ static framework::DDim ValidateShape(const std::vector<int> shape,
   auto shape_size = shape.size();
-  PADDLE_ENFORCE_EQ(
-      in_dim_size, shape_size,
-      "Attr(shape)'s size of Op(crop_tensor) should be equal "
-      "to that of input Tensor. "
-      "Please check the Attr(shape)'s size of Op(fluid.layers.crop_tensor).");
+  PADDLE_ENFORCE_EQ(
+      in_dim_size, shape_size,
+      platform::errors::InvalidArgument(
+          "The number of elements (%d) for shape of Op(crop_tensor) should be "
+          "equal to the number of dimensions (%d) of the input tensor.",
+          shape_size, in_dim_size));
   std::vector<int64_t> output_shape(shape.size(), 0);
   for (size_t i = 0; i < shape.size(); ++i) {
     if (shape[i] <= 0 && in_dims[i] > 0) {
-      PADDLE_ENFORCE_NE(
-          shape[i], 0,
-          "The element in Attr(shape) of Op(crop_tensor) should not be zero.");
-      PADDLE_ENFORCE_EQ(shape[i], -1,
-                        "When the element in Attr(shape) of Op(crop_tensor) is "
-                        "negative, only -1 is supported.");
+      PADDLE_ENFORCE_NE(shape[i], 0,
+                        platform::errors::InvalidArgument(
+                            "The value (%d) of the %uth element for shape of "
+                            "Op(crop_tensor) should not be zero.",
+                            shape[i], i));
+      PADDLE_ENFORCE_EQ(shape[i], -1,
+                        platform::errors::InvalidArgument(
+                            "When the value (%d) of the %uth "
+                            "element for shape of Op(crop_tensor)"
+                            " is negative, only -1 is supported.",
+                            shape[i], i));
       output_shape[i] = in_dims[i] - offsets[i];
     } else {
       output_shape[i] = static_cast<int64_t>(shape[i]);

@@ -83,9 +91,13 @@ static std::vector<int> GetShape(const framework::ExecutionContext& ctx) {
   auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("ShapeTensor");
   if (list_new_shape_tensor.size() > 0) {
     // have offsets tensor list
-    PADDLE_ENFORCE_EQ(list_new_shape_tensor.size(), rank,
-                      "Input(ShapeTensor)'s length of Op(crop_tensor) should "
-                      "be equal to dimension size of input tensor.");
+    PADDLE_ENFORCE_EQ(
+        list_new_shape_tensor.size(), rank,
+        platform::errors::InvalidArgument(
+            "The number of tensors (%d) for the input ShapeTensor of "
+            "Op(crop_tensor) must be equal to the number of "
+            "dimensions (%d) of the input.",
+            list_new_shape_tensor.size(), rank));
     res = get_new_data(list_new_shape_tensor);
     return res;

@@ -122,13 +134,21 @@ static std::vector<int> GetOffsets(const framework::ExecutionContext& ctx) {
   if (ctx.HasInput("Offsets")) {
     PADDLE_ENFORCE_EQ(ctx.Attr<std::vector<int>>("offsets").empty(), true,
-                      "Input 'Offsets' and attribute 'offsets' should not be used "
-                      "at the same time.");
+                      platform::errors::InvalidArgument(
+                          "Input 'Offsets' and attribute 'offsets' for Op(crop_tensor) "
+                          "cannot be used at the same time."));
     const auto* offsets_tensor = ctx.Input<Tensor>("Offsets");
-    PADDLE_ENFORCE_EQ(offsets_tensor->dims().size(), 1);
-    PADDLE_ENFORCE_EQ(
-        rank, offsets_tensor->dims()[0],
-        "Offsets size should be equal to dimension size of input tensor.");
+    PADDLE_ENFORCE_EQ(offsets_tensor->dims().size(), 1,
+                      platform::errors::InvalidArgument(
+                          "The number of dimensions of input 'Offsets' must "
+                          "be 1, but the value received is: %d.",
+                          offsets_tensor->dims().size()));
+    PADDLE_ENFORCE_EQ(rank, offsets_tensor->dims()[0],
+                      platform::errors::InvalidArgument(
+                          "The number of elements (%d) for "
+                          "input 'Offsets' must be equal to "
+                          "the number of dimensions (%d) of the input tensor.",
+                          offsets_tensor->dims()[0], rank));
     const int* offsets_data;
     framework::Tensor cpu_tmp_tensor;
     if (platform::is_cpu_place(offsets_tensor->place())) {

@@ -143,7 +163,11 @@ static std::vector<int> GetOffsets(const framework::ExecutionContext& ctx) {
     res = ctx.Attr<std::vector<int>>("offsets");
-    PADDLE_ENFORCE_EQ(
-        rank, static_cast<int>(res.size()),
-        "Offsets size should be equal to dimension size of input tensor.");
+    PADDLE_ENFORCE_EQ(rank, static_cast<int>(res.size()),
+                      platform::errors::InvalidArgument(
+                          "The number of elements (%d) for "
+                          "input 'Offsets' must be equal to "
+                          "the number of dimensions (%d) "
+                          "of the input tensor.",
+                          static_cast<int>(res.size()), rank));
   }
   return res;
 }

@@ -168,10 +192,13 @@ void CropTensorFunction(const framework::ExecutionContext& context) {
   out_dims = ValidateShape(shape, offsets, x->dims());
   out->mutable_data<T>(out_dims, context.GetPlace());
   for (size_t i = 0; i < offsets.size(); ++i) {
-    PADDLE_ENFORCE_LE(
-        offsets[i] + shape[i], x_dims[i],
-        "The sum of the Attr(offsets) and Attr(shape) of Op(crop_tensor) "
-        "should be less than or equal to corresponding input dimension size.");
+    PADDLE_ENFORCE_LE(offsets[i] + shape[i], x_dims[i],
+                      platform::errors::InvalidArgument(
+                          "The sum of the %uth elements of "
+                          "offsets (%d) and shape (%d) of Op(crop_tensor) "
+                          "should be less than or "
+                          "equal to the size of %uth dimension of the input.",
+                          i, offsets[i], shape[i], i));
   }
   auto x_tensor = EigenTensor<T, D>::From(*x);

@@ -192,6 +219,19 @@ class CropTensorKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     int rank = context.Input<Tensor>("X")->dims().size();
+    PADDLE_ENFORCE_GE(
+        rank, 1,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'x' for "
+            "Op(crop_tensor) must be greater than or equal to 1, but the "
+            "value received is %d.",
+            rank));
+    PADDLE_ENFORCE_LE(
+        rank, 6,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'x' for "
+            "Op(crop_tensor) must be less than or equal to 6, but the "
+            "value received is %d.",
+            rank));
     switch (rank) {
       case 1:
         CropTensorFunction<DeviceContext, T, 1>(context);

@@ -211,10 +251,6 @@ class CropTensorKernel : public framework::OpKernel<T> {
       case 6:
         CropTensorFunction<DeviceContext, T, 6>(context);
         break;
-      default:
-        PADDLE_THROW(
-            "CropTensorOp only support tensors with no more than 6 "
-            "dimensions.");
     }
   }
 };

@@ -246,6 +282,20 @@ class CropTensorGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     size_t rank =
         context.Input<Tensor>(framework::GradVarName("Out"))->dims().size();
+    PADDLE_ENFORCE_GE(
+        rank, 1,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'Out@GRAD' for "
+            "Op(crop_tensor_grad) must be greater than or equal to 1, but the "
+            "value received is %d.",
+            rank));
+    PADDLE_ENFORCE_LE(
+        rank, 6,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'Out@GRAD' for "
+            "Op(crop_tensor_grad) must be less than or equal to 6, but the "
+            "value received is %d.",
+            rank));
     switch (rank) {
       case 1:
         CropTensorGradFunction<DeviceContext, T, 1>(context);

@@ -265,10 +315,6 @@ class CropTensorGradKernel : public framework::OpKernel<T> {
       case 6:
         CropTensorGradFunction<DeviceContext, T, 6>(context);
         break;
-      default:
-        PADDLE_THROW(
-            "CropTensorOp only support tensors with no more than 6 "
-            "dimensions.");
     }
   }
 };
paddle/fluid/operators/expand_op.cc
@@ -28,9 +28,8 @@ class ExpandOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Expand");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Expand");
     auto x_dims = ctx->GetInputDim("X");
     auto expand_times = ctx->Attrs().Get<std::vector<int>>("expand_times");

@@ -38,11 +37,19 @@ class ExpandOp : public framework::OperatorWithKernel {
       expand_times = std::vector<int>(x_dims.size(), -1);
     }
-    PADDLE_ENFORCE_EQ(static_cast<size_t>(x_dims.size()), expand_times.size(),
-                      "The number of Attr(expand_times)'s value must be equal "
-                      "to the rank of Input(X).");
-    PADDLE_ENFORCE_LE(x_dims.size(), 6,
-                      "The rank of Input(X) must not be greater than 6.");
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(x_dims.size()), expand_times.size(),
+        platform::errors::InvalidArgument(
+            "The number of elements (%d) of 'expand_times' for "
+            "Op(expand) must be equal to the number of dimensions "
+            "(%d) of the input.",
+            expand_times.size(), static_cast<size_t>(x_dims.size())));
+    PADDLE_ENFORCE_LE(
+        x_dims.size(), 6,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input for Op(expand) "
+            "must not be greater than 6, but the value received is %d.",
+            x_dims.size()));
     std::vector<int64_t> out_shape(x_dims.size());
     for (size_t i = 0; i < expand_times.size(); ++i) {

@@ -51,7 +58,10 @@ class ExpandOp : public framework::OperatorWithKernel {
       } else {
-        PADDLE_ENFORCE_GT(expand_times[i], 0,
-                          "The element of Attr(expand_times) must greater than 0.");
+        PADDLE_ENFORCE_GT(
+            expand_times[i], 0,
+            platform::errors::InvalidArgument(
+                "The %uth element of 'expand_times' for Op(expand) must be "
+                "greater than 0, but the value given is %d.",
+                i, expand_times[i]));
         out_shape[i] = x_dims[i] * expand_times[i];
       }
     }

@@ -139,9 +149,9 @@ class ExpandGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "ExpandGrad");
     auto x_dims = ctx->GetInputDim("X");
     std::vector<int> expand_times =

@@ -153,8 +163,10 @@ class ExpandGradOp : public framework::OperatorWithKernel {
     if (!ctx->IsRuntime() && x_dims[0] < 0) {
       PADDLE_ENFORCE_EQ(
           x_dims[0], out_dims[0],
-          "The first dimension size of Input(Out@GRAD) should be "
-          "equal to the crroresponding dimension size of Input(X)");
+          platform::errors::InvalidArgument(
+              "The first dimension size (%d) of Input(Out@GRAD) should be "
+              "equal to the crroresponding dimension size (%d) of Input(X)",
+              out_dims[0], x_dims[0]));
       start_pos = 1u;
     }

@@ -165,9 +177,11 @@ class ExpandGradOp : public framework::OperatorWithKernel {
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(
             x_dims[i] * expand_times[i], out_dims[i],
-            "Each dimension size of Input(Out@GRAD) should be "
-            "equal to multiplication of crroresponding dimension "
-            "size of Input(X) and Attr(expand_times) value.");
+            platform::errors::InvalidArgument(
+                "The %uth dimension size (%d) of Input(Out@GRAD) should be "
+                "equal to the multiplication of the crroresponding dimension "
+                "sizes of Input(X) (%d) and expand_times (%d).",
+                i, out_dims[i], x_dims[i], expand_times[i]));
       }
     }
   }
paddle/fluid/operators/expand_op.h
@@ -96,12 +96,19 @@ class ExpandKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto rank = context.Input<Tensor>("X")->dims().size();
-    switch (rank) {
-      REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED)
-      default:
-        PADDLE_ENFORCE(false,
-                       "Only support tensor with rank being between 1 and 6.");
-    }
+    PADDLE_ENFORCE_GE(
+        rank, 1,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'x' for Op(expand) "
+            "must be greater than or equal to 1, but the value received is %d.",
+            rank));
+    PADDLE_ENFORCE_LE(
+        rank, MAX_RANK_SUPPORTED,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'x' for Op(expand) "
+            "must be less than or equal to %d, but the value received is %d.",
+            MAX_RANK_SUPPORTED, rank));
+    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
   }

  protected:

@@ -111,9 +118,13 @@ class ExpandKernel : public framework::OpKernel<T> {
     auto in_dims = in0->dims();
     auto expand_times = get_expand_times(context);
-    PADDLE_ENFORCE_EQ(static_cast<size_t>(in_dims.size()), expand_times.size(),
-                      "The number of Attr(expand_times)'s value must be equal "
-                      "to the rank of Input(X).");
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(in_dims.size()), expand_times.size(),
+        platform::errors::InvalidArgument(
+            "The number of elements (%d) of 'expand_times' for "
+            "Op(expand) must be equal to the number "
+            "of dimensions (%d) of the input.",
+            expand_times.size(), static_cast<size_t>(in_dims.size())));
     auto* out0 = context.Output<Tensor>("Out");
     Eigen::DSizes<int, Rank> bcast_dims;
     for (size_t i = 0; i < expand_times.size(); ++i) {

@@ -172,12 +183,19 @@ class ExpandGradKernel : public framework::OpKernel<T> {
       framework::TensorCopy(*in0, context.GetPlace(), context.device_context(),
                             out0);
     } else {
-      switch (dims) {
-        REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
-        default:
-          PADDLE_ENFORCE(
-              false, "Only support tensor with rank being between 1 and 6.");
-      }
+      PADDLE_ENFORCE_GE(dims, 1,
+                        platform::errors::InvalidArgument(
+                            "The number of dimensions of the input "
+                            "'Out@GRAD' for Op(expand_grad)"
+                            " must be greater than or equal to 1, but "
+                            "the value received is %d.",
+                            dims));
+      PADDLE_ENFORCE_LE(dims, MAX_RANK_SUPPORTED,
+                        platform::errors::InvalidArgument(
+                            "The number of dimensions of the input 'Out@GRAD' "
+                            "for Op(expand_grad) must be less than or equal "
+                            "to %d, but the value received is %d.",
+                            MAX_RANK_SUPPORTED, dims));
+      switch (dims) { REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) }
     }
   }

@@ -189,11 +207,15 @@ class ExpandGradKernel : public framework::OpKernel<T> {
     size_t reshape_size = reshape_dims_vec.size();
     size_t reduce_size = reduce_dims_vec.size();
-    PADDLE_ENFORCE_EQ(reshape_size, reshape_dims_vec.size(),
-                      "Inconsistent size between template Dims and "
-                      "reshape dimensions.");
-    PADDLE_ENFORCE_EQ(reduce_size, reduce_dims_vec.size(),
-                      "Inconsistent size between template Dims and "
-                      "reduce dimensions.");
+    PADDLE_ENFORCE_EQ(reshape_size, reshape_dims_vec.size(),
+                      platform::errors::InvalidArgument(
+                          "Inconsistent size between template Dims (%d) and "
+                          "reshape dimensions (%d).",
+                          reshape_size, reshape_dims_vec.size()));
+    PADDLE_ENFORCE_EQ(reduce_size, reduce_dims_vec.size(),
+                      platform::errors::InvalidArgument(
+                          "Inconsistent size between template Dims (%d) and "
+                          "reduce dimensions (%d).",
+                          reduce_size, reduce_dims_vec.size()));
     auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
     out0->mutable_data<T>(context.GetPlace());
paddle/fluid/operators/merge_selected_rows_op.cc
@@ -23,16 +23,18 @@ class MergeSelectedRowsOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of MergeSelectedRowsOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of MergeSelectedRowsOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->GetInputsVarType("X").front(),
-                      framework::proto::VarType::SELECTED_ROWS,
-                      "Input X only should be SelectedRows.");
-    PADDLE_ENFORCE_EQ(ctx->GetOutputsVarType("Out").front(),
-                      framework::proto::VarType::SELECTED_ROWS,
-                      "Output Y only should be SelectedRows.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "MergeSelectedRows");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "MergeSelectedRows");
+    PADDLE_ENFORCE_EQ(ctx->GetInputsVarType("X").front(),
+                      framework::proto::VarType::SELECTED_ROWS,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of MergeSelectedRowsOp "
+                          "should be of type SelectedRows."));
+    PADDLE_ENFORCE_EQ(ctx->GetOutputsVarType("Out").front(),
+                      framework::proto::VarType::SELECTED_ROWS,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of MergeSelectedRowsOp "
+                          "should be of type SelectedRows."));
     ctx->ShareDim("X", /*->*/ "Out");
   }
paddle/fluid/operators/shard_index_op.cc
@@ -21,17 +21,21 @@ class ShardIndexOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of ShardIndexOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of ShardIndexOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ShardIndex");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ShardIndex");
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "Rank of Input(X) should be at least 2.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Rank of Input(X) should be at least 2, "
+                          "but the value given is %d.",
+                          x_dims.size()));
     if (ctx->IsRuntime() || x_dims[x_dims.size() - 1] > 0) {
-      PADDLE_ENFORCE_GE(x_dims[x_dims.size() - 1], 1U,
-                        "Last dimension of Input(X) should be 1.");
+      PADDLE_ENFORCE_GE(x_dims[x_dims.size() - 1], 1U,
+                        platform::errors::InvalidArgument(
+                            "The last dimension of Input(X) should be 1, "
+                            "but the value given is %d.",
+                            x_dims[x_dims.size() - 1]));
     }
     ctx->SetOutputDim("Out", x_dims);
paddle/fluid/operators/shard_index_op.cu
@@ -50,10 +50,29 @@ class ShardIndexCUDAKernel : public framework::OpKernel<T> {
     int nshards = context.Attr<int>("nshards");
     int shard_id = context.Attr<int>("shard_id");
     int ignore_value = context.Attr<int>("ignore_value");
-    PADDLE_ENFORCE_GT(index_num, 0);
-    PADDLE_ENFORCE_GT(nshards, 0);
-    PADDLE_ENFORCE(shard_id >= 0 && shard_id < nshards,
-                   "shard_id(%d) is not in range [0, %d)", shard_id, nshards);
+    PADDLE_ENFORCE_GT(
+        index_num, 0,
+        platform::errors::InvalidArgument(
+            "The value 'index_num' for Op(shard_index) must be greater than 0, "
+            "but the value given is %d.",
+            index_num));
+    PADDLE_ENFORCE_GT(nshards, 0,
+                      platform::errors::InvalidArgument(
+                          "The value 'nshard' for Op(shard_index) must be "
+                          "greater than 0, but the value given is %d.",
+                          nshards));
+    PADDLE_ENFORCE_GE(
+        shard_id, 0,
+        platform::errors::InvalidArgument(
+            "The value 'shard_id' for Op(shard_index) must be greater or "
+            "equal to 0, but the value given is %d.",
+            shard_id));
+    PADDLE_ENFORCE_LT(
+        shard_id, nshards,
+        platform::errors::InvalidArgument(
+            "The value 'shard_id' for Op(shard_index) must be less than "
+            "nshards (%d), but the value given is %d.",
+            nshards, shard_id));
     out->Resize(in->dims());
     out->set_lod(in->lod());
paddle/fluid/operators/shard_index_op.h
@@ -29,10 +29,29 @@ class ShardIndexCPUKernel : public framework::OpKernel<T> {
     int nshards = context.Attr<int>("nshards");
     int shard_id = context.Attr<int>("shard_id");
     int ignore_value = context.Attr<int>("ignore_value");
-    PADDLE_ENFORCE_GT(index_num, 0);
-    PADDLE_ENFORCE_GT(nshards, 0);
-    PADDLE_ENFORCE(shard_id >= 0 && shard_id < nshards,
-                   "shard_id(%d) is not in range [0, %d)", shard_id, nshards);
+    PADDLE_ENFORCE_GT(
+        index_num, 0,
+        platform::errors::InvalidArgument(
+            "The value 'index_num' for Op(shard_index) must be greater than 0, "
+            "but the value given is %d.",
+            index_num));
+    PADDLE_ENFORCE_GT(nshards, 0,
+                      platform::errors::InvalidArgument(
+                          "The value 'nshard' for Op(shard_index) must be "
+                          "greater than 0, but the value given is %d.",
+                          nshards));
+    PADDLE_ENFORCE_GE(
+        shard_id, 0,
+        platform::errors::InvalidArgument(
+            "The value 'shard_id' for Op(shard_index) must be greater or "
+            "equal to 0, but the value given is %d.",
+            shard_id));
+    PADDLE_ENFORCE_LT(
+        shard_id, nshards,
+        platform::errors::InvalidArgument(
+            "The value 'shard_id' for Op(shard_index) must be less than "
+            "nshards (%d), but the value given is %d.",
+            nshards, shard_id));
     int shard_size = (index_num + nshards - 1) / nshards;

@@ -42,9 +61,16 @@ class ShardIndexCPUKernel : public framework::OpKernel<T> {
     auto* out_data = out->mutable_data<T>(context.GetPlace());
     int64_t numel = in->numel();
     for (int64_t i = 0; i < numel; ++i) {
-      PADDLE_ENFORCE(in_data[i] >= 0 && in_data[i] < index_num,
-                     "Input index(%d) is out of range [0,%d)", in_data[i],
-                     index_num);
+      PADDLE_ENFORCE_GE(in_data[i], 0,
+                        platform::errors::InvalidArgument(
+                            "The input_index for Op(shard_index) must be "
+                            "greater or equal to 0, but the value given is %d.",
+                            in_data[i]));
+      PADDLE_ENFORCE_LT(in_data[i], index_num,
+                        platform::errors::InvalidArgument(
+                            "The input_index for Op(shard_index) must be less "
+                            "than index_num (%d), but the value given is %d.",
+                            index_num, in_data[i]));
       if (in_data[i] / shard_size == shard_id) {
         out_data[i] = in_data[i] % shard_size;
       } else {
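For readers unfamiliar with the operator, a minimal standalone sketch (not Paddle code; the function name is illustrative) of the index-to-shard mapping that the checks above guard: with index_num labels split across nshards shards, an index owned by shard_id is remapped to a shard-local index and every other index becomes ignore_value.

    #include <vector>

    // Standalone sketch of the shard_index mapping; mirrors the kernel logic above.
    std::vector<int> ShardIndexSketch(const std::vector<int>& in, int index_num,
                                      int nshards, int shard_id, int ignore_value) {
      // Same ceiling division as the kernel: number of indices owned by each shard.
      int shard_size = (index_num + nshards - 1) / nshards;
      std::vector<int> out(in.size());
      for (size_t i = 0; i < in.size(); ++i) {
        // The kernel enforces 0 <= in[i] < index_num before this point.
        out[i] = (in[i] / shard_size == shard_id) ? in[i] % shard_size
                                                  : ignore_value;
      }
      return out;
    }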
python/paddle/fluid/layers/nn.py
@@ -9913,12 +9913,10 @@ def crop(x, shape=None, offsets=None, name=None):
             crop = fluid.layers.crop(z, shape=[2, 2, 3])
     """
+    check_variable_and_dtype(x, 'x', ['float32'], 'crop')
+    check_type(shape, 'shape', (list, tuple, Variable), 'crop')
     helper = LayerHelper('crop', **locals())
-    if not (isinstance(shape, list) or isinstance(shape, tuple) or \
-            isinstance(shape, Variable)):
-        raise ValueError("The shape should be a list, tuple or Variable.")
-
     if offsets is None:
         offsets = [0] * len(x.shape)

@@ -15580,6 +15578,7 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
                               nshards=2,
                               shard_id=0)
     """
+    check_variable_and_dtype(input, 'input', ['int64'], 'shard_index')
     op_type = 'shard_index'
     helper = LayerHelper(op_type, **locals())
     if shard_id < 0 or shard_id >= nshards:
python/paddle/fluid/tests/unittests/test_layers.py
@@ -1840,6 +1840,14 @@ class TestLayer(LayerTest):
             self.assertIsNotNone(out2)
             self.assertIsNotNone(out3)

+    def test_shard_index(self):
+        with self.static_graph():
+            x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
+            shard_label = fluid.layers.shard_index(
+                input=x, index_num=20, nshards=2, shard_id=0)
+        self.assertIsNotNone(shard_label)
+
     def test_accuracy(self):
         x = np.random.rand(3, 32, 32).astype("float32")
         y = np.array([[1], [0], [1]])