机器未来 / Paddle (forked from PaddlePaddle / Paddle, in sync with upstream)
Unverified commit ba5fa2c2
Authored on Apr 15, 2020 by Bai Yifan; committed by GitHub on Apr 15, 2020
enhance some error message, test=release/2.0 (#23840)
Parent: d4b4fa04
Showing 15 changed files with 404 additions and 134 deletions (+404 -134)
paddle/fluid/operators/deformable_conv_op.cc  +96 -48
paddle/fluid/operators/deformable_conv_v1_op.cc  +71 -45
paddle/fluid/operators/detection/polygon_box_transform_op.cc  +16 -10
paddle/fluid/operators/detection/polygon_box_transform_op.cu  +3 -2
paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc  +73 -29
python/paddle/fluid/layers/detection.py  +2 -0
python/paddle/fluid/layers/learning_rate_scheduler.py  +3 -0
python/paddle/fluid/layers/loss.py  +11 -0
python/paddle/fluid/layers/nn.py  +6 -0
python/paddle/fluid/tests/unittests/test_deformable_conv_op.py  +27 -0
python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py  +33 -0
python/paddle/fluid/tests/unittests/test_mse_loss.py  +17 -0
python/paddle/fluid/tests/unittests/test_polygon_box_transform.py  +11 -0
python/paddle/fluid/tests/unittests/test_square_error_cost.py  +17 -0
python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py  +18 -0
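The same two refactorings repeat across the five C++ operators below, so the hunks are easier to read once the pattern is clear. Condensed from the deformable_conv_op.cc changes that follow (a fragment only; it assumes the surrounding InferShape(ctx) context shown in the full diff): bare PADDLE_ENFORCE null checks become OP_INOUT_CHECK(condition, "Input"/"Output", variable name, op name), and shape checks gain a typed platform::errors::InvalidArgument error that reports the values actually received.

// Before: untyped check, message carries no received values.
PADDLE_ENFORCE(ctx->HasInput("Input"),
               "Input(Input) of DeformableConvOp should not be null");
PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
                  "Conv input dimension and filter dimension should be the same.");

// After: structured input/output presence check plus a typed error that
// reports the offending values, so the failure message shows what was passed.
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "deformable_conv");
PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
                  platform::errors::InvalidArgument(
                      "Conv input dimension and filter dimension should be "
                      "the same. The difference is [%d]: [%d]",
                      in_dims.size(), filter_dims.size()));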
paddle/fluid/operators/deformable_conv_op.cc @ ba5fa2c2

@@ -109,21 +109,14 @@ class DeformableConvOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(Input) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Offset"),
-                   "Input(Offset) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Mask"),
-                   "Input(Mask) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Output"),
-                   "Output(Output) of DeformableConvOp "
-                   "should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset",
+                   "deformable_conv)");
+    OP_INOUT_CHECK(ctx->HasInput("Mask"), "Input", "Mask", "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter",
+                   "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
+                   "deformable_conv");

     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");

@@ -138,39 +131,62 @@ class DeformableConvOp : public framework::OperatorWithKernel {
     int deformable_groups = ctx->Attrs().Get<int>("deformable_groups");
     int im2col_step = ctx->Attrs().Get<int>("im2col_step");
-    PADDLE_ENFORCE(in_dims.size() == 4,
-                   "Conv input should be 4-D tensor, get %u", in_dims.size());
-    PADDLE_ENFORCE_EQ(
-        in_dims.size(), filter_dims.size(),
-        "Conv input dimension and filter dimension should be the same.");
-    PADDLE_ENFORCE_EQ(
-        in_dims.size() - strides.size(), 2U,
-        "Conv input dimension and strides dimension should be consistent.");
-    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
-                      "Conv paddings dimension and Conv strides dimension "
-                      "should be the same.");
-    PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
-                      "The number of input channels should be equal to filter "
-                      "channels * groups.");
-    PADDLE_ENFORCE_EQ(filter_dims[0] % groups, 0,
-                      "The number of output channels should be divided by groups.");
-    PADDLE_ENFORCE_EQ(filter_dims[0] % deformable_groups, 0,
-                      "The number of output channels should be "
-                      "divided by deformable groups.");
+    PADDLE_ENFORCE_EQ(
+        in_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "Conv input should be 4-D tensor, get %u", in_dims.size()));
+    PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv input dimension and filter dimension should be "
+                          "the same. The difference is [%d]: [%d]",
+                          in_dims.size(), filter_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims.size() - strides.size(), 2U,
+        platform::errors::InvalidArgument(
+            "Conv input dimension and strides "
+            "dimension should be consistent. But received input "
+            "dimension:[%d], strides dimension:[%d]",
+            in_dims.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv paddings dimension and Conv strides dimension "
+                          "should be the same. The difference is [%d]: [%d]",
+                          paddings.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims[1], filter_dims[1] * groups,
+        platform::errors::InvalidArgument(
+            "The number of input channels should be equal to filter "
+            "channels * groups. The difference is [%d]: [%d]",
+            in_dims[1], filter_dims[1] * groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be divided by groups. But "
+            "received output channels:[%d], groups:[%d]",
+            filter_dims[0], groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % deformable_groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be "
+            "divided by deformable groups. The difference is [%d]: [%d]",
+            filter_dims[0] % groups, 0));

     if (in_dims[0] > im2col_step) {
       PADDLE_ENFORCE_EQ(
           in_dims[0] % im2col_step, 0U,
-          "Input batchsize must be smaller than or divide im2col_step");
+          platform::errors::InvalidArgument(
+              "Input batchsize must be smaller than or divide im2col_step. But "
+              "received Input batchsize:[%d], im2col_step:[%d]",
+              in_dims[0], im2col_step));
     }

     for (size_t i = 0; i < strides.size(); ++i) {
-      PADDLE_ENFORCE_GT(strides[i], 0U, "stride %d size incorrect", i);
+      PADDLE_ENFORCE_GT(strides[i], 0U, platform::errors::InvalidArgument(
+                                            "stride %d size incorrect", i));
     }
     for (size_t i = 0; i < dilations.size(); ++i) {
-      PADDLE_ENFORCE_GT(dilations[i], 0U, "dilation %d size incorrect", i);
+      PADDLE_ENFORCE_GT(dilations[i], 0U, platform::errors::InvalidArgument(
+                                              "dilation %d size incorrect", i));
     }

     std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});

@@ -185,29 +201,61 @@ class DeformableConvOp : public framework::OperatorWithKernel {
       }
     }
-    PADDLE_ENFORCE_EQ(output_shape[1] % deformable_groups, 0U,
-                      "output num_filter must divide deformable group size.");
+    PADDLE_ENFORCE_EQ(
+        output_shape[1] % deformable_groups, 0U,
+        platform::errors::InvalidArgument(
+            "output num_filter must divide deformable group size. But received "
+            "output num_filter:[%d], deformable group size:[%d]",
+            output_shape[1], deformable_groups));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
-                        "output height must equal to offset map height.");
-      PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
-                        "output width must equal to offset map width.");
-      PADDLE_ENFORCE_EQ(offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
-                        "offset filter must divide deformable group size.");
-      PADDLE_ENFORCE_EQ(offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
-                        deformable_groups,
-                        "offset filter must divide deformable group size.");
-      PADDLE_ENFORCE_EQ(output_shape[2], mask_dims[2],
-                        "output height must equal to mask map height.");
-      PADDLE_ENFORCE_EQ(output_shape[3], mask_dims[3],
-                        "output width must equal to mask map width.");
-      PADDLE_ENFORCE_EQ(mask_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
-                        "mask filter must divide deformable group size.");
-      PADDLE_ENFORCE_EQ(mask_dims[1] / (filter_dims[2] * filter_dims[3]),
-                        deformable_groups,
-                        "mask filter must divide deformable group size.");
+      PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
+                        platform::errors::InvalidArgument(
+                            "output height must equal to offset map height. "
+                            "The difference is [%d]: [%d]",
+                            output_shape[2], offset_dims[2]));
+      PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
+                        platform::errors::InvalidArgument(
+                            "output width must equal to offset map width. The "
+                            "difference is [%d]: [%d]",
+                            output_shape[3], offset_dims[3]));
+      PADDLE_ENFORCE_EQ(
+          offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
+          platform::errors::InvalidArgument(
+              "offset filter must divide deformable group size. "
+              "But received [%d]: [%d]",
+              offset_dims[1], filter_dims[2] * filter_dims[3]));
+      PADDLE_ENFORCE_EQ(
+          offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
+          deformable_groups,
+          platform::errors::InvalidArgument(
+              "offset filter must divide deformable group size. But received "
+              "[%d]: [%d]",
+              offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
+              deformable_groups));
+      PADDLE_ENFORCE_EQ(output_shape[2], mask_dims[2],
+                        platform::errors::InvalidArgument(
+                            "output height must equal to mask map height. The "
+                            "difference is [%d] vs [%d]",
+                            output_shape[2], mask_dims[2]));
+      PADDLE_ENFORCE_EQ(output_shape[3], mask_dims[3],
+                        platform::errors::InvalidArgument(
+                            "output width must equal to mask map width. The "
+                            "difference is [%d] vs [%d]",
+                            output_shape[3], mask_dims[3]));
+      PADDLE_ENFORCE_EQ(mask_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
+                        platform::errors::InvalidArgument(
+                            "mask filter must divide deformable group size. "
+                            "But received [%d]: [%d]",
+                            mask_dims[1], filter_dims[2] * filter_dims[3]));
+      PADDLE_ENFORCE_EQ(mask_dims[1] / (filter_dims[2] * filter_dims[3]),
+                        deformable_groups,
+                        platform::errors::InvalidArgument(
+                            "mask filter must divide deformable group size. "
+                            "But received [%d]: [%d]",
+                            mask_dims[1] / (filter_dims[2] * filter_dims[3]),
+                            deformable_groups));
     }

     ctx->SetOutputDim("Output", framework::make_ddim(output_shape));

@@ -255,8 +303,8 @@ class DeformableConvGradOp : public framework::OperatorWithKernel {
     auto offset_dims = ctx->GetInputDim("Offset");
     auto mask_dims = ctx->GetInputDim("Mask");

-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Output")),
-                   "the gradient of output(Out) must not be null");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Output")), "Input",
+                   "Output@Grad", "deformable_conv_grad");
     if (ctx->HasOutput(framework::GradVarName("Input"))) {
       ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
     }
paddle/fluid/operators/deformable_conv_v1_op.cc @ ba5fa2c2

@@ -114,18 +114,14 @@ class DeformableConvV1Op : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
-                      "Input(Input) of DeformableConvOp "
-                      "should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Offset"), true,
-                      "Input(Offset) of DeformableConvOp "
-                      "should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Filter"), true,
-                      "Input(Filter) of DeformableConvOp "
-                      "should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true,
-                      "Output(Output) of DeformableConvOp "
-                      "should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input",
+                   "deformable_conv_v1");
+    OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset",
+                   "deformable_conv_v1");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter",
+                   "deformable_conv_v1");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
+                   "deformable_conv_v1");

     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");

@@ -139,40 +135,61 @@ class DeformableConvV1Op : public framework::OperatorWithKernel {
     int deformable_groups = ctx->Attrs().Get<int>("deformable_groups");
     int im2col_step = ctx->Attrs().Get<int>("im2col_step");
-    PADDLE_ENFORCE_EQ(in_dims.size(), 4,
-                      "Conv input should be 4-D tensor, get %u", in_dims.size());
-    PADDLE_ENFORCE_EQ(
-        in_dims.size(), filter_dims.size(),
-        "Conv input dimension and filter dimension should be the same.");
-    PADDLE_ENFORCE_EQ(
-        in_dims.size() - strides.size(), 2U,
-        "Conv input dimension and strides dimension should be consistent.");
-    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
-                      "Conv paddings dimension and Conv strides dimension "
-                      "should be the same.");
-    PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
-                      "The number of input channels should be equal to filter "
-                      "channels * groups.");
-    PADDLE_ENFORCE_EQ(filter_dims[0] % groups, 0,
-                      "The number of output channels should be divided by groups.");
-    PADDLE_ENFORCE_EQ(filter_dims[0] % deformable_groups, 0,
-                      "The number of output channels should be "
-                      "divided by deformable groups.");
+    PADDLE_ENFORCE_EQ(
+        in_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "Conv input should be 4-D tensor, get %u", in_dims.size()));
+    PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv input dimension and filter dimension should be "
+                          "the same. the difference is [%d] vs [%d]",
+                          in_dims.size(), filter_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims.size() - strides.size(), 2U,
+        platform::errors::InvalidArgument(
+            "Conv input dimension and strides "
+            "dimension should be consistent., But received [%d]: [%d]",
+            in_dims.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv paddings dimension and Conv strides dimension "
+                          "should be the same. The difference is [%d] vs [%d]",
+                          paddings.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims[1], filter_dims[1] * groups,
+        platform::errors::InvalidArgument(
+            "The number of input channels should be equal to filter "
+            "channels * groups. The difference is [%d]: [%d]",
+            in_dims[1], filter_dims[1] * groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be divided by groups. But"
+            "received output channels: [%d], groups: [%d]",
+            filter_dims[0], groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % deformable_groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be "
+            "divided by deformable groups. But received [%d]: [%d]",
+            filter_dims[0], deformable_groups));
     if (in_dims[0] > im2col_step) {
-      PADDLE_ENFORCE_EQ(in_dims[0] % im2col_step, 0U,
-                        "Input batchsize must be smaller than or divide im2col_step");
+      PADDLE_ENFORCE_EQ(
+          in_dims[0] % im2col_step, 0U,
+          platform::errors::InvalidArgument(
+              "Input batchsize must be smaller than or divide "
+              "im2col_step, But received [%d]: [%d]",
+              in_dims[0], im2col_step));
     }

     for (size_t i = 0; i < strides.size(); ++i) {
-      PADDLE_ENFORCE_GT(strides[i], 0U, "stride %d size incorrect", i);
+      PADDLE_ENFORCE_GT(strides[i], 0U, platform::errors::InvalidArgument(
+                                            "stride %d size incorrect", i));
     }
     for (size_t i = 0; i < dilations.size(); ++i) {
-      PADDLE_ENFORCE_GT(dilations[i], 0U, "dilation %d size incorrect", i);
+      PADDLE_ENFORCE_GT(dilations[i], 0U, platform::errors::InvalidArgument(
+                                              "dilation %d size incorrect", i));
     }

     std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});

@@ -187,25 +204,34 @@ class DeformableConvV1Op : public framework::OperatorWithKernel {
       }
     }
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(output_shape[1] % deformable_groups, 0U,
                         platform::errors::InvalidArgument(
-                            "output num_filter must divide deformable group size."));
+                            "output num_filter must divide deformable group "
+                            "size. But received [%d]: [%d]",
+                            output_shape[1], deformable_groups));
       PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
                         platform::errors::InvalidArgument(
-                            "output height must equal to offset map height."));
+                            "output height must equal to offset map height. "
+                            "The difference is [%d]: [%d]",
+                            output_shape[2], offset_dims[2]));
       PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
                         platform::errors::InvalidArgument(
-                            "output width must equal to offset map width."));
+                            "output width must equal to offset map width. The "
+                            "difference is [%d]: [%d]",
+                            output_shape[3], offset_dims[3]));
       PADDLE_ENFORCE_EQ(offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
                         platform::errors::InvalidArgument(
-                            "offset filter must divide deformable group size."));
+                            "offset filter must divide deformable group size. "
+                            "But received [%d]: [%d]",
+                            offset_dims[1], filter_dims[2] * filter_dims[3]));
       PADDLE_ENFORCE_EQ(offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
                         deformable_groups,
                         platform::errors::InvalidArgument(
-                            "offset filter must divide deformable group size."));
+                            "offset filter must divide deformable group size. But received "
+                            "[%d]: [%d]",
+                            offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
+                            deformable_groups));
     }
     ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
   }

@@ -249,8 +275,8 @@ class DeformableConvV1GradOp : public framework::OperatorWithKernel {
     auto filter_dims = ctx->GetInputDim("Filter");
     auto offset_dims = ctx->GetInputDim("Offset");

-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Output")), true,
-                      "the gradient of output(Out) must not be null");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Output")), "Input",
+                   "Output@Grad", "deformable_conv_v1_grad");
     if (ctx->HasOutput(framework::GradVarName("Input"))) {
       ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
     }
paddle/fluid/operators/detection/polygon_box_transform_op.cc @ ba5fa2c2

@@ -23,8 +23,9 @@ template <typename DeviceContext, typename T>
 class PolygonBoxTransformCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "It must use CUDAPlace."));
     auto* in = ctx.Input<Tensor>("Input");
     auto in_dims = in->dims();
     const T* in_data = in->data<T>();

@@ -56,18 +57,23 @@ class PolygonBoxTransformOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput("Input"),
-        "Input (Input) of polygon_box transform op should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Output"),
-        "Output (Output) of polygon_box transform op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input",
+                   "polygon_box_transform");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
+                   "polygon_box_transform");

     auto in_dim = ctx->GetInputDim("Input");

-    PADDLE_ENFORCE_EQ(in_dim.size(), 4, "input's rank must be 4.");
-    PADDLE_ENFORCE_EQ(in_dim[1] % 2, 0,
-                      "input's second dimension must be even.");
+    PADDLE_ENFORCE_EQ(
+        in_dim.size(), 4,
+        platform::errors::InvalidArgument(
+            "input's rank must be 4. But received: Input rank is [%d]",
+            in_dim.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dim[1] % 2, 0,
+        platform::errors::InvalidArgument(
+            "input's second dimension must be even. But "
+            "received: Input 2nd dimension is [%d]",
+            in_dim[1]));

     ctx->SetOutputDim("Output", in_dim);
   }
paddle/fluid/operators/detection/polygon_box_transform_op.cu @ ba5fa2c2

@@ -43,8 +43,9 @@ template <typename T>
 class PolygonBoxTransformOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "It must use CUDAPlace."));
     auto* in = ctx.Input<Tensor>("Input");
     auto in_dims = in->dims();
     const T* in_data = in->data<T>();
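The two polygon_box_transform kernels above show the same migration applied to boolean place checks: the single-condition PADDLE_ENFORCE(cond, msg) becomes an explicit comparison against true with a typed error. Condensed from the hunks above; note that the message text itself, including "It must use CUDAPlace." in the CPU kernel, is carried over from the old code unchanged.

// Before
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace.");

// After: explicit expected value plus a typed InvalidArgument error.
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                  platform::errors::InvalidArgument("It must use CUDAPlace."));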
paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc @ ba5fa2c2

@@ -28,22 +28,38 @@ class TeacherStudentSigmoidLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
+                   "teacher_student_sigmoid_loss");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
+                   "teacher_student_sigmoid_loss");
+    OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y",
+                   "teacher_student_sigmoid_loss");

     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(label_dims.size(), 2UL,
-                      "Input(Label)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL,
+                      platform::errors::InvalidArgument(
+                          "Input(X)'s rank should be 2. But received: "
+                          "Input(X)'s rank is [%d]",
+                          x_dims.size()));
+    PADDLE_ENFORCE_EQ(label_dims.size(), 2UL,
+                      platform::errors::InvalidArgument(
+                          "Input(Label)'s rank should be 2. But "
+                          "received Input(Label)'s rank is [%d]",
+                          label_dims.size()));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0],
-                        "The 1st dimension of Input(X) and Input(Label) should "
-                        "be equal.");
-      PADDLE_ENFORCE_EQ(label_dims[1], 1UL,
-                        "The 2nd dimension of "
-                        "Input(Label) should be 1.");
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], label_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Label) should "
+              "be equal. The difference is [%d]: [%d]",
+              x_dims[0], label_dims[0]));
+      PADDLE_ENFORCE_EQ(label_dims[1], 1UL,
+                        platform::errors::InvalidArgument(
+                            "The 2nd dimension of "
+                            "Input(Label) should be 1. But received "
+                            "Input(Label)'s 2nd dim is [%d]",
+                            label_dims[1]));
     }
     ctx->SetOutputDim("Y", {x_dims[0], 1});
     ctx->ShareLoD("X", /*->*/ "Y");

@@ -87,32 +103,60 @@ class TeacherStudentSigmoidLossGradientOp
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
+                   "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "X",
+                   "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                   "Y@Grad", "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Input",
+                   "X@Grad", "teacher_student_sigmoid_loss_grad");

     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(label_dims.size(), 2, "Input(Label)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Input(X)'s rank should be 2. But received Input(X)'s rank is [%d]",
+            x_dims.size()));
+    PADDLE_ENFORCE_EQ(dy_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Input(Y@Grad)'s rank should be 2. But received "
+                          "Input(Y@Grad)'s rank is [%d]",
+                          dy_dims.size()));
+    PADDLE_ENFORCE_EQ(label_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Input(Label)'s rank should be 2. But received "
+                          "Input(Y@Grad)'s rank is [%d]",
+                          label_dims.size()));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0],
-                        "The 1st dimension of Input(X) and Input(Label) should "
-                        "be equal.");
-      PADDLE_ENFORCE_EQ(x_dims[0], dy_dims[0],
-                        "The 1st dimension of Input(X) and Input(Y@Grad) should "
-                        "be equal.");
-      PADDLE_ENFORCE_EQ(dy_dims[1], 1,
-                        "The 2nd dimension of Input(Y@Grad) should be 1.");
-      PADDLE_ENFORCE_EQ(label_dims[1], 1,
-                        "When Attr(soft_label) == false, the 2nd dimension of "
-                        "Input(Label) should be 1.");
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], label_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Label) should "
+              "be equal. The difference is [%d]: [%d]",
+              x_dims[0], label_dims[0]));
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], dy_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Y@Grad) should "
+              "be equal. The difference is [%d]: [%d]",
+              x_dims[0], dy_dims[0]));
+      PADDLE_ENFORCE_EQ(dy_dims[1], 1,
+                        platform::errors::InvalidArgument(
+                            "The 2nd dimension of Input(Y@Grad) should be 1. "
+                            "But received Input(Y@Grad)'s 2nd dimension is [%d]",
+                            dy_dims[1]));
+      PADDLE_ENFORCE_EQ(
+          label_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "When Attr(soft_label) == false, the 2nd dimension of "
+              "Input(Label) should be 1. But received Input(Label)'s 2nd "
+              "dimemsion "
+              "is [%d]",
+              label_dims[1]));
     }
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
python/paddle/fluid/layers/detection.py @ ba5fa2c2

@@ -891,6 +891,8 @@ def polygon_box_transform(input, name=None):
            input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
            out = fluid.layers.polygon_box_transform(input)
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'polygon_box_transform')
     helper = LayerHelper("polygon_box_transform", **locals())
     output = helper.create_variable_for_type_inference(dtype=input.dtype)
python/paddle/fluid/layers/learning_rate_scheduler.py @ ba5fa2c2

@@ -33,6 +33,7 @@ from ..framework import default_main_program, Parameter, unique_name, name_scope
 from ..framework import Variable
 from ..framework import in_dygraph_mode
 from ..dygraph import learning_rate_scheduler as imperate_lr
+from ..data_feeder import check_variable_and_dtype, check_type

 __all__ = [
     'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',

@@ -449,6 +450,8 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
            lr = fluid.layers.cosine_decay(
            learning_rate = base_lr, step_each_epoch=10000, epochs=120)
     """
+    check_type(learning_rate, 'learning_rate', (float, tensor.Variable),
+               'cosine_decay')
     with default_main_program()._lr_schedule_guard():
         if in_dygraph_mode():
python/paddle/fluid/layers/loss.py @ ba5fa2c2

@@ -334,6 +334,10 @@ def square_error_cost(input, label):
            # [0.04000002]
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'square_error_cost')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'],
+                             'square_error_cost')
     helper = LayerHelper('square_error_cost', **locals())
     minus_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(

@@ -1481,6 +1485,11 @@ def teacher_student_sigmoid_loss(input,
          cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label)
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'teacher_student_sigmoid_loss')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'],
+                             'teacher_student_sigmoid_loss')
+
     helper = LayerHelper('teacher_student_sigmoid_loss', **locals())
     out = helper.create_variable(dtype=input.dtype)
     helper.append_op(

@@ -1715,4 +1724,6 @@ def mse_loss(input, label):
            # [0.04000002]
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'], 'mse_loss')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'], 'mse_loss')
     return nn.reduce_mean(square_error_cost(input, label))
python/paddle/fluid/layers/nn.py @ ba5fa2c2

@@ -13554,6 +13554,12 @@ def deformable_conv(input,
            num_filters=2, filter_size=filter_size, padding=1, modulated=False)
     """

+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'deformable_conv')
+    check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
+                             'deformable_conv')
+    check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
+
     num_channels = input.shape[1]
     assert param_attr is not False, "param_attr should not be False here."
python/paddle/fluid/tests/unittests/test_deformable_conv_op.py @ ba5fa2c2

@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 from op_test import OpTest

@@ -256,5 +257,31 @@ class TestWithGroup(TestModulatedDeformableConvOp):
         self.groups = 2


+class TestModulatedDeformableConvInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [1, 3, 32, 32]
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            mask = fluid.data(
+                name='mask', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input, offset, mask, num_filters=4, filter_size=1)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_offset():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int32')
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            mask = fluid.data(
+                name='mask', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input, offset, mask, num_filters=4, filter_size=1)
+
+        self.assertRaises(TypeError, test_invalid_offset)
+
+
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py @ ba5fa2c2

@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 from op_test import OpTest

@@ -252,5 +253,37 @@ class TestWithGroup(TestModulatedDeformableConvOp):
         self.groups = 2


+class TestModulatedDeformableConvV1InvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [1, 3, 32, 32]
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input,
+                offset,
+                mask=None,
+                num_filters=4,
+                filter_size=1,
+                modulated=False)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_offset():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int32')
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input,
+                offset,
+                mask=None,
+                num_filters=4,
+                filter_size=1,
+                modulated=False)
+
+        self.assertRaises(TypeError, test_invalid_offset)
+
+
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_mse_loss.py @ ba5fa2c2

@@ -47,5 +47,22 @@ class TestMseLoss(unittest.TestCase):
         self.assertTrue(np.isclose(np_result, result).all())


+class TestMseInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [256, 3]
+            label = fluid.data(name='label', shape=[None, 3], dtype='float32')
+            loss = fluid.layers.mse_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
+            label = [256, 3]
+            loss = fluid.layers.mse_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)
+
+
 if __name__ == "__main__":
     unittest.main()
python/paddle/fluid/tests/unittests/test_polygon_box_transform.py @ ba5fa2c2

@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from op_test import OpTest

@@ -66,5 +67,15 @@ class TestCase2(TestPolygonBoxRestoreOp):
         self.input_shape = (3, 12, 4, 5)


+class TestPolygonBoxInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int64')
+            out = fluid.layers.polygon_box_transform(input)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_square_error_cost.py @ ba5fa2c2

@@ -48,5 +48,22 @@ class TestSquareErrorCost(unittest.TestCase):
         self.assertTrue(np.isclose(np_result, result).all())


+class TestSquareErrorInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [256, 3]
+            label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
+            loss = fluid.layers.square_error_cost(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input2', shape=[None, 3], dtype='float32')
+            label = [256, 3]
+            loss = fluid.layers.square_error_cost(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)
+
+
 if __name__ == "__main__":
     unittest.main()
python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py @ ba5fa2c2

@@ -19,6 +19,7 @@ from op_test import OpTest
 from scipy.special import logit
 from scipy.special import expit
 import unittest
+import paddle.fluid as fluid


 class TestTeacherStudentSigmoidLossOp(OpTest):

@@ -57,3 +58,20 @@ class TestTeacherStudentSigmoidLossOp(OpTest):
     def test_check_grad(self):
         self.check_grad(["X"], "Y", numeric_grad_delta=0.005)
+
+
+class TestTeacherStudentSigmoidLossInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [512, 1]
+            label = fluid.data(name='label', shape=[None, 1], dtype='float32')
+            loss = fluid.layers.teacher_student_sigmoid_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input1', shape=[None, 1], dtype='float32')
+            label = [512, 1]
+            loss = fluid.layers.teacher_student_sigmoid_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)