PaddlePaddle / PaddleDetection
Commit e1c707fe (unverified)
Authored by tensor-tang on Feb 19, 2019 · Committed via GitHub on Feb 19, 2019
fix warnings (#15790)
* fix warnings test=develop
* fix enforce test test=develop
Parent: 6402424f
Showing 24 changed files with 60 additions and 59 deletions (+60 -59)
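Most of the hunks below follow one pattern: a PADDLE_ENFORCE_* check compares a size_t expression (such as std::vector::size()) against a plain signed literal, or an int-valued call (such as DDim::size()) against an unsigned UL literal, and the signed/unsigned mismatch raises sign-compare warnings, which appears to be the warning family this commit silences. The fix is to make the literal match the type of the value being checked. The sketch below is not part of the commit; enforce_eq is an illustrative stand-in for the real PADDLE_ENFORCE_EQ macro, which also captures both operands before comparing them.

```cpp
// sign_compare_sketch.cc -- a minimal sketch, not part of the commit.
// Assumed build command: g++ -Wall -c sign_compare_sketch.cc
#include <cstddef>
#include <vector>

// Illustrative stand-in for PADDLE_ENFORCE_EQ: both operands are captured as
// deduced types before the comparison, so the compiler sees a runtime
// signed/unsigned comparison rather than a comparison against a constant.
template <typename A, typename B>
bool enforce_eq(const A& a, const B& b) {
  return a == b;  // sign-compare warning when A is unsigned and B is signed
}

void check(const std::vector<int>& in_var_handles) {
  enforce_eq(in_var_handles.size(), 1);    // before: size_t vs. int  -> warns
  enforce_eq(in_var_handles.size(), 1UL);  // after: both unsigned    -> clean
}
```

Where the left-hand side is signed instead (for example DDim::size() returns int), the commit goes the other way and drops the UL suffix; where neither side is a literal it inserts an explicit static_cast, as in sequence_expand_op.cc below.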
paddle/fluid/framework/details/broadcast_op_handle.cc            +1 -1
paddle/fluid/framework/details/data_balance_op_handle.cc         +1 -1
paddle/fluid/framework/details/fuse_vars_op_handle.cc            +1 -1
paddle/fluid/framework/details/reduce_op_handle.cc               +1 -1
paddle/fluid/framework/ir/conv_bn_fuse_pass.cc                   +1 -1
paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc       +3 -3
paddle/fluid/framework/ir/graph_pattern_detector.cc              +2 -2
paddle/fluid/inference/api/api.cc                                +1 -1
paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc    +2 -2
paddle/fluid/operators/attention_lstm_op.cc                      +1 -1
paddle/fluid/operators/controlflow/get_places_op.cc              +1 -1
paddle/fluid/operators/crf_decoding_op.cc                        +2 -2
paddle/fluid/operators/detection/anchor_generator_op.cc          +3 -3
paddle/fluid/operators/fc_op.cc                                  +1 -1
paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h       +2 -1
paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc       +2 -2
paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc    +1 -1
paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc         +1 -1
paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc        +1 -1
paddle/fluid/operators/layer_norm_op.cc                          +2 -2
paddle/fluid/operators/linear_chain_crf_op.cc                    +4 -4
paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc     +2 -2
paddle/fluid/operators/sequence_ops/sequence_expand_op.cc        +4 -3
paddle/fluid/platform/enforce_test.cc                            +20 -21
paddle/fluid/framework/details/broadcast_op_handle.cc
@@ -30,7 +30,7 @@ void BroadcastOpHandle::RunImpl() {
   VarHandle *in_var_handle;
   {
     auto in_var_handles = DynamicCast<VarHandle>(inputs_);
-    PADDLE_ENFORCE_EQ(in_var_handles.size(), 1,
+    PADDLE_ENFORCE_EQ(in_var_handles.size(), 1UL,
                       "The number of input should be one.");
     in_var_handle = in_var_handles[0];
   }
paddle/fluid/framework/details/data_balance_op_handle.cc
@@ -86,7 +86,7 @@ std::vector<std::array<int, 3>> DataBalanceOpHandle::GetBalancePlan(
 }

 void DataBalanceOpHandle::RunImpl() {
-  PADDLE_ENFORCE_GT(places_.size(), 1,
+  PADDLE_ENFORCE_GT(places_.size(), 1UL,
                     "Data balance can only be enabled when the number of "
                     "places to run larger than 1.");
   auto in_var_handles = DynamicCast<VarHandle>(this->Inputs());
paddle/fluid/framework/details/fuse_vars_op_handle.cc
@@ -23,7 +23,7 @@ void FuseVarsOpHandle::RunImpl() {
   auto in_var_handles = DynamicCast<VarHandle>(this->Inputs());
   auto out_var_handles = DynamicCast<VarHandle>(this->Outputs());

-  PADDLE_ENFORCE_EQ(in_var_handles.size(), 0);
+  PADDLE_ENFORCE_EQ(in_var_handles.size(), 0UL);
   PADDLE_ENFORCE_EQ(out_var_handles.size() - 1, inputs_numel_.size(), "");

   auto scope = local_scope_->FindVar(kLocalExecScopeName)->Get<Scope *>();
paddle/fluid/framework/details/reduce_op_handle.cc
@@ -153,7 +153,7 @@ void ReduceOpHandle::RunImpl() {
   {
     auto out_var_handles = DynamicCast<VarHandle>(outputs_);

-    PADDLE_ENFORCE_EQ(out_var_handles.size(), 1,
+    PADDLE_ENFORCE_EQ(out_var_handles.size(), 1UL,
                       "The number of output should be one.");
     out_var_handle = out_var_handles.front();
   }
paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -169,7 +169,7 @@ std::unique_ptr<ir::Graph> ConvBNFusePass::ApplyImpl(
     if (has_bias && conv->Op()->Input("Bias").size() > 0) {
       // reuse existing conv bias node
       auto conv_bias_names = conv->Op()->Input("Bias");
-      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1);
+      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1UL);
       auto *conv_bias_var = scope->FindVar(conv_bias_names[0]);
       auto *conv_bias_tensor = conv_bias_var->GetMutable<LoDTensor>();
       PADDLE_ENFORCE_EQ(conv_bias_tensor->dims(),
paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc
@@ -111,7 +111,7 @@ std::unique_ptr<ir::Graph> FuseReluDepthwiseConvPass::FuseReluDepthwiseConv(
       xg_var = subgraph.at(xg)->Var();
     }

-    PADDLE_ENFORCE_EQ(layer_op->Input("Input").size(), 1);
+    PADDLE_ENFORCE_EQ(layer_op->Input("Input").size(), 1UL);
     PADDLE_ENFORCE_EQ(layer_op->Input("Input")[0], y_var->Name());
     layer_op->SetInput("Input", {x_var->Name()});
     subgraph.at(layer)->inputs.push_back(subgraph.at(x));
@@ -119,13 +119,13 @@ std::unique_ptr<ir::Graph> FuseReluDepthwiseConvPass::FuseReluDepthwiseConv(
     VLOG(4) << "replace " << y_var->Name() << " -> " << x_var->Name();

     if (!only_forward) {
-      PADDLE_ENFORCE_EQ(layer_g_op->Input("Input").size(), 1);
+      PADDLE_ENFORCE_EQ(layer_g_op->Input("Input").size(), 1UL);
       PADDLE_ENFORCE_EQ(layer_g_op->Input("Input")[0], y_var->Name());
       layer_g_op->SetInput("Input", {x_var->Name()});
       subgraph.at(layer_g)->inputs.push_back(subgraph.at(x));
       subgraph.at(x)->outputs.push_back(subgraph.at(layer_g));

-      PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input")).size(), 1);
+      PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input")).size(), 1UL);
       PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input"))[0], yg_var->Name());
       layer_g_op->SetOutput(GradVarName("Input"), {xg_var->Name()});
paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -38,7 +38,7 @@ size_t PDPattern::id_ = 0UL;
 PDNode *PDPattern::NewNode(const std::string &name) {
   if (!name.empty()) {
-    PADDLE_ENFORCE_EQ(node_map_.count(name), 0,
+    PADDLE_ENFORCE_EQ(node_map_.count(name), 0UL,
                       "PDNode's name should be unique, get duplicate [%s]",
                       name);
   }
@@ -51,7 +51,7 @@ PDNode *PDPattern::NewNode(const std::string &name) {
 PDNode *PDPattern::NewNode(PDNode::teller_t &&teller, const std::string &name) {
   if (!name.empty()) {
-    PADDLE_ENFORCE_EQ(node_map_.count(name), 0,
+    PADDLE_ENFORCE_EQ(node_map_.count(name), 0UL,
                       "PDNode's name should be unique, get duplicate [%s]",
                       name);
   }
paddle/fluid/inference/api/api.cc
@@ -92,7 +92,7 @@ void PaddleBuf::Reset(void *data, size_t length) {
 void PaddleBuf::Free() {
   if (memory_owned_ && data_) {
-    PADDLE_ENFORCE_GT(length_, 0);
+    PADDLE_ENFORCE_GT(length_, 0UL);
     free(static_cast<char *>(data_));
     data_ = nullptr;
     length_ = 0;
paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc
@@ -56,14 +56,14 @@ struct DataRecord {
       std::vector<float> slot_data;
       split_to_float(data[1], ' ', &slot_data);
       std::string name = data[0];
-      PADDLE_ENFORCE_EQ(slot_data.size() % 11, 0,
+      PADDLE_ENFORCE_EQ(slot_data.size() % 11, 0UL,
                         "line %d, %s should be divisible", num_lines, name);
       datasets[name].emplace_back(std::move(slot_data));
     }
     num_samples = num_lines / num_slots;
     PADDLE_ENFORCE_EQ(num_samples * num_slots, static_cast<size_t>(num_lines),
                       "num samples should be divisible");
-    PADDLE_ENFORCE_GT(num_samples, 0);
+    PADDLE_ENFORCE_GT(num_samples, 0UL);
   }

   void Prepare(int bs) {
paddle/fluid/operators/attention_lstm_op.cc
@@ -293,7 +293,7 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
       int len = x_lod[0][i + 1] - x_lod[0][i];
       max_seq_len = max_seq_len < len ? len : max_seq_len;
     }
-    PADDLE_ENFORCE_EQ(x_lod.size(), 1, "Input(X)'s lod size must be 1.");
+    PADDLE_ENFORCE_EQ(x_lod.size(), 1UL, "Input(X)'s lod size must be 1.");
     PADDLE_ENFORCE_EQ(c0->dims()[0], N, "C0 dims should be %d x %d.", N, D);
     fc_out->Resize({max_seq_len, 1});
paddle/fluid/operators/controlflow/get_places_op.cc
@@ -52,7 +52,7 @@ class GetPlacesOp : public framework::OperatorBase {
       device_count =
           is_gpu ? CUDADevCount() : std::thread::hardware_concurrency();
     }
-    PADDLE_ENFORCE_NE(device_count, 0, "Cannot indicate %s device count",
+    PADDLE_ENFORCE_NE(device_count, 0UL, "Cannot indicate %s device count",
                       is_gpu ? "GPU" : "CPU");
     auto out_var_name = Output("Out");
paddle/fluid/operators/crf_decoding_op.cc
@@ -84,12 +84,12 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
                    "Output(ViterbiPath) should be not null.");

     auto emission_dims = ctx->GetInputDim("Emission");
-    PADDLE_ENFORCE_EQ(emission_dims.size(), 2UL,
+    PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
                       "The Input(Emission) should be a 2-D tensor.");
     PADDLE_ENFORCE(emission_dims[0], "An empty mini-batch is not allowed.");

     auto transition_dims = ctx->GetInputDim("Transition");
-    PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
+    PADDLE_ENFORCE_EQ(transition_dims.size(), 2,
                       "The Input(Transition) should be a 2-D tensor.");
     PADDLE_ENFORCE_EQ(transition_dims[0] - 2, transition_dims[1],
paddle/fluid/operators/detection/anchor_generator_op.cc
@@ -85,7 +85,7 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
         " For instance, the anchor size of 64 means the area of this anchor "
         "equals to 64**2.")
         .AddCustomChecker([](const std::vector<float>& anchor_sizes) {
-          PADDLE_ENFORCE_GT(anchor_sizes.size(), 0,
+          PADDLE_ENFORCE_GT(anchor_sizes.size(), 0UL,
                             "Size of anchor_sizes must be at least 1.");
           for (size_t i = 0; i < anchor_sizes.size(); ++i) {
             PADDLE_ENFORCE_GT(anchor_sizes[i], 0.0,
@@ -103,7 +103,7 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
                               "(vector<float>) List of variances to be used "
                               "in box regression deltas")
         .AddCustomChecker([](const std::vector<float>& variances) {
-          PADDLE_ENFORCE_EQ(variances.size(), 4,
+          PADDLE_ENFORCE_EQ(variances.size(), 4UL,
                             "Must and only provide 4 variance.");
           for (size_t i = 0; i < variances.size(); ++i) {
             PADDLE_ENFORCE_GT(variances[i], 0.0,
@@ -117,7 +117,7 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(std::vector<float>(2, 16.0))
         .AddCustomChecker([](const std::vector<float>& stride) {
           PADDLE_ENFORCE_EQ(
-              stride.size(), 2,
+              stride.size(), 2UL,
               "Must and only provide 2 stride for width and height.");
           for (size_t i = 0; i < stride.size(); ++i) {
             PADDLE_ENFORCE_GT(stride[i], 0.0,
paddle/fluid/operators/fc_op.cc
@@ -47,7 +47,7 @@ void FCOp::InferShape(framework::InferShapeContext* ctx) const {
     PADDLE_ENFORCE(in_dims.size() == 2 || in_dims.size() == 4,
                    "Fully Connected input should be 2-D or 4-D tensor.");
   }
-  PADDLE_ENFORCE_EQ(w_dims.size(), 2UL,
+  PADDLE_ENFORCE_EQ(w_dims.size(), 2,
                     "Fully Connected input should be 2-D tensor.");
   int in_num_col_dims = ctx->Attrs().Get<int>("in_num_col_dims");
   PADDLE_ENFORCE_GT(
paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h
@@ -47,10 +47,11 @@ struct EmbeddingVSumFunctor {
     auto *output = output_t->mutable_data<T>(context.GetPlace());

     PADDLE_ENFORCE_LE(table_width * idx_width, out_width);
+    PADDLE_ENFORCE_GT(ids_lod.size(), 1UL);

     jit::emb_seq_pool_attr_t attr(table_height, table_width, 0, idx_width,
                                   out_width, jit::SeqPoolType::kSum);
-    for (int64_t i = 0; i != ids_lod.size() - 1; ++i) {
+    for (size_t i = 0; i != ids_lod.size() - 1; ++i) {
       attr.index_height = ids_lod[i + 1] - ids_lod[i];
       auto emb_seqpool = jit::Get<jit::kEmbSeqPool, jit::EmbSeqPoolTuples<T>,
                                   platform::CPUPlace>(attr);
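The loop change above is the same signedness issue in index form: ids_lod.size() - 1 is a size_t expression, so a signed int64_t loop counter makes i != ids_lod.size() - 1 a mixed comparison. A minimal sketch, assuming ids_lod behaves like a plain std::vector<size_t> of segment offsets rather than Paddle's real LoD type:

```cpp
// loop_index_sketch.cc -- a minimal sketch, not Paddle's actual kernel code.
#include <cstdint>
#include <vector>

int64_t total_segment_length(const std::vector<size_t>& ids_lod) {
  // The PADDLE_ENFORCE_GT(ids_lod.size(), 1UL) added in the hunk above requires
  // at least two offsets (one segment); it also rules out the unsigned
  // wrap-around of size() - 1 on an empty vector.
  int64_t total = 0;
  // for (int64_t i = 0; i != ids_lod.size() - 1; ++i)  // mixed signedness: warns
  for (size_t i = 0; i != ids_lod.size() - 1; ++i) {    // index matches size(): clean
    total += static_cast<int64_t>(ids_lod[i + 1] - ids_lod[i]);
  }
  return total;
}
```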
paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc
@@ -37,7 +37,7 @@ void FusionRepeatedFCReluOp::InferShape(
       "Output(Out) of FusionRepeatedFCReluOp should not be null.");
   auto i_dims = ctx->GetInputDim("X");
-  PADDLE_ENFORCE_EQ(i_dims.size(), 2UL, "Input shape size should be 2");
+  PADDLE_ENFORCE_EQ(i_dims.size(), 2, "Input shape size should be 2");
   auto w_dims = ctx->GetInputsDim("W");
   auto b_dims = ctx->GetInputsDim("Bias");
@@ -49,7 +49,7 @@ void FusionRepeatedFCReluOp::InferShape(
                     "inpute width should be equal with weight height");
   for (size_t i = 1; i < sz; ++i) {
-    PADDLE_ENFORCE_EQ(w_dims[i].size(), 2UL,
+    PADDLE_ENFORCE_EQ(w_dims[i].size(), 2,
                       "Every weight shape size should be 2.");
     PADDLE_ENFORCE_EQ(framework::product(b_dims[i]), w_dims[i][1],
                       "The length of Bias must be equal with w_dims[1].");
paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc
@@ -39,7 +39,7 @@ void FusionSeqExpandConcatFCOp::InferShape(
   auto ins_dims = ctx->GetInputsDim("X");
   auto w_dims = ctx->GetInputDim("FCWeight");  // (M0+M1+M2+..) x D
-  PADDLE_ENFORCE_EQ(w_dims.size(), 2UL, "Input(FCWeight)'s rank must be 2.");
+  PADDLE_ENFORCE_EQ(w_dims.size(), 2, "Input(FCWeight)'s rank must be 2.");
   const int D = w_dims[1];
   int sum = ins_dims[0][1];
   for (size_t i = 1; i < ins_dims.size(); ++i) {
paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc
@@ -39,7 +39,7 @@ void FusionSeqPoolConcatOp::InferShape(
   // The output height should be confirmed in Compute,
   // since input lod is not accessible here.
-  PADDLE_ENFORCE_EQ(ins_dims[0].size(), 2UL,
+  PADDLE_ENFORCE_EQ(ins_dims[0].size(), 2,
                     "The dims size of first input should be 2.");
   ctx->SetOutputDim("Out", {-1, ins_dims[0][axis] * static_cast<int>(n)});
 }
paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc
@@ -42,7 +42,7 @@ void FusionSquaredMatSubOp::InferShape(
   auto y_dims = ctx->GetInputDim("Y");
   PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
                     "Input tensors dims size should be equal.");
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input tensors should be a Matrix.");
+  PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input tensors should be a Matrix.");
   PADDLE_ENFORCE_EQ(x_dims[1], y_dims[0], "Inputs Matrix should be multiply.");
   ctx->SetOutputDim("SquaredX", x_dims);
paddle/fluid/operators/layer_norm_op.cc
@@ -44,11 +44,11 @@ class LayerNormOp : public framework::OperatorWithKernel {
     int left = static_cast<int>(matrix_dim[0]);
     int right = static_cast<int>(matrix_dim[1]);
     if (ctx->HasInput("Scale")) {
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1);
       PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right);
     }
     if (ctx->HasInput("Bias")) {
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1);
       PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right);
     }
paddle/fluid/operators/linear_chain_crf_op.cc
@@ -144,12 +144,12 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
                    "Output(LogLikelihood) should be not null.");

     auto emission_dims = ctx->GetInputDim("Emission");
-    PADDLE_ENFORCE_EQ(emission_dims.size(), 2UL,
+    PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
                       "The Input(Emission) should be a 2-D tensor.");
     PADDLE_ENFORCE(emission_dims[0], "An empty mini-batch is not allowed.");

     auto transition_dims = ctx->GetInputDim("Transition");
-    PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
+    PADDLE_ENFORCE_EQ(transition_dims.size(), 2,
                       "The Input(Transition) should be a 2-D tensor.");
     PADDLE_ENFORCE_EQ(transition_dims[0] - 2, transition_dims[1],
@@ -202,13 +202,13 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel {
                    "Input(LogLikelihood@GRAD) shoudl be not null.");

     auto emission_exps_dims = ctx->GetInputDim("EmissionExps");
-    PADDLE_ENFORCE_EQ(emission_exps_dims.size(), 2UL,
+    PADDLE_ENFORCE_EQ(emission_exps_dims.size(), 2,
                       "The Input(EmissionExps) should be a 2-D tensor.");
     PADDLE_ENFORCE(emission_exps_dims[0], "An empty mini-batch is not allowed.");

     auto transition_exps_dims = ctx->GetInputDim("TransitionExps");
-    PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2UL,
+    PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2,
                       "The Input(TransitionExps) should be a 2-D tensor.");
     PADDLE_ENFORCE_EQ(transition_exps_dims[0] - 2, transition_exps_dims[1],
paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc
@@ -31,10 +31,10 @@ class SequenceEnumerateOp : public framework::OperatorWithKernel {
     const auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(
-        x_dims.size(), 2UL,
+        x_dims.size(), 2,
         "Input(X) of SequenceEnumerate operator's rank should be 2.");
     PADDLE_ENFORCE_EQ(
-        x_dims[1], 1UL,
+        x_dims[1], 1,
         "Input(X) of SequenceEnumerate operator's 2nd dimension should be 1.");
     const auto win_size = ctx->Attrs().Get<int>("win_size");
paddle/fluid/operators/sequence_ops/sequence_expand_op.cc
@@ -48,10 +48,10 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
       auto& x_lod = x_var->Get<LoDTensor>().lod();
       auto& y_lod = y_var->Get<LoDTensor>().lod();

-      PADDLE_ENFORCE_LE(x_lod.size(), 1,
+      PADDLE_ENFORCE_LE(x_lod.size(), 1UL,
                         "Level number of Input(X)'s lod should not be "
                         "greater than 1.");
-      PADDLE_ENFORCE_GT(y_lod.size(), 0,
+      PADDLE_ENFORCE_GT(y_lod.size(), 0UL,
                         "Level number of Input(Y)'s lod should be "
                         "greater than 0.");
       PADDLE_ENFORCE(
@@ -69,7 +69,8 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
             "size of Input(X)'s first level lod should be equal to "
             "size of Input(Y)'s referred level lod.");
       } else {
-        PADDLE_ENFORCE_EQ(x_dims[0], y_lod[ref_level].size() - 1,
+        PADDLE_ENFORCE_EQ(x_dims[0],
+                          static_cast<int64_t>(y_lod[ref_level].size()) - 1,
                           "When Input(X)'s lod is null, the dims[0] of "
                           "Input(X) should match the "
                           "size of Input(Y)'s referred level lod.");
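When neither side of the comparison is a literal, as in the hunk above, the commit uses an explicit cast instead of a suffix: static_cast<int64_t>(y_lod[ref_level].size()) - 1 brings the unsigned lod size into the signed domain of x_dims[0]. A sketch with illustrative names (not the operator's real InferShape code):

```cpp
// cast_compare_sketch.cc -- a minimal sketch with hypothetical names.
#include <cstddef>
#include <cstdint>

bool batch_matches_lod(int64_t x_dim0, size_t ref_lod_size) {
  // return x_dim0 == ref_lod_size - 1;  // int64_t vs. size_t: warns
  // Casting the unsigned side keeps the whole comparison signed, mirroring
  // the static_cast<int64_t>(...) introduced in the hunk above.
  return x_dim0 == static_cast<int64_t>(ref_lod_size) - 1;
}
```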
paddle/fluid/platform/enforce_test.cc
@@ -118,59 +118,58 @@ TEST(ENFORCE_GT, OK) { PADDLE_ENFORCE_GT(2, 1); }
 TEST(ENFORCE_GT, FAIL) {
   bool caught_exception = false;
   try {
-    PADDLE_ENFORCE_GT(1, 2UL);
+    PADDLE_ENFORCE_GT(1, 2);
   } catch (paddle::platform::EnforceNotMet error) {
     caught_exception = true;
-    EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
-                          "Enforce failed. Expected 1 > 2UL, but received 1:1 <= 2UL:2."));
+    EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
+                          "Enforce failed. Expected 1 > 2, but received 1:1 <= 2:2."));
   }
   EXPECT_TRUE(caught_exception);
 }

 TEST(ENFORCE_GE, OK) {
-  PADDLE_ENFORCE_GE(2, 2UL);
-  PADDLE_ENFORCE_GE(3, 2UL);
+  PADDLE_ENFORCE_GE(2, 2);
   PADDLE_ENFORCE_GE(3, 2);
-  PADDLE_ENFORCE_GE(3.21, 2UL);
+  PADDLE_ENFORCE_GE(3.21, 2.0);
 }
 TEST(ENFORCE_GE, FAIL) {
   bool caught_exception = false;
   try {
-    PADDLE_ENFORCE_GE(1, 2UL);
+    PADDLE_ENFORCE_GE(1, 2);
   } catch (paddle::platform::EnforceNotMet error) {
     caught_exception = true;
-    EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
-                          "Enforce failed. Expected 1 >= 2UL, but received 1:1 < 2UL:2."));
+    EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
+                          "Enforce failed. Expected 1 >= 2, but received 1:1 < 2:2."));
   }
   EXPECT_TRUE(caught_exception);
 }

 TEST(ENFORCE_LE, OK) {
   PADDLE_ENFORCE_LE(1, 1);
-  PADDLE_ENFORCE_LE(1, 1UL);
-  PADDLE_ENFORCE_LE(2, 3UL);
-  PADDLE_ENFORCE_LE(2UL, 3);
-  PADDLE_ENFORCE_LE(2UL, 3.2);
+  PADDLE_ENFORCE_LE(1UL, 1UL);
+  PADDLE_ENFORCE_LE(2, 3);
+  PADDLE_ENFORCE_LE(2UL, 3UL);
+  PADDLE_ENFORCE_LE(2.0, 3.2);
 }
 TEST(ENFORCE_LE, FAIL) {
   bool caught_exception = false;
   try {
-    PADDLE_ENFORCE_GT(1, 2UL);
+    PADDLE_ENFORCE_GT(1, 2);
   } catch (paddle::platform::EnforceNotMet error) {
     caught_exception = true;
-    EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
-                          "Enforce failed. Expected 1 > 2UL, but received 1:1 <= 2UL:2."));
+    EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
+                          "Enforce failed. Expected 1 > 2, but received 1:1 <= 2:2."));
   }
   EXPECT_TRUE(caught_exception);
 }

 TEST(ENFORCE_LT, OK) {
   PADDLE_ENFORCE_LT(3, 10);
-  PADDLE_ENFORCE_LT(2, 3UL);
-  PADDLE_ENFORCE_LT(2UL, 3);
+  PADDLE_ENFORCE_LT(2UL, 3UL);
+  PADDLE_ENFORCE_LT(2, 3);
 }
 TEST(ENFORCE_LT, FAIL) {
   bool caught_exception = false;