PaddlePaddle / Paddle
Commit 4ba977c7 (unverified)
Authored by Chen Weihang on Oct 14, 2020
Committed by GitHub on Oct 14, 2020
Polish some error messages in operators (#27876)

* polish some error message
* add white list
* revert shell script change
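The change follows one pattern throughout: bare string messages passed to PADDLE_THROW / PADDLE_ENFORCE_* are replaced by a typed error from platform::errors (Unavailable, InvalidArgument, Fatal) carrying a fuller, formatted message. The standalone C++ sketch below illustrates that before/after pattern only; ENFORCE_GE and errors::InvalidArgument here are simplified hypothetical stand-ins for Paddle's real macros and error helpers in enforce.h, not the library API.

// Minimal standalone sketch of the message-polishing pattern (hypothetical
// stand-ins; the real code uses PADDLE_ENFORCE_* and platform::errors).
#include <cstdio>
#include <stdexcept>
#include <string>

namespace errors {
// Stand-in for platform::errors::InvalidArgument: tags the exception text
// with an error type and a printf-style formatted message.
template <typename... Args>
std::runtime_error InvalidArgument(const char* fmt, Args... args) {
  char buf[256];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::runtime_error(std::string("InvalidArgumentError: ") + buf);
}
}  // namespace errors

// Stand-in for PADDLE_ENFORCE_GE(a, b, error): throws `err` only when a < b.
#define ENFORCE_GE(a, b, err)       \
  do {                              \
    if (!((a) >= (b))) throw (err); \
  } while (0)

void CheckTopK(int k) {
  // Old style: a bare string with no error class and little context, e.g.
  //   PADDLE_ENFORCE_GE(k, 1, "k must >= 1");
  // New style: a typed error with a descriptive, formatted message.
  ENFORCE_GE(k, 1,
             errors::InvalidArgument(
                 "Attribute k must be >= 1, but got k is %d.", k));
}

int main() {
  try {
    CheckTopK(0);
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
    // Prints: InvalidArgumentError: Attribute k must be >= 1, but got k is 0.
  }
  return 0;
}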
Parent: 8e70b18e
Showing 12 changed files with 53 additions and 32 deletions (+53 -32)
paddle/fluid/operators/detail/strided_memcpy.h                       +4  -2
paddle/fluid/operators/distributed/parameter_recv.cc                 +4  -2
paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc    +8  -4
paddle/fluid/operators/interpolate_v2_op.cc                          +18 -15
paddle/fluid/operators/metrics/accuracy_op_xpu.cc                    +1  -1
paddle/fluid/operators/optimizers/adadelta_op.cc                     +2  -1
paddle/fluid/operators/optimizers/dpsgd_op.h                         +8  -2
paddle/fluid/operators/scale_op_xpu.cc                               +1  -1
paddle/fluid/operators/sign_op_xpu.cc                                +1  -1
paddle/fluid/operators/sum_op_xpu.cc                                 +1  -1
paddle/fluid/operators/top_k_op.cc                                   +3  -1
paddle/fluid/operators/top_k_op.cu                                   +2  -1
paddle/fluid/operators/detail/strided_memcpy.h
@@ -41,7 +41,8 @@ struct StridedMemcpyFunctor<T, 0> {
     memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T), cuda_ctx.stream());
 #else
-    PADDLE_THROW("Paddle is not compiled with GPU");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Paddle is not compiled with GPU."));
 #endif
   }
 };
@@ -64,7 +65,8 @@ struct StridedMemcpyFunctor<T, 1> {
     memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T) * dst_dim[0],
                  cuda_ctx.stream());
 #else
-    PADDLE_THROW("Paddle is not compiled with GPU");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Paddle is not compiled with GPU."));
 #endif
   }
 };
paddle/fluid/operators/distributed/parameter_recv.cc
@@ -86,8 +86,10 @@ void RecvSparseLodTensor(const CommContext &rpc_ctx,
       height += splited_var->Get<framework::LoDTensor>().dims()[0];
     }
-    PADDLE_ENFORCE_EQ(merged_var->Get<framework::LoDTensor>().dims()[0], height,
-                      "recved var must has same dims with local var");
+    PADDLE_ENFORCE_EQ(
+        merged_var->Get<framework::LoDTensor>().dims()[0], height,
+        platform::errors::InvalidArgument(
+            "Received variable must has same dimension with local variable."));
     auto *merged_t = merged_var->GetMutable<framework::LoDTensor>();
     auto *merged_d = merged_t->mutable_data<float>(cpu_place);
paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc
@@ -218,9 +218,11 @@ class FusedFCElementwiseLayerNormOpMaker
         .SetDefault(1e-5)
         .AddCustomChecker([](const float &epsilon) {
           PADDLE_ENFORCE_GE(epsilon, 0.0f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
           PADDLE_ENFORCE_LE(epsilon, 0.001f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
         });
     AddAttr<int>("begin_norm_axis",
                  "the axis of `begin_norm_axis ... Rank(Y) - 1` will be "
@@ -228,8 +230,10 @@ class FusedFCElementwiseLayerNormOpMaker
                  "matrix [N,H]. [default 1].")
         .SetDefault(1)
         .AddCustomChecker([](const int &begin_norm_axis) {
-          PADDLE_ENFORCE_GT(begin_norm_axis, 0,
-                            "'begin_norm_axis' should be greater than zero.");
+          PADDLE_ENFORCE_GT(
+              begin_norm_axis, 0,
+              platform::errors::InvalidArgument(
+                  "'begin_norm_axis' should be greater than zero."));
         });
     AddComment(R"DOC(
 fc_out <= fc(X, W, Bias0)
paddle/fluid/operators/interpolate_v2_op.cc
@@ -118,9 +118,10 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
   PADDLE_ENFORCE(
       "bilinear" == interp_method || "nearest" == interp_method ||
           "bicubic" == interp_method,
-      "Interpolation method can only be \"bilinear\" or \"nearest\" when "
-      "Input(X) dimension is 4, but got method = %s .",
-      interp_method);
+      platform::errors::InvalidArgument(
+          "Interpolation method can only be \"bilinear\" or \"nearest\" when "
+          "Input(X) dimension is 4, but got method = %s.",
+          interp_method));
   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
@@ -305,12 +306,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
   if (ctx->HasInput("OutSize") && ctx->IsRuntime()) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
-    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
-                      "OutSize's dimension size must be 1, but got size =%d .",
-                      out_size_dim.size());
+    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
+                      platform::errors::InvalidArgument(
+                          "OutSize's dimension size must be 1, but got size is %d.",
+                          out_size_dim.size()));
     PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
-                      "OutSize's dim[0] must be 3, but got size = %d .",
-                      out_size_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "OutSize's dim[0] must be 3, but got size is %d.",
+                          out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
   }
@@ -330,10 +334,8 @@ class InterpolateV2Op : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of InterpolateV2Op should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of InterpolationOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate");
     auto dim_x = ctx->GetInputDim("X");  // NCHW format
     PADDLE_ENFORCE(
@@ -576,9 +578,10 @@ class InterpolateV2OpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "InterpolateGrad");
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
paddle/fluid/operators/metrics/accuracy_op_xpu.cc
@@ -98,7 +98,7 @@ class AccuracyXPUKernel : public framework::OpKernel<T> {
                                 label_int32_device, num_samples, class_dim,
                                 correct_data, total_data, accuracy_data);
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU accuracy kernel error!"));
     dev_ctx.Wait();
     xpu_free(indices_int32_device);
     xpu_free(label_int32_device);
paddle/fluid/operators/optimizers/adadelta_op.cc
@@ -71,7 +71,8 @@ class AdadeltaOp : public framework::OperatorWithKernel {
     auto param_dim = ctx->GetInputDim("Param");
     PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
-                      "param and grad input of AdadeltaOp should have same dimension");
+                      platform::errors::InvalidArgument(
+                          "Param and grad input of AdadeltaOp should have same dimension."));
     PADDLE_ENFORCE_NE(framework::product(ctx->GetInputDim("AvgSquaredGrad")), 0,
                       platform::errors::InvalidArgument(
paddle/fluid/operators/optimizers/dpsgd_op.h
@@ -50,8 +50,14 @@ class DpsgdOpKernel : public framework::OpKernel<T> {
     auto *param_out = ctx.Output<framework::Tensor>("ParamOut");
     auto sz = param_out->numel();
-    PADDLE_ENFORCE_EQ(param->numel(), sz);
-    PADDLE_ENFORCE_EQ(grad->numel(), sz);
+    PADDLE_ENFORCE_EQ(param->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input parameter's number of elements is error, "
+                          "expected %zu, but received %zu."));
+    PADDLE_ENFORCE_EQ(grad->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input gradient's number of elements is error, "
+                          "expected %zu, but received %zu."));
     const T *lr = learning_rate->data<T>();
     const T *param_data = param->data<T>();
paddle/fluid/operators/scale_op_xpu.cc
@@ -49,7 +49,7 @@ class ScaleXPUKernel : public framework::OpKernel<T> {
     int r = xpu::scale(dev_ctx.x_context(), in->numel(), scale, bias,
                        bias_after_scale, in->data<float>(), out->data<float>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU scale kernel error!"));
   }
 };
paddle/fluid/operators/sign_op_xpu.cc
@@ -30,7 +30,7 @@ class SignXPUKernel : public framework::OpKernel<T> {
     int r = xpu::activation_forward(xpu_context, xpu::Activation_t::SIGN,
                                     in->numel(), in->data<T>(), out->data<T>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sign kernel error!"));
   }
 };
paddle/fluid/operators/sum_op_xpu.cc
@@ -51,7 +51,7 @@ class SumXPUKernel : public framework::OpKernel<T> {
     int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data<T>(),
                            valid_count, out->numel());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sum kernel error!"));
   }
 };
paddle/fluid/operators/top_k_op.cc
@@ -36,7 +36,9 @@ class TopkOp : public framework::OperatorWithKernel {
     auto input_dims = ctx->GetInputDim("X");
     const int k = static_cast<int>(ctx->Attrs().Get<int>("k"));
-    PADDLE_ENFORCE_GE(k, 1, "k must >= 1");
+    PADDLE_ENFORCE_GE(k, 1,
+                      platform::errors::InvalidArgument(
+                          "Attribute k must be >= 1, but got k is %d.", k));
     PADDLE_ENFORCE_GE(input_dims.size(), 1,
                       platform::errors::InvalidArgument("input must have >= 1d shape"));
paddle/fluid/operators/top_k_op.cu
@@ -96,7 +96,8 @@ class TopkOpCUDAKernel : public framework::OpKernel<T> {
             output_data, k, indices_data, input_data, input_width, input_width,
             static_cast<int>(k), gridx, input_height));
       default:
-        PADDLE_THROW("Error");
+        PADDLE_THROW(platform::errors::Unavailable(
+            "Calculation error occurred in TopK Operator's CUDA Kernel."));
     }
   }
 };