PaddlePaddle / Paddle
Commit 0e9cc551 (unverified)
Authored Sep 07, 2023 by cyberslack_lee; committed by GitHub on Sep 07, 2023
[clang-tidy] NO.20 clang-analyzer-core.CallAndMessage (#56954)
Parent: 25f78de0
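For context, clang-analyzer-core.CallAndMessage is the Clang Static Analyzer check that reports function calls whose arguments are uninitialized (garbage) values, calls through null or uninitialized function pointers, and member calls on null object pointers. This commit enables the check in .clang-tidy; most hunks below then append a "// NOLINT" suppression to a line the newly enabled check flags, and one hunk (transfer_layout_elim_pass.cc) fixes a genuinely uninitialized pointer instead. A minimal, hypothetical sketch of what the check reports (not code from this commit):

    #include <cstdio>

    static int Square(int v) { return v * v; }

    int main() {
      int x;                           // never initialized
      std::printf("%d\n", Square(x));  // core.CallAndMessage: "1st function
                                       // call argument is an uninitialized
                                       // value"
      int y = 0;                       // initializing the variable fixes it
      std::printf("%d\n", Square(y));
      return 0;
    }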
Showing 21 changed files with 70 additions and 61 deletions (+70 -61).
.clang-tidy  (+1 -1)
paddle/fluid/eager/grad_node_info.cc  (+1 -1)
paddle/fluid/framework/ir/delete_cast_op_pass.cc  (+1 -1)
paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc  (+1 -1)
paddle/fluid/framework/ir/fuse_adamw_op_pass.cc  (+2 -1)
paddle/fluid/framework/ir/graph_pattern_detector.cc  (+6 -4)
paddle/fluid/framework/ir/transfer_layout_elim_pass.cc  (+1 -1)
paddle/fluid/framework/new_executor/program_interpreter.cc  (+3 -2)
paddle/fluid/framework/op_desc.cc  (+1 -1)
paddle/fluid/imperative/layer.cc  (+1 -1)
paddle/fluid/operators/gru_op.cc  (+8 -7)
paddle/fluid/operators/gru_op.cu.cc  (+1 -1)
paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc  (+1 -1)
paddle/fluid/operators/sequence_ops/sequence_softmax_cudnn_op.cu.cc  (+1 -1)
paddle/phi/kernels/cpu/rnn_functor.h  (+1 -1)
paddle/phi/kernels/cpu/rnn_grad_kernel.cc  (+4 -4)
paddle/phi/kernels/cpu/send_u_recv_grad_kernel.cc  (+2 -1)
paddle/phi/kernels/funcs/activation_functor.h  (+29 -26)
paddle/phi/kernels/funcs/gpc.cc  (+2 -2)
paddle/phi/kernels/gpu/rnn_functor.h  (+1 -1)
paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc  (+2 -2)
.clang-tidy
@@ -50,7 +50,7 @@ bugprone-use-after-move,
 -clang-analyzer-apiModeling.google.GTest,
 -clang-analyzer-apiModeling.llvm.CastValue,
 -clang-analyzer-apiModeling.llvm.ReturnValue,
--clang-analyzer-core.CallAndMessage,
+clang-analyzer-core.CallAndMessage,
 -clang-analyzer-core.DivideZero,
 -clang-analyzer-core.DynamicTypePropagation,
 clang-analyzer-core.NonNullParamChecker,
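In the Checks list of .clang-tidy, a leading "-" disables a check glob, so the diff above enables clang-analyzer-core.CallAndMessage simply by deleting that prefix. Line-level suppressions elsewhere in this commit use clang-tidy's standard NOLINT comments; a short hypothetical illustration of the available forms (Node and Run are assumed names, not Paddle APIs):

    void Demo(Node* n) {
      n->Run();  // flagged if the analyzer believes n may be null here
      n->Run();  // NOLINT -- suppresses every check on this line
      n->Run();  // NOLINT(clang-analyzer-core.CallAndMessage) -- this check only
      // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
      n->Run();
    }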
paddle/fluid/eager/grad_node_info.cc
@@ -126,7 +126,7 @@ void GradNodeBase::SetGradInMeta(const paddle::Tensor& fwd_out,
   } else if (phi::distributed::DistTensor::classof(fwd_out.impl().get())) {
     // TODO(chenweihang): DistTensor contains global and local meta, here
     // only set the local meta now, we should set global meta later
-    dense_tensor =
+    dense_tensor =  // NOLINT
         &(static_cast<phi::distributed::DistTensor*>(fwd_out.impl().get())
               ->value());
   } else {
paddle/fluid/framework/ir/delete_cast_op_pass.cc
@@ -180,7 +180,7 @@ int DeleteCastOpPass::ApplyCastWriteReadPass(ir::Graph* graph) const {
   std::string cast_out_name = write_to_array_0_x_name + "_fp16";
   VarDesc cast_out_desc(cast_out_name);
-  cast_out_desc.SetShape(write_to_array_0_x->Var()->GetShape());
+  cast_out_desc.SetShape(write_to_array_0_x->Var()->GetShape());  // NOLINT
   cast_out_desc.SetDataType(proto::VarType::Type::VarType_Type_FP16);
   auto* cast_out = graph0->CreateVarNode(&cast_out_desc);
paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -121,7 +121,7 @@ static int BuildFusion(Graph* graph,
   if (with_fc_bias) {
     // Add FC-bias with LSTM-bias (into GEMM result to be)
-    auto* fc_bias_var = scope->FindVar(fc_bias->Name());
+    auto* fc_bias_var = scope->FindVar(fc_bias->Name());  // NOLINT
     const auto& fc_bias_tensor = fc_bias_var->Get<phi::DenseTensor>();
     for (int i = 0; i < fc_bias_tensor.numel(); i++) {
       combined_biases[i] += fc_bias_tensor.data<float>()[i];
paddle/fluid/framework/ir/fuse_adamw_op_pass.cc
@@ -102,7 +102,8 @@ void InsertOpToGraph(const std::vector<std::vector<Node *>> &inout_node_vectors,
     i++;
   }
-  fuse_adamw_op_desc.SetInput("LearningRate", {config.first_lr->Name()});
+  fuse_adamw_op_desc.SetInput("LearningRate",
+                              {config.first_lr->Name()});  // NOLINT
   if (config.use_skip_update) {
     fuse_adamw_op_desc.SetInput("SkipUpdate", {config.first_skip_update->Name()});
paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -72,10 +72,12 @@ PDNode *PDPattern::RetrieveNode(const std::string &id) const {
 }
 
 void PDPattern::AddEdge(PDNode *a, PDNode *b) {
-  PADDLE_ENFORCE_NOT_NULL(
-      a, platform::errors::NotFound("PDNode %s is not found.", a->name()));
-  PADDLE_ENFORCE_NOT_NULL(
-      b, platform::errors::NotFound("PDNode %s is not found.", b->name()));
+  PADDLE_ENFORCE_NOT_NULL(
+      a,
+      platform::errors::NotFound("PDNode %s is not found.", a->name()));  // NOLINT
+  PADDLE_ENFORCE_NOT_NULL(
+      b,
+      platform::errors::NotFound("PDNode %s is not found.", b->name()));  // NOLINT
   PADDLE_ENFORCE_NE(a,
                     b,
                     platform::errors::PermissionDenied(
paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
@@ -49,7 +49,7 @@ void TransferLayoutElimPass::PutTranferlayoutAfterOp(
   std::unordered_set<const Node *> remove_nodes;
   // Ensure op_node has only one output!
   int op_node_useful_output = 0;
-  Node *var2;
+  Node *var2 = nullptr;
   for (auto ele : op_node->outputs) {
     if (!ele->outputs.empty()) {
       op_node_useful_output++;
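The hunk above is the one place in this commit that changes behavior rather than adding a suppression: "Node *var2;" left var2 uninitialized, so on a path where the loop never assigns it, any later use would read an indeterminate pointer, which is exactly what core.CallAndMessage reports. A hypothetical reduction of the pattern (assumed types, not the real pass code):

    #include <vector>

    struct Node { std::vector<Node*> outputs; };

    Node* FindUsefulOutput(const Node& op_node) {
      Node* var2 = nullptr;  // without "= nullptr", returning var2 when no
                             // output qualifies would read an uninitialized value
      for (Node* ele : op_node.outputs) {
        if (!ele->outputs.empty()) var2 = ele;
      }
      return var2;
    }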
paddle/fluid/framework/new_executor/program_interpreter.cc
@@ -909,7 +909,8 @@ void ProgramInterpreter::RunOperator(const Instruction& instr_node) {
     }
   }
-  VLOG(4) << "End run " << place << " " << op->DebugStringEx(local_scope);
+  VLOG(4) << "End run " << place << " "
+          << op->DebugStringEx(local_scope);  // NOLINT
   if (!instr_node.InplaceBackMap().empty()) {
     platform::RecordEvent inplaceback_event(
@@ -933,7 +934,7 @@ void ProgramInterpreter::RunOperator(const Instruction& instr_node) {
     instr_node.DeviceContext().Wait();
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
     PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
-    VLOG(4) << "Operator(" << op->Type()
+    VLOG(4) << "Operator(" << op->Type()  // NOLINT
             << "): context wait and get last error";
 #endif
   }
paddle/fluid/framework/op_desc.cc
@@ -1200,7 +1200,7 @@ VarDesc *OpDesc::FindVarRecursive(const std::string &name) {
   PADDLE_THROW(platform::errors::NotFound(
       "Not found Var(%s) from Block(%d) back into global Block.",
       name,
-      block_->ID()));
+      block_->ID()));  // NOLINT
 }
 
 CompileTimeInferShapeContext::CompileTimeInferShapeContext(
paddle/fluid/imperative/layer.cc
@@ -387,7 +387,7 @@ void VarBase::CopyFrom(const VarBase& src, const bool blocking) {
                       src.Name()));
     place = Place();
   } else {
-    dst_tensor->set_lod(src_tensor.lod());
+    dst_tensor->set_lod(src_tensor.lod());  // NOLINT
     dst_tensor->Resize(src_tensor.dims());
   }
   framework::TensorCopy(src_tensor, place, dst_tensor);
paddle/fluid/operators/gru_op.cc
@@ -513,13 +513,14 @@ class GRUCPUKernel : public framework::OpKernel<T> {
         gru_value.gate_value = gate_t.data<T>();
         gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-        phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,
-                                                              gru_value,
-                                                              frame_size,
-                                                              cur_batch_size,
-                                                              active_node,
-                                                              active_gate,
-                                                              origin_mode);
+        phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(
+            dev_ctx,  // NOLINT
+            gru_value,
+            frame_size,
+            cur_batch_size,
+            active_node,
+            active_gate,
+            origin_mode);
         gru_value.prev_out_value = gru_value.output_value;
       }
paddle/fluid/operators/gru_op.cu.cc
@@ -109,7 +109,7 @@ class GRUKernel : public framework::OpKernel<T> {
       gru_value.output_value = hidden_t.data<T>();
       gru_value.gate_value = gate_t.data<T>();
       gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-      phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,
+      phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,  // NOLINT
                                                             gru_value,
                                                             frame_size,
                                                             cur_batch_size,
paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
@@ -600,7 +600,7 @@ class MatMulGradMKLDNNKernel : public paddle::framework::OpKernel<T> {
                       phi::DenseTensor *out) const {
     // gradient is calculated in a different way when broadcasting is used
     bool need_combine = (x->dims().size() == 3 || y->dims().size() == 3) &&
-                        out->dims().size() == 2;
+                        out->dims().size() == 2;  // NOLINT
     phi::DenseTensor x_combined, y_combined;
     if (need_combine) {
paddle/fluid/operators/sequence_ops/sequence_softmax_cudnn_op.cu.cc
@@ -81,7 +81,7 @@ class SequenceSoftmaxGradCUDNNKernel : public framework::OpKernel<T> {
     auto& lod = x->lod();
     const size_t level = lod.size() - 1;
-    x_grad->mutable_data<T>(ctx.GetPlace());
+    x_grad->mutable_data<T>(ctx.GetPlace());  // NOLINT
     for (int i = 0; i < static_cast<int>(lod[level].size()) - 1; ++i) {
       int start_pos = static_cast<int>(lod[level][i]);
       int end_pos = static_cast<int>(lod[level][i + 1]);
paddle/phi/kernels/cpu/rnn_functor.h
@@ -295,7 +295,7 @@ void RnnFunc(const Context& dev_ctx,
                         num_layers,
                         init_h_dims[0]));
   if (is_lstm(cell_type)) {
-    const auto& init_c_dims = init_c->dims();
+    const auto& init_c_dims = init_c->dims();  // NOLINT
     PADDLE_ENFORCE_EQ(init_c_dims[0],
                       num_layers * direction_num,
                       phi::errors::InvalidArgument(
paddle/phi/kernels/cpu/rnn_grad_kernel.cc
@@ -32,7 +32,7 @@ template <typename T>
 void BackupTensor(const CPUContext& dev_ctx,
                   DenseTensor* dst,
                   DenseTensor* src) {
-  dst->Resize(src->dims());
+  dst->Resize(src->dims());  // NOLINT
   dev_ctx.Alloc<T>(dst);
   Copy(dev_ctx, *src, dev_ctx.GetPlace(), false, dst);
 }
@@ -250,7 +250,7 @@ struct GRUGradCell : GradCell<T> {
     gru_value.gate_weight = weight_hh->data<T>();
     gru_grad.gate_grad = grad_gate->data<T>();
-    gru_grad.reset_output_grad = grad_state->data<T>();
+    gru_grad.reset_output_grad = grad_state->data<T>();  // NOLINT
     gru_grad.prev_out_grad = grad_pre_hidden->data<T>();
     gru_grad.output_grad = grad_hidden->data<T>();
     gru_grad.gate_weight_grad = grad_weight_hh->data<T>();
@@ -314,9 +314,9 @@ struct LSTMGradCell : GradCell<T> {
     lstm_value.gate_value = gate_tensor->data<T>();
     lstm_value.state_value = state_tensor->data<T>();
     lstm_value.state_active_value = act_state_tensor->data<T>();
-    lstm_value.prev_state_value = pre_state->data<T>();
-    lstm_grad.state_grad = grad_state->data<T>();
+    lstm_value.prev_state_value = pre_state->data<T>();  // NOLINT
+    lstm_grad.state_grad = grad_state->data<T>();  // NOLINT
     lstm_grad.gate_grad = grad_gate->data<T>();
     lstm_grad.output_grad = grad_hidden->data<T>();
     lstm_grad.prev_state_grad = grad_pre_state->data<T>();
paddle/phi/kernels/cpu/send_u_recv_grad_kernel.cc
@@ -55,7 +55,8 @@ void GraphSendRecvCpuGradLoop(const int& index_size,
     const IndexT& forward_src_idx = d_index[i];
     const IndexT& forward_dst_idx = s_index[i];
     auto input_slice = input.Slice(forward_src_idx, forward_src_idx + 1);
-    auto output_slice = output->Slice(forward_dst_idx, forward_dst_idx + 1);
+    auto output_slice =
+        output->Slice(forward_dst_idx, forward_dst_idx + 1);  // NOLINT
     auto eigen_input = phi::EigenVector<T>::Flatten(input_slice);
     auto eigen_output = phi::EigenVector<T>::Flatten(output_slice);
paddle/phi/kernels/funcs/activation_functor.h
@@ -528,7 +528,7 @@ struct MishFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    auto sp = (x > static_cast<T>(threshold))
+    auto sp = (x > static_cast<T>(threshold))  // NOLINT
                   .select(x, (static_cast<T>(1) + x.exp()).log());
     out.device(d) = x * sp.tanh();
   }
@@ -551,7 +551,7 @@ struct MishGradFunctor : public BaseActivationFunctor<T> {
             typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-    auto sp = (x > static_cast<T>(threshold))
+    auto sp = (x > static_cast<T>(threshold))  // NOLINT
                   .select(x, (static_cast<T>(1) + x.exp()).log());
     auto gsp = static_cast<T>(1) - (-sp).exp();
     auto tsp = sp.tanh();
@@ -571,8 +571,8 @@ struct STanhFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    out.device(d) =
-        static_cast<T>(scale_b) * (static_cast<T>(scale_a) * x).tanh();
+    out.device(d) = static_cast<T>(scale_b) *
+                    (static_cast<T>(scale_a) * x).tanh();  // NOLINT
   }
 };
@@ -738,7 +738,7 @@ struct SoftplusFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    auto x_beta = static_cast<T>(beta) * x;
+    auto x_beta = static_cast<T>(beta) * x;  // NOLINT
     out.device(d) = (x_beta > static_cast<T>(threshold))
                         .select(x,
                                 (static_cast<T>(1) + x_beta.exp()).log() /
@@ -764,7 +764,7 @@ struct SoftplusGradFunctor : public BaseActivationFunctor<T> {
             typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-    auto x_beta = static_cast<T>(beta) * x;
+    auto x_beta = static_cast<T>(beta) * x;  // NOLINT
     dx.device(d) = (x_beta > static_cast<T>(threshold))
                        .select(dout,
                                dout / (static_cast<T>(1) + (-x_beta).exp()));
@@ -790,7 +790,7 @@ struct SoftplusDoubleGradFunctor : public BaseActivationFunctor<T> {
     auto* d = dev.eigen_device();
     auto x = EigenVector<T>::Flatten(
         GET_DATA_SAFELY(X, "Input", "X", "SoftplusDoubleGrad"));
-    auto x_beta = static_cast<T>(beta) * x;
+    auto x_beta = static_cast<T>(beta) * x;  // NOLINT
     auto ddx = EigenVector<T>::Flatten(
         GET_DATA_SAFELY(ddX, "Input", "DDX", "SoftplusDoubleGrad"));
@@ -1453,8 +1453,8 @@ struct HardTanhFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    out.device(d) =
-        x.cwiseMax(static_cast<T>(t_min)).cwiseMin(static_cast<T>(t_max));
+    out.device(d) = x.cwiseMax(static_cast<T>(t_min))
+                        .cwiseMin(static_cast<T>(t_max));  // NOLINT
   }
 };
@@ -1471,9 +1471,9 @@ struct HardTanhGradFunctor : public BaseActivationFunctor<T> {
             typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-    dx.device(d) = dout * ((x > static_cast<T>(t_min)) *
-                           (x < static_cast<T>(t_max)))
-                       .template cast<T>();
+    dx.device(d) = dout * ((x > static_cast<T>(t_min)) *
+                           (x < static_cast<T>(t_max)))  // NOLINT
+                       .template cast<T>();
   }
   static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
@@ -1508,8 +1508,8 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor<T> {
             typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-    auto temp1 = static_cast<T>(alpha) *
-                 (x < static_cast<T>(0)).template cast<T>();
+    auto temp1 = static_cast<T>(alpha) *
+                 (x < static_cast<T>(0)).template cast<T>();  // NOLINT
     auto temp2 = (x >= static_cast<T>(0)).template cast<T>();
     dx.device(d) = dout * (temp1 + temp2).template cast<T>();
   }
@@ -1593,8 +1593,8 @@ struct Relu6Functor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    out.device(d) =
-        x.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(threshold));
+    out.device(d) = x.cwiseMax(static_cast<T>(0))
+                        .cwiseMin(static_cast<T>(threshold));  // NOLINT
   }
 };
@@ -1731,7 +1731,8 @@ struct ELUFunctor : public BaseActivationFunctor<T> {
   void operator()(Device d, X x, Out out) const {
     out.device(d) =
         (x < static_cast<T>(0))
-            .select(static_cast<T>(alpha) * (x.exp() - static_cast<T>(1)), x);
+            .select(static_cast<T>(alpha) * (x.exp() - static_cast<T>(1)),
+                    x);  // NOLINT
   }
 };
@@ -2099,7 +2100,7 @@ struct HardSigmoidFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    auto temp = x * static_cast<T>(slope) + static_cast<T>(offset);
+    auto temp = x * static_cast<T>(slope) + static_cast<T>(offset);  // NOLINT
     out.device(d) = temp.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(1));
   }
@@ -2118,7 +2119,7 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor<T> {
             typename dOut,
             typename dX>
   void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
-    dx.device(d) = dout *
+    dx.device(d) = dout *  // NOLINT
                    ((out > static_cast<T>(0)) * (out < static_cast<T>(1)))
                        .template cast<T>() *
                    static_cast<T>(slope);
@@ -2353,7 +2354,7 @@ struct HardSwishFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    out.device(d) = (x + static_cast<T>(offset))
+    out.device(d) = (x + static_cast<T>(offset))  // NOLINT
                         .cwiseMax(static_cast<T>(0))
                         .cwiseMin(static_cast<T>(threshold)) *
                     x / static_cast<T>(scale);
@@ -2375,8 +2376,9 @@ struct HardSwishGradFunctor : public BaseActivationFunctor<T> {
             typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-    auto tmp = ((x + static_cast<T>(offset)) < static_cast<T>(threshold))
-                   .template cast<T>();
+    auto tmp = ((x + static_cast<T>(offset)) <
+                static_cast<T>(threshold))  // NOLINT
+                   .template cast<T>();
     dx.device(d) =
         dout *
        (((x + static_cast<T>(offset)) > static_cast<T>(0)).template cast<T>() *
@@ -2397,7 +2399,8 @@ struct SwishFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    out.device(d) = x / (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
+    out.device(d) =
+        x / (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());  // NOLINT
   }
 };
@@ -2431,7 +2434,7 @@ struct PowFunctor : public BaseActivationFunctor<T> {
   }
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
-    out.device(d) = x.pow(static_cast<T>(factor));
+    out.device(d) = x.pow(static_cast<T>(factor));  // NOLINT
   }
 };
@@ -2585,8 +2588,8 @@ struct CELUFunctor : public BaseActivationFunctor<T> {
   void operator()(Device d, X x, Out out) const {
     out.device(d) =
         (x < static_cast<T>(0))
-            .select(static_cast<T>(alpha) *
-                        ((x / static_cast<T>(alpha)).exp() - static_cast<T>(1)),
+            .select(static_cast<T>(alpha) *
+                        ((x / static_cast<T>(alpha)).exp() - static_cast<T>(1)),  // NOLINT
                     x);
   }
 };
paddle/phi/kernels/funcs/gpc.cc
@@ -893,7 +893,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Copy the old contour and hole data into the extended arrays */
   for (c = 0; c < p->num_contours; c++) {
     extended_hole[c] = p->hole[c];
-    extended_contour[c] = p->contour[c];
+    extended_contour[c] = p->contour[c];  // NOLINT
   }
 
   /* Copy the new contour and hole onto the end of the extended arrays */
@@ -905,7 +905,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
                  new_contour->num_vertices * static_cast<int>(sizeof(gpc_vertex)),
                  const_cast<char *>("contour addition"));
   for (v = 0; v < new_contour->num_vertices; v++) {
-    extended_contour[c].vertex[v] = new_contour->vertex[v];
+    extended_contour[c].vertex[v] = new_contour->vertex[v];  // NOLINT
   }
 
   /* Dispose of the old contour */
paddle/phi/kernels/gpu/rnn_functor.h
@@ -110,7 +110,7 @@ class RNNDescriptors {
       dropout_state->Resize({static_cast<int64_t>(state_size)});
       dev_ctx.template Alloc<uint8_t>(dropout_state);
     }
-    dropout_desc_.descriptor(handle,
+    dropout_desc_.descriptor(handle,  // NOLINT
                              dev_ctx.GetPlace(),
                              is_initialized,
                              dropout_prob_,
paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
@@ -40,7 +40,7 @@ void AllocCsrPtr(const Context& dev_ctx,
   DenseTensor dx_crows = phi::EmptyLike<IntT>(dev_ctx, x.crows());
   DenseTensor dx_cols = phi::EmptyLike<IntT>(dev_ctx, x.cols());
   DenseTensor dx_values = phi::EmptyLike<T>(dev_ctx, x.values());
-  dx->set_meta(x.meta());
+  dx->set_meta(x.meta());  // NOLINT
   dx->SetMember(dx_crows, dx_cols, dx_values, x.dims());
 }
@@ -50,7 +50,7 @@ void AllocCooPtr(const Context& dev_ctx,
                  SparseCooTensor* dx) {
   DenseTensor dx_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
   DenseTensor dx_values = phi::EmptyLike<T>(dev_ctx, x.values());
-  dx->set_meta(x.meta());
+  dx->set_meta(x.meta());  // NOLINT
   dx->SetMember(dx_indices, dx_values, x.dims(), x.coalesced());
 }