Commit 30a02d27 (unverified)
Authored Aug 07, 2023 by Ruibin Cheung; committed via GitHub on Aug 07, 2023

[clang-tidy] enable modernize-use-equals-default (#55983)

Parent: 4d094b0c
Showing 95 changed files with 121 additions and 124 deletions (+121 -124)
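The pattern applied throughout this commit is the one `modernize-use-equals-default` enforces: special member functions with empty hand-written bodies are rewritten as `= default`, while members whose bodies do real work keep their braces (many of those gain a `// NOLINT` marker, as the hunks below show). A minimal illustrative sketch — the class names are hypothetical, not taken from the diff:

// before: empty bodies written out by hand; clang-tidy flags both.
class WidgetBefore {
 public:
  WidgetBefore() {}           // modernize-use-equals-default
  virtual ~WidgetBefore() {}  // modernize-use-equals-default
};

// after: let the compiler generate the members instead.
class WidgetAfter {
 public:
  WidgetAfter() = default;
  virtual ~WidgetAfter() = default;
};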
.clang-tidy  +1 -1
paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc  +1 -1
paddle/fluid/distributed/collective/process_group_nccl.cc  +1 -1
paddle/fluid/distributed/fleet_executor/fleet_executor.cc  +1 -1
paddle/fluid/distributed/fleet_executor/interceptor.cc  +1 -1
paddle/fluid/distributed/ps/service/communicator/communicator.cc  +1 -1
paddle/fluid/distributed/ps/table/common_graph_table.cc  +1 -1
paddle/fluid/framework/data_feed.cc  +1 -1
paddle/fluid/framework/data_feed.h  +1 -1
paddle/fluid/framework/details/fetch_async_op_handle.cc  +1 -1
paddle/fluid/framework/details/fetch_op_handle.cc  +1 -1
paddle/fluid/framework/details/nan_inf_utils_detail.cc  +1 -1
paddle/fluid/framework/details/op_handle_base.cc  +1 -1
paddle/fluid/framework/details/scale_loss_grad_op_handle.cc  +1 -1
paddle/fluid/framework/details/ssa_graph_executor.cc  +1 -1
paddle/fluid/framework/details/var_handle.cc  +1 -1
paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc  +1 -1
paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.h  +1 -1
paddle/fluid/framework/ir/constant_folding_pass.cc  +1 -1
paddle/fluid/framework/ir/conv_bn_fuse_pass.cc  +4 -3
paddle/fluid/framework/ir/cost_model.cc  +1 -1
paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc  +1 -1
paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc  +1 -1
paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h  +1 -1
paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.cc  +1 -1
paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.h  +1 -1
paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.cc  +1 -1
paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.h  +1 -1
paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass_tester.cc  +1 -1
paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc  +2 -1
paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.h  +1 -1
paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc  +1 -1
paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.h  +1 -1
paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc  +1 -1
paddle/fluid/framework/ir/reverse_roll_fuse_pass.h  +1 -1
paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc  +1 -1
paddle/fluid/framework/ir/shuffle_channel_detect_pass.h  +1 -1
paddle/fluid/framework/ir/sigmoid_elementmul_fuse_pass.cc  +1 -1
paddle/fluid/framework/ir/trt_map_ops_to_matrix_multiply_pass.cc  +1 -1
paddle/fluid/framework/ir/yolo_box_fuse_pass.cc  +1 -1
paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc  +1 -1
paddle/fluid/framework/new_executor/new_executor_defs.cc  +1 -1
paddle/fluid/framework/phi_utils.cc  +1 -1
paddle/fluid/framework/program_utils.cc  +1 -1
paddle/fluid/framework/reader.cc  +1 -1
paddle/fluid/framework/var_type_traits.cc  +1 -1
paddle/fluid/imperative/amp_auto_cast.cc  +4 -2
paddle/fluid/inference/analysis/analyzer.cc  +1 -1
paddle/fluid/inference/analysis/passes/passes.cc  +1 -1
paddle/fluid/inference/api/resource_manager.cc  +1 -1
paddle/fluid/inference/tensorrt/convert/activation_op.cc  +1 -1
paddle/fluid/inference/tensorrt/convert/elementwise_op.cc  +2 -2
paddle/fluid/inference/tensorrt/convert/equal_op.cc  +2 -2
paddle/fluid/inference/tensorrt/convert/square_op.cc  +1 -1
paddle/fluid/inference/tensorrt/convert/top_k_op.cc  +1 -1
paddle/fluid/inference/tensorrt/convert/unary_op.cc  +1 -1
paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc  +1 -1
paddle/fluid/inference/tensorrt/op_teller.cc  +4 -4
paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_plugin.cu  +1 -1
paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.cu  +1 -1
paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.cu  +1 -1
paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.cu  +2 -2
paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu  +1 -1
paddle/fluid/ir_adaptor/translator/attribute_translator.cc  +1 -1
paddle/fluid/memory/allocation/allocator_facade.cc  +2 -2
paddle/fluid/operators/collective/c_comm_init_all_op.cc  +1 -1
paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc  +1 -1
paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu  +1 -1
paddle/fluid/operators/math/beam_search.cc  +1 -1
paddle/fluid/operators/math/beam_search.cu  +1 -1
paddle/fluid/operators/math/sampler.cc  +1 -1
paddle/fluid/operators/reader/py_reader.cc  +3 -1
paddle/fluid/platform/profiler/cuda_tracer.cc  +1 -1
paddle/fluid/platform/profiler/custom_device/custom_tracer.cc  +1 -1
paddle/ir/core/ir_context.cc  +1 -1
paddle/ir/core/storage_manager.cc  +1 -1
paddle/phi/api/include/tensor.h  +1 -1
paddle/phi/api/lib/tensor.cc  +1 -6
paddle/phi/backends/dynload/dynamic_loader.cc  +1 -1
paddle/phi/backends/gpu/gpu_context.cc  +1 -1
paddle/phi/backends/onednn/onednn_context.cc  +1 -1
paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc  +1 -1
paddle/phi/kernels/cpu/rnn_grad_kernel.cc  +4 -4
paddle/phi/kernels/cpu/rnn_kernel.cc  +2 -2
paddle/phi/kernels/funcs/concat_and_split_functor.cu  +5 -7
paddle/phi/kernels/gpu/masked_select_grad_kernel.cu  +1 -1
paddle/phi/kernels/gpu/masked_select_kernel.cu  +1 -1
paddle/phi/kernels/gpu/p_norm_kernel.cu  +2 -2
paddle/phi/kernels/gpu/rms_norm_kernel.cu  +1 -3
test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc  +1 -1
test/cpp/fluid/fused/cudnn_norm_conv_test.cc  +1 -1
test/cpp/fluid/fused/fused_dropout_act_bias_test.cu  +1 -1
test/cpp/fluid/fused/fused_layernorm_residual_dropout_bias_test.cu  +1 -1
test/cpp/inference/api/analyzer_dam_tester.cc  +2 -2
test/cpp/inference/api/api_tester.cc  +1 -1
.clang-tidy
@@ -181,7 +181,7 @@ modernize-redundant-void-arg,
 -modernize-unary-static-assert,
 -modernize-use-bool-literals,
 modernize-use-emplace,
--modernize-use-equals-default,
+modernize-use-equals-default,
 -modernize-use-equals-delete,
 -modernize-use-noexcept,
 modernize-use-nullptr,
paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc
@@ -34,7 +34,7 @@ DistTensorSpec::DistTensorSpec(const DistTensorSpec& spec) {
   dist_attr_.copy_from(spec.dist_attr());
 }
-DistTensorSpec::~DistTensorSpec() {}
+DistTensorSpec::~DistTensorSpec() = default;
 DistTensorSpec::DistTensorSpec(const Tensor& tensor) {
   shape_ = tensor.shape();
paddle/fluid/distributed/collective/process_group_nccl.cc
@@ -47,7 +47,7 @@ ProcessGroupNCCL::NCCLTask::NCCLTask(const Place& place,
       comm_event_(place),
       task_place_(place) {}
-ProcessGroupNCCL::NCCLTask::~NCCLTask() {}
+ProcessGroupNCCL::NCCLTask::~NCCLTask() = default;
 bool ProcessGroupNCCL::NCCLTask::IsCompleted() { return comm_event_.Query(); }
paddle/fluid/distributed/fleet_executor/fleet_executor.cc
@@ -48,7 +48,7 @@ FleetExecutor::FleetExecutor(const FleetExecutorDesc& exe_desc)
   InitMessageBus();
 }
-FleetExecutor::~FleetExecutor() {
+FleetExecutor::~FleetExecutor() {  // NOLINT
   for (const auto& carrier_id : carrier_ids_) {
     GlobalMap<std::string, Carrier>::Get(carrier_id)->Release();
   }
paddle/fluid/distributed/fleet_executor/interceptor.cc
@@ -24,7 +24,7 @@ namespace distributed {
 Interceptor::Interceptor(int64_t interceptor_id, TaskNode* node)
     : interceptor_id_(interceptor_id), node_(node) {}
-Interceptor::~Interceptor() {
+Interceptor::~Interceptor() {  // NOLINT
   // FIXME(wangxi): throw in stop function
   // std::lock_guard<std::mutex> lock(mutex_);
   // PADDLE_ENFORCE_EQ(messages_.empty(), true,
paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -38,7 +38,7 @@ inline double GetCurrentUS() {
   return 1e+6 * time.tv_sec + time.tv_usec;
 }
-Communicator::Communicator() {}
+Communicator::Communicator() = default;
 void Communicator::InitGFlag(const std::string& gflags) {
   VLOG(3) << "Init With Gflags:" << gflags;
paddle/fluid/distributed/ps/table/common_graph_table.cc
@@ -1205,7 +1205,7 @@ Node *GraphShard::find_node(uint64_t id) {
   return iter == node_location.end() ? nullptr : bucket[iter->second];
 }
-GraphTable::~GraphTable() {
+GraphTable::~GraphTable() {  // NOLINT
 #ifdef PADDLE_WITH_GPU_GRAPH
   clear_graph();
 #endif
paddle/fluid/framework/data_feed.cc
@@ -1999,7 +1999,7 @@ void PaddleBoxDataFeed::PutToFeedVec(const std::vector<Record*>& ins_vec) {
 #endif
 }
-SlotRecordInMemoryDataFeed::~SlotRecordInMemoryDataFeed() {
+SlotRecordInMemoryDataFeed::~SlotRecordInMemoryDataFeed() {  // NOLINT
 #if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
   stop_token_.store(true);
   for (auto& thread : pack_threads_) {
paddle/fluid/framework/data_feed.h
@@ -1821,7 +1821,7 @@ class MultiSlotInMemoryDataFeed : public InMemoryDataFeed<Record> {
 class SlotRecordInMemoryDataFeed : public InMemoryDataFeed<SlotRecord> {
  public:
-  SlotRecordInMemoryDataFeed() {}
+  SlotRecordInMemoryDataFeed() = default;
   virtual ~SlotRecordInMemoryDataFeed();
   void Init(const DataFeedDesc& data_feed_desc) override;
   void LoadIntoMemory() override;
paddle/fluid/framework/details/fetch_async_op_handle.cc
@@ -37,7 +37,7 @@ FetchAsyncOpHandle::FetchAsyncOpHandle(ir::Node *node,
       local_exec_scopes_(local_exec_scopes),
       return_merged_(return_merged) {}
-FetchAsyncOpHandle::~FetchAsyncOpHandle() {}
+FetchAsyncOpHandle::~FetchAsyncOpHandle() = default;
 void FetchAsyncOpHandle::RecordWaitEventOnCtx(
     platform::DeviceContext *waited_ctx) {
paddle/fluid/framework/details/fetch_op_handle.cc
@@ -35,7 +35,7 @@ FetchOpHandle::FetchOpHandle(ir::Node *node,
       local_exec_scopes_(local_exec_scopes),
       return_merged_(return_merged) {}
-FetchOpHandle::~FetchOpHandle() {}
+FetchOpHandle::~FetchOpHandle() = default;
 void FetchOpHandle::RecordWaitEventOnCtx(
     platform::DeviceContext *waited_ctx) {
   PADDLE_THROW(platform::errors::PermissionDenied(
paddle/fluid/framework/details/nan_inf_utils_detail.cc
@@ -27,7 +27,7 @@ namespace paddle {
 namespace framework {
 namespace details {
 struct DebugTools {
-  DebugTools() {}
+  DebugTools() = default;
   std::string path = "";
   int stack_limit = 1;
 };
paddle/fluid/framework/details/op_handle_base.cc
@@ -30,7 +30,7 @@ std::string OpHandleBase::DebugString() const {
   return ss.str();
 }
-OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {
+OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {  // NOLINT
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   for (auto& ev : events_) {
     if (ev.second) {
paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
@@ -40,7 +40,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(ir::Node *node,
   this->SetDeviceContext(place_, dev_ctx);
 }
-ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {}
+ScaleLossGradOpHandle::~ScaleLossGradOpHandle() = default;
 struct ScaleLossGradFunctor {
   float coeff_;
paddle/fluid/framework/details/ssa_graph_executor.cc
@@ -19,7 +19,7 @@
 namespace paddle {
 namespace framework {
 namespace details {
-SSAGraphExecutor::~SSAGraphExecutor() {}
+SSAGraphExecutor::~SSAGraphExecutor() = default;
 void ClearFetchOp(ir::Graph* graph, std::vector<OpHandleBase*>* fetch_ops) {
   if (fetch_ops->empty()) return;
paddle/fluid/framework/details/var_handle.cc
@@ -18,7 +18,7 @@ namespace paddle {
 namespace framework {
 namespace details {
-VarHandleBase::~VarHandleBase() {}
+VarHandleBase::~VarHandleBase() = default;
 VarHandle::~VarHandle() { VLOG(4) << "deleting var handle " << DebugString(); }
paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc
@@ -24,7 +24,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
-AdaptivePool2dConvertGlobalPass::AdaptivePool2dConvertGlobalPass() {
+AdaptivePool2dConvertGlobalPass::AdaptivePool2dConvertGlobalPass() {  // NOLINT
   AddOpCompat(OpCompat("pool2d"))
       .AddInput("X")
       .IsTensor()
paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.h
@@ -32,7 +32,7 @@ class Graph;
 class AdaptivePool2dConvertGlobalPass : public FusePassBase {
  public:
   AdaptivePool2dConvertGlobalPass();
-  virtual ~AdaptivePool2dConvertGlobalPass() {}
+  virtual ~AdaptivePool2dConvertGlobalPass() = default;

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/constant_folding_pass.cc
@@ -51,7 +51,7 @@ struct ConstantFolding : public PatternBase {
 };
 }  // namespace patterns
-ConstantFoldingPass::ConstantFoldingPass() {}
+ConstantFoldingPass::ConstantFoldingPass() = default;
 void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
   PADDLE_ENFORCE_NOT_NULL(
paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -756,7 +756,7 @@ void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(found_conv_bn_count);
 }
-ConvTransposeBNFusePass::ConvTransposeBNFusePass() {
+ConvTransposeBNFusePass::ConvTransposeBNFusePass() {  // NOLINT
   AddOpCompat(OpCompat("conv2d_transpose"))
       .AddInput("Input")
       .IsTensor()
@@ -800,7 +800,8 @@ ConvTransposeBNFusePass::ConvTransposeBNFusePass() {
       .End();
 }
-ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
+ConvTransposeEltwiseAddBNFusePass::
+    ConvTransposeEltwiseAddBNFusePass() {  // NOLINT
   AddOpCompat(OpCompat("conv2d_transpose"))
       .AddInput("Input")
       .IsTensor()
@@ -844,7 +845,7 @@ ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
       .End();
 }
-DepthwiseConvBNFusePass::DepthwiseConvBNFusePass() {
+DepthwiseConvBNFusePass::DepthwiseConvBNFusePass() {  // NOLINT
   AddOpCompat(OpCompat("depthwise_conv2d"))
       .AddInput("Input")
       .IsTensor()
paddle/fluid/framework/ir/cost_model.cc
@@ -30,7 +30,7 @@ using platform::MemEvent;
 const double CostData::NOT_MEASURED = -1;
-CostData::~CostData() {
+CostData::~CostData() {  // NOLINT
   // TODO(zhhsplendid): when we save a copy of program/graph, we should delete
   // here.
 }
paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc
@@ -56,7 +56,7 @@ static const std::initializer_list<std::string> rnn_variable_names{
 class ComputePropagateScalesMkldnnPassTest : public testing::Test {
  public:
-  ComputePropagateScalesMkldnnPassTest() {
+  ComputePropagateScalesMkldnnPassTest() {  // NOLINT
     pass.reset(new ComputePropagateScalesMkldnnPass());
   }
paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
@@ -32,7 +32,7 @@ class Graph;
   PADDLE_ENFORCE_NOT_NULL(                                                   \
       id, platform::errors::InvalidArgument("Subgraph has no node %s.", #id));
-DepthwiseConvMKLDNNPass::DepthwiseConvMKLDNNPass() {
+DepthwiseConvMKLDNNPass::DepthwiseConvMKLDNNPass() {  // NOLINT
   AddOpCompat(OpCompat("depthwise_conv2d"))
       .AddInput("Input")
       .IsTensor()
paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h
@@ -25,7 +25,7 @@ class Graph;
 class DepthwiseConvMKLDNNPass : public FusePassBase {
  public:
   DepthwiseConvMKLDNNPass();
-  virtual ~DepthwiseConvMKLDNNPass() {}
+  virtual ~DepthwiseConvMKLDNNPass() = default;

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.cc
@@ -23,7 +23,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
-Int8ScaleCalculationMkldnnPass::Int8ScaleCalculationMkldnnPass() {
+Int8ScaleCalculationMkldnnPass::Int8ScaleCalculationMkldnnPass() {  // NOLINT
   AddOpCompat(OpCompat("conv2d"))
       .AddInput("Input")
       .IsTensor()
paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class Int8ScaleCalculationMkldnnPass : public FusePassBase {
  public:
   Int8ScaleCalculationMkldnnPass();
-  virtual ~Int8ScaleCalculationMkldnnPass() {}
+  virtual ~Int8ScaleCalculationMkldnnPass() = default;

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.cc
@@ -76,7 +76,7 @@ void QuantizeConvInput(Scope* scope,
 }  // namespace
-ParamsQuantizationMkldnnPass::ParamsQuantizationMkldnnPass() {
+ParamsQuantizationMkldnnPass::ParamsQuantizationMkldnnPass() {  // NOLINT
   AddOpCompat(OpCompat("fused_conv2d"))
       .AddInput("Input")
       .IsTensor()
paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class ParamsQuantizationMkldnnPass : public FusePassBase {
  public:
   ParamsQuantizationMkldnnPass();
-  virtual ~ParamsQuantizationMkldnnPass() {}
+  virtual ~ParamsQuantizationMkldnnPass() = default;

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass_tester.cc
@@ -65,7 +65,7 @@ struct TestScope {
 };
 struct ProgramStrategy {
-  virtual ~ProgramStrategy() {}
+  virtual ~ProgramStrategy() = default;
   std::unique_ptr<Graph> CreateGraph() {
     CreateProgram();
paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc
@@ -170,7 +170,8 @@ void FuseQuantTranspose2DequantOneDNNPass::ApplyImpl(Graph *graph) const {
   FuseTranspose2Dequantize(graph, "transpose2");
 }
-FuseQuantTranspose2DequantOneDNNPass::FuseQuantTranspose2DequantOneDNNPass() {
+FuseQuantTranspose2DequantOneDNNPass::
+    FuseQuantTranspose2DequantOneDNNPass() {  // NOLINT
   AddOpCompat(OpCompat("transpose2"))
       .AddInput("X")
       .IsTensor()
paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.h
@@ -23,7 +23,7 @@ namespace ir {
 class FuseQuantTranspose2DequantOneDNNPass : public FusePassBase {
  public:
-  virtual ~FuseQuantTranspose2DequantOneDNNPass() {}
+  virtual ~FuseQuantTranspose2DequantOneDNNPass() = default;
   FuseQuantTranspose2DequantOneDNNPass();

 protected:
paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc
@@ -31,7 +31,7 @@ namespace ir {
   GET_IR_NODE(reshape2_op);   \
   GET_IR_NODE(reshape2_out);
-ShuffleChannelMKLDNNDetectPass::ShuffleChannelMKLDNNDetectPass() {
+ShuffleChannelMKLDNNDetectPass::ShuffleChannelMKLDNNDetectPass() {  // NOLINT
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
       .IsTensor()
paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class ShuffleChannelMKLDNNDetectPass : public FusePassBase {
  public:
   ShuffleChannelMKLDNNDetectPass();
-  virtual ~ShuffleChannelMKLDNNDetectPass() {}
+  virtual ~ShuffleChannelMKLDNNDetectPass() = default;

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
@@ -37,7 +37,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
 class Node;
-ReverseRollFusePass::ReverseRollFusePass() {
+ReverseRollFusePass::ReverseRollFusePass() {  // NOLINT
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
       .IsTensor()
paddle/fluid/framework/ir/reverse_roll_fuse_pass.h
@@ -54,7 +54,7 @@ namespace ir {
 class ReverseRollFusePass : public FusePassBase {
  public:
   ReverseRollFusePass();
-  virtual ~ReverseRollFusePass() {}
+  virtual ~ReverseRollFusePass() = default;

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
@@ -31,7 +31,7 @@ namespace ir {
   GET_IR_NODE(reshape2_op);   \
   GET_IR_NODE(reshape2_out);
-ShuffleChannelDetectPass::ShuffleChannelDetectPass() {
+ShuffleChannelDetectPass::ShuffleChannelDetectPass() {  // NOLINT
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
       .IsTensor()
paddle/fluid/framework/ir/shuffle_channel_detect_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class ShuffleChannelDetectPass : public FusePassBase {
  public:
   ShuffleChannelDetectPass();
-  virtual ~ShuffleChannelDetectPass() {}
+  virtual ~ShuffleChannelDetectPass() = default;

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/sigmoid_elementmul_fuse_pass.cc
@@ -67,7 +67,7 @@ SigmoidElementmulFusePattern::SigmoidElementmulFusePattern(
 }  // namespace patterns
-SigmoidElementmulFusePass::SigmoidElementmulFusePass() {}
+SigmoidElementmulFusePass::SigmoidElementmulFusePass() = default;
 void SigmoidElementmulFusePass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
paddle/fluid/framework/ir/trt_map_ops_to_matrix_multiply_pass.cc
@@ -28,7 +28,7 @@ namespace ir {
 class Node;
-TrtMapOpsToMatrixMultiplyPass::TrtMapOpsToMatrixMultiplyPass() {}
+TrtMapOpsToMatrixMultiplyPass::TrtMapOpsToMatrixMultiplyPass() = default;
 void TrtMapOpsToMatrixMultiplyPass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
paddle/fluid/framework/ir/yolo_box_fuse_pass.cc
@@ -149,7 +149,7 @@ struct YoloBoxPattern : public PatternBase {
 };
 }  // namespace patterns
-YoloBoxFusePass::YoloBoxFusePass() {}
+YoloBoxFusePass::YoloBoxFusePass() = default;
 void YoloBoxFusePass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc
@@ -27,7 +27,7 @@ InterpreterCoreNoEventGarbageCollector::
 }
 InterpreterCoreNoEventGarbageCollector::
-    ~InterpreterCoreNoEventGarbageCollector() {
+    ~InterpreterCoreNoEventGarbageCollector() {  // NOLINT
   queue_.reset(nullptr);
 }
paddle/fluid/framework/new_executor/new_executor_defs.cc
@@ -37,7 +37,7 @@ VariableScope::VariableScope(Scope* scope) {
           "You have passed a nullptr to construct VariableScope."));
 }
-VariableScope::~VariableScope() {}
+VariableScope::~VariableScope() = default;
 Scope* VariableScope::GetMutableScope() const { return scope_; }
paddle/fluid/framework/phi_utils.cc
@@ -40,7 +40,7 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
         platform::errors::InvalidArgument("Op proto cannot be nullptr."));
   }
-  ~KernelArgsNameMakerByOpProto() override {}
+  ~KernelArgsNameMakerByOpProto() override = default;
   const paddle::small_vector<const char*>& GetInputArgsNames() override;
   const paddle::small_vector<const char*>& GetOutputArgsNames() override;
paddle/fluid/framework/program_utils.cc
@@ -187,7 +187,7 @@ void ProgramProcessor::AddDepToBlockOp(const BlockDesc &block) {
   }
 }
-ProgramProcessor::ProgramProcessor() {}
+ProgramProcessor::ProgramProcessor() = default;
 }  // namespace framework
 }  // namespace paddle
paddle/fluid/framework/reader.cc
@@ -72,7 +72,7 @@ void ReaderBase::Start() {
   }
 }
-ReaderBase::~ReaderBase() {}
+ReaderBase::~ReaderBase() = default;
 DecoratedReader::~DecoratedReader() {
   VLOG(1) << "~DecoratedReader";
paddle/fluid/framework/var_type_traits.cc
@@ -114,7 +114,7 @@ struct VarIdToTypeIndexMapHolder {
   }

 private:
-  VarIdToTypeIndexMapHolder() {
+  VarIdToTypeIndexMapHolder() {  // NOLINT
    VarIdToTypeIndexMapInitializer::Init(&id_to_type_map_, &type_to_id_map_);
  }
paddle/fluid/imperative/amp_auto_cast.cc
@@ -131,7 +131,9 @@ AutoCastGuard::AutoCastGuard(std::shared_ptr<Tracer> tracer, AmpLevel level)
   }
 }
-AutoCastGuard::~AutoCastGuard() { tracer_->SetAmpLevel(pre_amp_level_); }
+AutoCastGuard::~AutoCastGuard() {  // NOLINT
+  tracer_->SetAmpLevel(pre_amp_level_);
+}
 AmpOperators::AmpOperators()
     : allow_ops_(new std::unordered_set<std::string>()),
@@ -163,7 +165,7 @@ AmpOperators::AmpOperators()
           << unsupported_bf16_ops_->size();
 }
-AmpOperators::~AmpOperators() {}
+AmpOperators::~AmpOperators() = default;
 AmpOperators& AmpOperators::Instance() {
   static AmpOperators instance;
paddle/fluid/inference/analysis/analyzer.cc
@@ -23,7 +23,7 @@ namespace paddle {
 namespace inference {
 namespace analysis {
-Analyzer::Analyzer() {}
+Analyzer::Analyzer() = default;
 void Analyzer::Run(Argument *argument) { RunAnalysis(argument); }
paddle/fluid/inference/analysis/passes/passes.cc
@@ -27,7 +27,7 @@ namespace paddle {
 namespace inference {
 namespace analysis {
-PassRegistry::PassRegistry() {
+PassRegistry::PassRegistry() {  // NOLINT
   // Register manually to avoid the trivial `USE_OP` like macro for easier use
   // and link.
   passes_.emplace("ir_analysis_pass",
paddle/fluid/inference/api/resource_manager.cc
@@ -50,7 +50,7 @@ class EigenGpuStreamDevice : public Eigen::StreamInterface {
   EigenGpuStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
     Eigen::initializeDeviceProp();
   }
-  ~EigenGpuStreamDevice() override {}
+  ~EigenGpuStreamDevice() override = default;
   void Reinitialize(gpuStream_t cuda_stream,
                     phi::Allocator* allocator,
paddle/fluid/inference/tensorrt/convert/activation_op.cc
@@ -29,7 +29,7 @@ namespace tensorrt {
 class ActivationOpConverter : public OpConverter {
  public:
-  ActivationOpConverter() {}
+  ActivationOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
@@ -21,7 +21,7 @@ namespace tensorrt {
 class ElementwiseTensorOpConverter : public OpConverter {
  public:
-  ElementwiseTensorOpConverter() {}
+  ElementwiseTensorOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
@@ -325,7 +325,7 @@ class ElementwiseTensorModOpConverter : public ElementwiseTensorOpConverter {
 // https://github.com/PaddlePaddle/Paddle/blob/release/2.4/python/paddle/tensor/math.py#L420
 class PowOpConverter : public OpConverter {
  public:
-  PowOpConverter() {}
+  PowOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
paddle/fluid/inference/tensorrt/convert/equal_op.cc
@@ -21,7 +21,7 @@ namespace tensorrt {
 class EqualOpConverter : public OpConverter {
  public:
-  EqualOpConverter() {}
+  EqualOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
@@ -74,7 +74,7 @@ class EqualOpConverter : public OpConverter {
 class NotEqualOpConverter : public OpConverter {
  public:
-  NotEqualOpConverter() {}
+  NotEqualOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
paddle/fluid/inference/tensorrt/convert/square_op.cc
@@ -20,7 +20,7 @@ namespace tensorrt {
 class SquareOpConverter : public OpConverter {
  public:
-  SquareOpConverter() {}
+  SquareOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
paddle/fluid/inference/tensorrt/convert/top_k_op.cc
@@ -29,7 +29,7 @@ namespace tensorrt {
 class TopKOpConverter : public OpConverter {
  public:
-  TopKOpConverter() {}
+  TopKOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
paddle/fluid/inference/tensorrt/convert/unary_op.cc
@@ -29,7 +29,7 @@ namespace tensorrt {
 class UnaryOpConverter : public OpConverter {
  public:
-  UnaryOpConverter() {}
+  UnaryOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
@@ -23,7 +23,7 @@ namespace tensorrt {
 class ExprWrapper {
  public:
-  ExprWrapper() {}
+  ExprWrapper() = default;
   ExprWrapper(const nvinfer1::IDimensionExpr* expr,
               nvinfer1::IExprBuilder* expr_builder) {
     this->expr = expr;
paddle/fluid/inference/tensorrt/op_teller.cc
@@ -35,7 +35,7 @@ namespace tensorrt {
 // Just tell by the op_types.
 struct SimpleOpTypeSetTeller : public Teller {
-  SimpleOpTypeSetTeller() {
+  SimpleOpTypeSetTeller() {  // NOLINT
 #if IS_TRT_VERSION_GE(7130)
     // use TensorRT plugin
     teller_set.insert("group_norm");
@@ -3083,7 +3083,7 @@ struct SimpleOpTypeSetTeller : public Teller {
 struct GenericPluginTeller : public Teller {
  public:
-  GenericPluginTeller() {}
+  GenericPluginTeller() = default;
   bool operator()(const framework::OpDesc& desc,
                   bool use_no_calib_int8 = false,
                   bool with_dynamic_shape = false) override {
@@ -3125,7 +3125,7 @@ struct GenericPluginTeller : public Teller {
 struct CustomPluginTeller : public Teller {
  public:
-  CustomPluginTeller() {}
+  CustomPluginTeller() = default;
   bool operator()(const framework::OpDesc& desc,
                   bool use_no_calib_int8 = false,
                   bool with_dynamic_shape = false) override {
@@ -3178,7 +3178,7 @@ bool OpTeller::Tell(const framework::ir::Node* node,
   return false;
 }
-OpTeller::OpTeller() {
+OpTeller::OpTeller() {  // NOLINT
   tellers_.emplace_back(new tensorrt::SimpleOpTypeSetTeller);
   tellers_.emplace_back(new tensorrt::GenericPluginTeller);
   tellers_.emplace_back(new tensorrt::CustomPluginTeller);
paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_plugin.cu
@@ -405,7 +405,7 @@ char const* EmbLayerNormPlugin::getPluginNamespace() const noexcept {
   return mNamespace.c_str();
 }
-EmbLayerNormPluginCreator::EmbLayerNormPluginCreator() {}
+EmbLayerNormPluginCreator::EmbLayerNormPluginCreator() = default;
 char const* EmbLayerNormPluginCreator::getPluginName() const noexcept {
   return EMB_LAYER_NORM_NAME;
paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.cu
@@ -771,7 +771,7 @@ char const* EmbLayerNormVarSeqlenPluginBase::getPluginNamespace()
 }
 EmbLayerNormVarSeqlenPluginBaseCreator::
-    EmbLayerNormVarSeqlenPluginBaseCreator() {}
+    EmbLayerNormVarSeqlenPluginBaseCreator() = default;
 char const* EmbLayerNormVarSeqlenPluginBaseCreator::getPluginName()
     const noexcept {
paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.cu
@@ -405,7 +405,7 @@ void RoiAlignPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
 void RoiAlignPluginDynamic::destroy() TRT_NOEXCEPT {}
-RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() {}
+RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() = default;
 void RoiAlignPluginDynamicCreator::setPluginNamespace(const char* lib_namespace)
     TRT_NOEXCEPT {
paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.cu
@@ -36,7 +36,7 @@ StackPluginDynamic::StackPluginDynamic(void const* serial_data,
   DeserializeValue(&serial_data, &serial_length, &with_fp16_);
 }
-StackPluginDynamic::~StackPluginDynamic() {}
+StackPluginDynamic::~StackPluginDynamic() = default;
 nvinfer1::IPluginV2DynamicExt* StackPluginDynamic::clone() const TRT_NOEXCEPT {
   return new StackPluginDynamic(axis_, num_stack_, with_fp16_);
@@ -230,7 +230,7 @@ int StackPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
   return cudaGetLastError() != cudaSuccess;
 }
-StackPluginDynamicCreator::StackPluginDynamicCreator() {}
+StackPluginDynamicCreator::StackPluginDynamicCreator() = default;
 const char* StackPluginDynamicCreator::getPluginName() const TRT_NOEXCEPT {
   return "stack_plugin";
paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu
@@ -437,7 +437,7 @@ nvinfer1::IPluginV2Ext* YoloBoxPlugin::clone() const TRT_NOEXCEPT {
       input_w_);
 }
-YoloBoxPluginCreator::YoloBoxPluginCreator() {}
+YoloBoxPluginCreator::YoloBoxPluginCreator() = default;
 void YoloBoxPluginCreator::setPluginNamespace(const char* lib_namespace)
     TRT_NOEXCEPT {
paddle/fluid/ir_adaptor/translator/attribute_translator.cc
@@ -34,7 +34,7 @@ class AttributeVisitor {
  public:
   ir::IrContext* ctx;
   AttributeVisitor() { ctx = ir::IrContext::Instance(); }
-  ~AttributeVisitor() {}
+  ~AttributeVisitor() = default;

 public:
   virtual ir::Attribute operator()(int i) {
paddle/fluid/memory/allocation/allocator_facade.cc
@@ -127,7 +127,7 @@ class CUDAGraphAllocator
       : underlying_allocator_(allocator) {}

 public:
-  ~CUDAGraphAllocator() override {}
+  ~CUDAGraphAllocator() override = default;
   static std::shared_ptr<Allocator> Create(
       const std::shared_ptr<Allocator>& allocator) {
@@ -1272,7 +1272,7 @@ AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;
 AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
 // delete m_ may cause core dump when the destructor of python in conflict with
 // cpp.
-AllocatorFacade::~AllocatorFacade() {}
+AllocatorFacade::~AllocatorFacade() = default;
 AllocatorFacade& AllocatorFacade::Instance() {
   static AllocatorFacade* instance = new AllocatorFacade;
paddle/fluid/operators/collective/c_comm_init_all_op.cc
@@ -38,7 +38,7 @@ namespace operators {
 class CCommInitAllInferShape : public framework::InferShapeBase {
  public:
-  ~CCommInitAllInferShape() override {}
+  ~CCommInitAllInferShape() override = default;
   void operator()(framework::InferShapeContext* ctx) const override{};
 };
paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc
@@ -38,7 +38,7 @@ namespace operators {
 class CCommInitMultiTrainerInferShape : public framework::InferShapeBase {
  public:
-  ~CCommInitMultiTrainerInferShape() override {}
+  ~CCommInitMultiTrainerInferShape() override = default;
   void operator()(framework::InferShapeContext* ctx) const override{};
 };
paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu
@@ -44,7 +44,7 @@ static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
 template <typename T>
 struct PairForLayerNorm {
-  __device__ __forceinline__ PairForLayerNorm() {}
+  __device__ __forceinline__ PairForLayerNorm() = default;
   __device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
       : first_(first), second_(second) {}
paddle/fluid/operators/math/beam_search.cc
@@ -113,7 +113,7 @@ class BeamSearchFunctor<phi::CPUContext, T> {
    * The basic items help to sort.
    */
   struct Item {
-    Item() {}
+    Item() = default;
     Item(size_t offset, size_t id, float score)
         : offset(offset), id(id), score(score) {}
     // offset in the higher lod level.
paddle/fluid/operators/math/beam_search.cu
@@ -21,7 +21,7 @@ namespace operators {
 namespace math {
 struct Triple {
-  __device__ __forceinline__ Triple() {}
+  __device__ __forceinline__ Triple() = default;
   __device__ __forceinline__ Triple(int o, int i, float s)
       : offset(o), id(i), score(s) {}
paddle/fluid/operators/math/sampler.cc
@@ -22,7 +22,7 @@ namespace paddle {
 namespace operators {
 namespace math {
-Sampler::~Sampler() {}
+Sampler::~Sampler() = default;
 UniformSampler::UniformSampler(int64_t range, unsigned int seed)
     : Sampler(range, seed), inv_range_(1.0 / (range + 1)) {
paddle/fluid/operators/reader/py_reader.cc
@@ -36,7 +36,9 @@ void PyReader::ReadNext(paddle::framework::LoDTensorArray* out) {
   if (!success) out->clear();
 }
-PyReader::~PyReader() { queue_->Close(); }
+PyReader::~PyReader() {  // NOLINT
+  queue_->Close();
+}
 void PyReader::Shutdown() { queue_->Close(); }
paddle/fluid/platform/profiler/cuda_tracer.cc
@@ -47,7 +47,7 @@ std::unordered_map<uint32_t, uint64_t> CreateThreadIdMapping() {
 }
 }  // namespace details
-CudaTracer::CudaTracer() {}
+CudaTracer::CudaTracer() = default;
 void CudaTracer::PrepareTracing() {
   PADDLE_ENFORCE_EQ(
paddle/fluid/platform/profiler/custom_device/custom_tracer.cc
@@ -32,7 +32,7 @@ CustomTracer::CustomTracer(const std::string& dev_type) : dev_type_(dev_type) {
 #endif
 }
-CustomTracer::~CustomTracer() {
+CustomTracer::~CustomTracer() {  // NOLINT
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
   phi::DeviceManager::ProfilerFinalize(dev_type_, &collector_, context_);
 #endif
paddle/ir/core/ir_context.cc
@@ -29,7 +29,7 @@ namespace ir {
 // AbstractType, TypeStorage, AbstractAttribute, AttributeStorage, Dialect.
 class IrContextImpl {
  public:
-  IrContextImpl() {}
+  IrContextImpl() = default;
   ~IrContextImpl() {
     std::lock_guard<ir::SpinLock> guard(destructor_lock_);
paddle/ir/core/storage_manager.cc
@@ -66,7 +66,7 @@ struct ParametricStorageManager {
   std::function<void(StorageBase*)> destroy_;
 };
-StorageManager::StorageManager() {}
+StorageManager::StorageManager() = default;
 StorageManager::~StorageManager() = default;
paddle/phi/api/include/tensor.h
@@ -53,7 +53,7 @@ using IntArray = experimental::IntArray;
 class AbstractAutogradMeta {
  public:
   // No AbstractAutogradMeta should be created
-  virtual ~AbstractAutogradMeta() {}
+  virtual ~AbstractAutogradMeta() = default;
 };
 /**
paddle/phi/api/lib/tensor.cc
@@ -403,12 +403,7 @@ void Tensor::reset() {
 /* Part 6: Operator overloading */
-Tensor &Tensor::operator=(const Tensor &x) & {
-  impl_ = x.impl_;
-  autograd_meta_ = x.autograd_meta_;
-  name_ = x.name_;
-  return *this;
-}
+Tensor &Tensor::operator=(const Tensor &x) & = default;
 Tensor &Tensor::operator=(Tensor &&x) & {
   impl_ = std::move(x.impl_);
paddle/phi/backends/dynload/dynamic_loader.cc
@@ -109,7 +109,7 @@ namespace phi {
 namespace dynload {
 struct PathNode {
-  PathNode() {}
+  PathNode() = default;
   std::string path = "";
 };
paddle/phi/backends/gpu/gpu_context.cc
@@ -66,7 +66,7 @@ class EigenGpuStreamDevice : public Eigen::StreamInterface {
   EigenGpuStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
     Eigen::initializeDeviceProp();
   }
-  ~EigenGpuStreamDevice() override {}
+  ~EigenGpuStreamDevice() override = default;
   void Reinitialize(gpuStream_t cuda_stream,
                     Allocator* allocator,
paddle/phi/backends/onednn/onednn_context.cc
@@ -95,7 +95,7 @@ struct OneDNNContext::Impl {
     p_mutex_.reset(new std::mutex());
   }
-  ~Impl() {}
+  ~Impl() = default;
   void ResetBlobMap(void* ptr) {
     VLOG(4) << OneDNNContext::tls().get_curr_exec() << " " << ptr;
paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc
@@ -27,7 +27,7 @@ template <class T>
 class Point_ {
  public:
   // default constructor
-  Point_() {}
+  Point_() = default;
   Point_(T _x, T _y) {}
   Point_(const Point_& pt UNUSED) {}
paddle/phi/kernels/cpu/rnn_grad_kernel.cc
@@ -53,7 +53,7 @@ void CreateLstmGrad(phi::funcs::LstmMetaGrad<T>* lstm_grad) {
 template <typename T>
 struct GradCell {
-  virtual ~GradCell() {}
+  virtual ~GradCell() = default;
   virtual void operator()(const CPUContext& dev_ctx UNUSED,
                           DenseTensor* gate_tensor UNUSED,
                           DenseTensor* state_tensor UNUSED,
@@ -355,7 +355,7 @@ struct LSTMGradCell : GradCell<T> {
 template <typename T, typename GradCellType>
 struct GradLayer {
   explicit GradLayer(const GradCellType& cell) : cell_(cell) {}
-  virtual ~GradLayer() {}
+  virtual ~GradLayer() = default;
   void run_rnn_grad_function(const CPUContext& dev_ctx,
                              const DenseTensor* input,
@@ -690,7 +690,7 @@ struct SingleGradLayer : GradLayer<T, GradCellType> {
   // explicit SingleGradLayer(GradCellType& cell) : cell_(cell) {}
   explicit SingleGradLayer(const GradCellType& cell)
       : GradLayer<T, GradCellType>(cell) {}
-  ~SingleGradLayer() override {}
+  ~SingleGradLayer() override = default;
   void operator()(const CPUContext& dev_ctx,
                   const DenseTensor* input,
                   const DenseTensor* output,
@@ -802,7 +802,7 @@ template <typename T, typename GradCellType>
 struct BidirGradLayer : GradLayer<T, GradCellType> {
   explicit BidirGradLayer(const GradCellType& cell)
       : GradLayer<T, GradCellType>(cell) {}
-  ~BidirGradLayer() override {}
+  ~BidirGradLayer() override = default;
   void operator()(const CPUContext& dev_ctx,
                   const DenseTensor* input,
                   const DenseTensor* output,
paddle/phi/kernels/cpu/rnn_kernel.cc
@@ -34,7 +34,7 @@ namespace phi {
 template <typename T>
 struct Cell {
-  virtual ~Cell() {}
+  virtual ~Cell() = default;
   virtual void operator()(const CPUContext* dev_ctx UNUSED,
                           DenseTensor* input UNUSED,
                           const DenseTensor* weight_hh UNUSED,
@@ -208,7 +208,7 @@ struct LSTMCell : Cell<T> {
 template <typename T, typename CellType>
 struct Layer {
   explicit Layer(const CellType& cell) : cell_(cell) {}
-  virtual ~Layer() {}
+  virtual ~Layer() = default;
   void preprocess(const CPUContext& dev_ctx,
                   const DenseTensor& input,
                   const DenseTensor& weight,
paddle/phi/kernels/funcs/concat_and_split_functor.cu
@@ -65,7 +65,7 @@ struct PointerWrapper {
   const void* ins_addr[Size];
   __device__ inline const void* operator[](int i) const { return ins_addr[i]; }
-  PointerWrapper() {}
+  PointerWrapper() = default;
   PointerWrapper(const phi::GPUContext& ctx,
                  const std::vector<phi::DenseTensor>& ins,
                  const T** pre_alloced_host_ptr) {
@@ -84,7 +84,7 @@ template <typename T, int Size>
 struct PADDLE_ALIGN(256) AlignedPointerWrapper
     : public PointerWrapper<T, Size> {
  public:
-  AlignedPointerWrapper() {}
+  AlignedPointerWrapper() = default;
   AlignedPointerWrapper(const phi::GPUContext& ctx,
                         const std::vector<phi::DenseTensor>& ins,
                         const T** pre_alloced_host_ptr) {
@@ -98,7 +98,7 @@ struct PointerToPointer {
   void** ins_addr{nullptr};
   __device__ inline const void* operator[](int i) const { return ins_addr[i]; }
-  PointerToPointer() {}
+  PointerToPointer() = default;
   PointerToPointer(const phi::GPUContext& ctx,
                    const std::vector<phi::DenseTensor>& ins,
                    const T** pre_alloced_host_ptr,
@@ -186,9 +186,7 @@ struct PointerToPointerAndCol {
 template <int MovSize>
 struct alignas(MovSize) Packed {
-  __device__ Packed() {
-    // do nothing
-  }
+  __device__ Packed() = default;
   union {
     char buf[MovSize];
   };
@@ -621,7 +619,7 @@ struct PointerAndColArray
  public:
   funcs::ValueArray<IndexT, Size> val_array;
-  PointerAndColArray() {}
+  PointerAndColArray() = default;
   PointerAndColArray(const phi::GPUContext& ctx,
                      const int out_col_num,
                      IndexT* out_cols,
paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
@@ -32,7 +32,7 @@ namespace phi {
 template <typename MT, typename InT, typename OutT>
 struct MaskedSelectGradFunctor {
-  HOSTDEVICE MaskedSelectGradFunctor() {}
+  HOSTDEVICE MaskedSelectGradFunctor() = default;
   HOSTDEVICE inline void operator()(OutT* out,
                                     const MT* mask,
paddle/phi/kernels/gpu/masked_select_kernel.cu
@@ -30,7 +30,7 @@ namespace phi {
 template <typename MT, typename InT, typename OutT>
 struct MaskedSelectFunctor {
-  HOSTDEVICE MaskedSelectFunctor() {}
+  HOSTDEVICE MaskedSelectFunctor() = default;
   HOSTDEVICE inline void operator()(OutT* out,
                                     const MT* mask,
paddle/phi/kernels/gpu/p_norm_kernel.cu
@@ -63,7 +63,7 @@ __device__ __forceinline__ double inline_pow(double base, double exponent) {
 template <typename T>
 struct NonzeroFunctor {
-  HOSTDEVICE explicit inline NonzeroFunctor() {}
+  HOSTDEVICE explicit inline NonzeroFunctor() = default;
   HOSTDEVICE inline T operator()(const T x) const {
     return static_cast<T>(static_cast<double>(x) != 0);
   }
@@ -71,7 +71,7 @@ struct NonzeroFunctor {
 template <typename T>
 struct AbsFunctor {
-  HOSTDEVICE explicit inline AbsFunctor() {}
+  HOSTDEVICE explicit inline AbsFunctor() = default;
   HOSTDEVICE inline T operator()(const T x) const {
     return static_cast<T>(inline_abs(x));
   }
paddle/phi/kernels/gpu/rms_norm_kernel.cu
@@ -177,9 +177,7 @@ typename std::enable_if<HasCanPackAs<T>::value == false, bool>::type CanPackAs(
 template <typename T, int N>
 struct alignas(sizeof(T) * N) Pack {
-  __device__ Pack() {
-    // do nothing
-  }
+  __device__ Pack() = default;
   T elem[N];
 };
test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc
@@ -375,7 +375,7 @@ class CudnnBNAddReluTester {
     SetUp();
   }
-  ~CudnnBNAddReluTester() {}
+  ~CudnnBNAddReluTester() = default;
   void CheckForward(float diff, bool is_relative_atol = false) {
     LOG(INFO) << "[CheckForward, diff=" << diff
test/cpp/fluid/fused/cudnn_norm_conv_test.cc
@@ -235,7 +235,7 @@ class CudnnNormConvolutionTester {
     SetUp();
   }
-  ~CudnnNormConvolutionTester() {}
+  ~CudnnNormConvolutionTester() = default;
   void CheckForward(float diff, bool is_relative_atol = false) {
     phi::GPUContext *ctx = static_cast<phi::GPUContext *>(
test/cpp/fluid/fused/fused_dropout_act_bias_test.cu
@@ -89,7 +89,7 @@ struct TestFusedDropoutActBias {
     ctx = reinterpret_cast<phi::GPUContext*>(devicectx);
   }
-  ~TestFusedDropoutActBias() {}
+  ~TestFusedDropoutActBias() = default;
   void SetUp() {
     const int n = rows * cols;
test/cpp/fluid/fused/fused_layernorm_residual_dropout_bias_test.cu
@@ -95,7 +95,7 @@ struct TestFusedLayernormResidualDropoutBias {
    ctx = reinterpret_cast<phi::GPUContext*>(devicectx);
  }
-  ~TestFusedLayernormResidualDropoutBias() {}
+  ~TestFusedLayernormResidualDropoutBias() = default;
  void SetUp() {
    using U = LayerNormParamType<T>;
test/cpp/inference/api/analyzer_dam_tester.cc
@@ -35,7 +35,7 @@ struct DataRecord {
   size_t batch_size{1};
   size_t num_samples;  // total number of samples
-  DataRecord() {
+  DataRecord() {  // NOLINT
     turns = new std::vector<std::vector<int64_t>>
        [FLAGS_max_turn_num];  // turns data : FLAGS_max_turn_num
     turns_mask = new std::vector<std::vector<
@@ -48,7 +48,7 @@ struct DataRecord {
     Load(path);
   }
-  ~DataRecord() {
+  ~DataRecord() {  // NOLINT
     delete[] turns;
     delete[] turns_mask;
   }
test/cpp/inference/api/api_tester.cc
@@ -50,7 +50,7 @@ class DemoPredictor : public PaddlePredictor {
     return nullptr;
   }
-  ~DemoPredictor() override {}
+  ~DemoPredictor() override = default;
 };
 template <>