Unverified commit 30a02d27 authored by: R Ruibin Cheung committed by: GitHub

[clang-tidy] enable modernize-use-equals-default (#55983)

Parent 4d094b0c
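For context, a minimal sketch (not taken from this diff; the class names Widget and Holder are hypothetical) of what the modernize-use-equals-default check rewrites: an empty user-provided special member function is replaced with an explicitly defaulted one, while members whose bodies do real work keep their bodies and are suppressed with // NOLINT, as done throughout this change.

// Hypothetical illustration of the clang-tidy transformation.
class Widget {
 public:
  // Before: empty user-provided special members, e.g. Widget() {} and ~Widget() {}.
  // After: explicitly defaulted, which lets the compiler keep the type
  // trivially constructible/destructible where possible.
  Widget() = default;
  ~Widget() = default;
};

// A destructor with real work cannot be defaulted; the check is silenced
// instead (compare FleetExecutor::~FleetExecutor below).
class Holder {
 public:
  ~Holder() { Release(); }  // NOLINT
 private:
  void Release() {}
};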
......@@ -181,7 +181,7 @@ modernize-redundant-void-arg,
-modernize-unary-static-assert,
-modernize-use-bool-literals,
modernize-use-emplace,
-modernize-use-equals-default,
modernize-use-equals-default,
-modernize-use-equals-delete,
-modernize-use-noexcept,
modernize-use-nullptr,
......
......@@ -34,7 +34,7 @@ DistTensorSpec::DistTensorSpec(const DistTensorSpec& spec) {
dist_attr_.copy_from(spec.dist_attr());
}
DistTensorSpec::~DistTensorSpec() {}
DistTensorSpec::~DistTensorSpec() = default;
DistTensorSpec::DistTensorSpec(const Tensor& tensor) {
shape_ = tensor.shape();
......
......@@ -47,7 +47,7 @@ ProcessGroupNCCL::NCCLTask::NCCLTask(const Place& place,
comm_event_(place),
task_place_(place) {}
ProcessGroupNCCL::NCCLTask::~NCCLTask() {}
ProcessGroupNCCL::NCCLTask::~NCCLTask() = default;
bool ProcessGroupNCCL::NCCLTask::IsCompleted() { return comm_event_.Query(); }
......
......@@ -48,7 +48,7 @@ FleetExecutor::FleetExecutor(const FleetExecutorDesc& exe_desc)
InitMessageBus();
}
FleetExecutor::~FleetExecutor() {
FleetExecutor::~FleetExecutor() { // NOLINT
for (const auto& carrier_id : carrier_ids_) {
GlobalMap<std::string, Carrier>::Get(carrier_id)->Release();
}
......
......@@ -24,7 +24,7 @@ namespace distributed {
Interceptor::Interceptor(int64_t interceptor_id, TaskNode* node)
: interceptor_id_(interceptor_id), node_(node) {}
Interceptor::~Interceptor() {
Interceptor::~Interceptor() { // NOLINT
// FIXME(wangxi): throw in stop function
// std::lock_guard<std::mutex> lock(mutex_);
// PADDLE_ENFORCE_EQ(messages_.empty(), true,
......
......@@ -38,7 +38,7 @@ inline double GetCurrentUS() {
return 1e+6 * time.tv_sec + time.tv_usec;
}
Communicator::Communicator() {}
Communicator::Communicator() = default;
void Communicator::InitGFlag(const std::string &gflags) {
VLOG(3) << "Init With Gflags:" << gflags;
......
......@@ -1205,7 +1205,7 @@ Node *GraphShard::find_node(uint64_t id) {
return iter == node_location.end() ? nullptr : bucket[iter->second];
}
GraphTable::~GraphTable() {
GraphTable::~GraphTable() { // NOLINT
#ifdef PADDLE_WITH_GPU_GRAPH
clear_graph();
#endif
......
......@@ -1999,7 +1999,7 @@ void PaddleBoxDataFeed::PutToFeedVec(const std::vector<Record*>& ins_vec) {
#endif
}
SlotRecordInMemoryDataFeed::~SlotRecordInMemoryDataFeed() {
SlotRecordInMemoryDataFeed::~SlotRecordInMemoryDataFeed() { // NOLINT
#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
stop_token_.store(true);
for (auto& thread : pack_threads_) {
......
......@@ -1821,7 +1821,7 @@ class MultiSlotInMemoryDataFeed : public InMemoryDataFeed<Record> {
class SlotRecordInMemoryDataFeed : public InMemoryDataFeed<SlotRecord> {
public:
SlotRecordInMemoryDataFeed() {}
SlotRecordInMemoryDataFeed() = default;
virtual ~SlotRecordInMemoryDataFeed();
void Init(const DataFeedDesc& data_feed_desc) override;
void LoadIntoMemory() override;
......
......@@ -37,7 +37,7 @@ FetchAsyncOpHandle::FetchAsyncOpHandle(ir::Node *node,
local_exec_scopes_(local_exec_scopes),
return_merged_(return_merged) {}
FetchAsyncOpHandle::~FetchAsyncOpHandle() {}
FetchAsyncOpHandle::~FetchAsyncOpHandle() = default;
void FetchAsyncOpHandle::RecordWaitEventOnCtx(
platform::DeviceContext *waited_ctx) {
......
......@@ -35,7 +35,7 @@ FetchOpHandle::FetchOpHandle(ir::Node *node,
local_exec_scopes_(local_exec_scopes),
return_merged_(return_merged) {}
FetchOpHandle::~FetchOpHandle() {}
FetchOpHandle::~FetchOpHandle() = default;
void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
PADDLE_THROW(platform::errors::PermissionDenied(
......
......@@ -27,7 +27,7 @@ namespace paddle {
namespace framework {
namespace details {
struct DebugTools {
DebugTools() {}
DebugTools() = default;
std::string path = "";
int stack_limit = 1;
};
......
......@@ -30,7 +30,7 @@ std::string OpHandleBase::DebugString() const {
return ss.str();
}
OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {
OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW { // NOLINT
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
for (auto &ev : events_) {
if (ev.second) {
......
......@@ -40,7 +40,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(ir::Node *node,
this->SetDeviceContext(place_, dev_ctx);
}
ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {}
ScaleLossGradOpHandle::~ScaleLossGradOpHandle() = default;
struct ScaleLossGradFunctor {
float coeff_;
......
......@@ -19,7 +19,7 @@
namespace paddle {
namespace framework {
namespace details {
SSAGraphExecutor::~SSAGraphExecutor() {}
SSAGraphExecutor::~SSAGraphExecutor() = default;
void ClearFetchOp(ir::Graph* graph, std::vector<OpHandleBase*>* fetch_ops) {
if (fetch_ops->empty()) return;
......
......@@ -18,7 +18,7 @@ namespace paddle {
namespace framework {
namespace details {
VarHandleBase::~VarHandleBase() {}
VarHandleBase::~VarHandleBase() = default;
VarHandle::~VarHandle() { VLOG(4) << "deleting var handle " << DebugString(); }
......
......@@ -24,7 +24,7 @@ namespace paddle {
namespace framework {
namespace ir {
AdaptivePool2dConvertGlobalPass::AdaptivePool2dConvertGlobalPass() {
AdaptivePool2dConvertGlobalPass::AdaptivePool2dConvertGlobalPass() { // NOLINT
AddOpCompat(OpCompat("pool2d"))
.AddInput("X")
.IsTensor()
......
......@@ -32,7 +32,7 @@ class Graph;
class AdaptivePool2dConvertGlobalPass : public FusePassBase {
public:
AdaptivePool2dConvertGlobalPass();
virtual ~AdaptivePool2dConvertGlobalPass() {}
virtual ~AdaptivePool2dConvertGlobalPass() = default;
protected:
void ApplyImpl(ir::Graph* graph) const override;
......
......@@ -51,7 +51,7 @@ struct ConstantFolding : public PatternBase {
};
} // namespace patterns
ConstantFoldingPass::ConstantFoldingPass() {}
ConstantFoldingPass::ConstantFoldingPass() = default;
void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
PADDLE_ENFORCE_NOT_NULL(
......
......@@ -756,7 +756,7 @@ void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const {
AddStatis(found_conv_bn_count);
}
ConvTransposeBNFusePass::ConvTransposeBNFusePass() {
ConvTransposeBNFusePass::ConvTransposeBNFusePass() { // NOLINT
AddOpCompat(OpCompat("conv2d_transpose"))
.AddInput("Input")
.IsTensor()
......@@ -800,7 +800,8 @@ ConvTransposeBNFusePass::ConvTransposeBNFusePass() {
.End();
}
ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
ConvTransposeEltwiseAddBNFusePass::
ConvTransposeEltwiseAddBNFusePass() { // NOLINT
AddOpCompat(OpCompat("conv2d_transpose"))
.AddInput("Input")
.IsTensor()
......@@ -844,7 +845,7 @@ ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
.End();
}
DepthwiseConvBNFusePass::DepthwiseConvBNFusePass() {
DepthwiseConvBNFusePass::DepthwiseConvBNFusePass() { // NOLINT
AddOpCompat(OpCompat("depthwise_conv2d"))
.AddInput("Input")
.IsTensor()
......
......@@ -30,7 +30,7 @@ using platform::MemEvent;
const double CostData::NOT_MEASURED = -1;
CostData::~CostData() {
CostData::~CostData() { // NOLINT
// TODO(zhhsplendid): when we save a copy of program/graph, we should delete
// here.
}
......
......@@ -56,7 +56,7 @@ static const std::initializer_list<std::string> rnn_variable_names{
class ComputePropagateScalesMkldnnPassTest : public testing::Test {
public:
ComputePropagateScalesMkldnnPassTest() {
ComputePropagateScalesMkldnnPassTest() { // NOLINT
pass.reset(new ComputePropagateScalesMkldnnPass());
}
......
......@@ -32,7 +32,7 @@ class Graph;
PADDLE_ENFORCE_NOT_NULL( \
id, platform::errors::InvalidArgument("Subgraph has no node %s.", #id));
DepthwiseConvMKLDNNPass::DepthwiseConvMKLDNNPass() {
DepthwiseConvMKLDNNPass::DepthwiseConvMKLDNNPass() { // NOLINT
AddOpCompat(OpCompat("depthwise_conv2d"))
.AddInput("Input")
.IsTensor()
......
......@@ -25,7 +25,7 @@ class Graph;
class DepthwiseConvMKLDNNPass : public FusePassBase {
public:
DepthwiseConvMKLDNNPass();
virtual ~DepthwiseConvMKLDNNPass() {}
virtual ~DepthwiseConvMKLDNNPass() = default;
protected:
void ApplyImpl(ir::Graph* graph) const override;
......
......@@ -23,7 +23,7 @@ namespace paddle {
namespace framework {
namespace ir {
Int8ScaleCalculationMkldnnPass::Int8ScaleCalculationMkldnnPass() {
Int8ScaleCalculationMkldnnPass::Int8ScaleCalculationMkldnnPass() { // NOLINT
AddOpCompat(OpCompat("conv2d"))
.AddInput("Input")
.IsTensor()
......
......@@ -27,7 +27,7 @@ class Graph;
class Int8ScaleCalculationMkldnnPass : public FusePassBase {
public:
Int8ScaleCalculationMkldnnPass();
virtual ~Int8ScaleCalculationMkldnnPass() {}
virtual ~Int8ScaleCalculationMkldnnPass() = default;
protected:
void ApplyImpl(ir::Graph* graph) const override;
......
......@@ -76,7 +76,7 @@ void QuantizeConvInput(Scope* scope,
} // namespace
ParamsQuantizationMkldnnPass::ParamsQuantizationMkldnnPass() {
ParamsQuantizationMkldnnPass::ParamsQuantizationMkldnnPass() { // NOLINT
AddOpCompat(OpCompat("fused_conv2d"))
.AddInput("Input")
.IsTensor()
......
......@@ -27,7 +27,7 @@ class Graph;
class ParamsQuantizationMkldnnPass : public FusePassBase {
public:
ParamsQuantizationMkldnnPass();
virtual ~ParamsQuantizationMkldnnPass() {}
virtual ~ParamsQuantizationMkldnnPass() = default;
protected:
void ApplyImpl(ir::Graph* graph) const override;
......
......@@ -65,7 +65,7 @@ struct TestScope {
};
struct ProgramStrategy {
virtual ~ProgramStrategy() {}
virtual ~ProgramStrategy() = default;
std::unique_ptr<Graph> CreateGraph() {
CreateProgram();
......
......@@ -170,7 +170,8 @@ void FuseQuantTranspose2DequantOneDNNPass::ApplyImpl(Graph *graph) const {
FuseTranspose2Dequantize(graph, "transpose2");
}
FuseQuantTranspose2DequantOneDNNPass::FuseQuantTranspose2DequantOneDNNPass() {
FuseQuantTranspose2DequantOneDNNPass::
FuseQuantTranspose2DequantOneDNNPass() { // NOLINT
AddOpCompat(OpCompat("transpose2"))
.AddInput("X")
.IsTensor()
......
......@@ -23,7 +23,7 @@ namespace ir {
class FuseQuantTranspose2DequantOneDNNPass : public FusePassBase {
public:
virtual ~FuseQuantTranspose2DequantOneDNNPass() {}
virtual ~FuseQuantTranspose2DequantOneDNNPass() = default;
FuseQuantTranspose2DequantOneDNNPass();
protected:
......
......@@ -31,7 +31,7 @@ namespace ir {
GET_IR_NODE(reshape2_op); \
GET_IR_NODE(reshape2_out);
ShuffleChannelMKLDNNDetectPass::ShuffleChannelMKLDNNDetectPass() {
ShuffleChannelMKLDNNDetectPass::ShuffleChannelMKLDNNDetectPass() { // NOLINT
AddOpCompat(OpCompat("reshape2"))
.AddInput("X")
.IsTensor()
......
......@@ -27,7 +27,7 @@ class Graph;
class ShuffleChannelMKLDNNDetectPass : public FusePassBase {
public:
ShuffleChannelMKLDNNDetectPass();
virtual ~ShuffleChannelMKLDNNDetectPass() {}
virtual ~ShuffleChannelMKLDNNDetectPass() = default;
protected:
void ApplyImpl(ir::Graph* graph) const override;
......
......@@ -37,7 +37,7 @@ namespace paddle {
namespace framework {
namespace ir {
class Node;
ReverseRollFusePass::ReverseRollFusePass() {
ReverseRollFusePass::ReverseRollFusePass() { // NOLINT
AddOpCompat(OpCompat("reshape2"))
.AddInput("X")
.IsTensor()
......
......@@ -54,7 +54,7 @@ namespace ir {
class ReverseRollFusePass : public FusePassBase {
public:
ReverseRollFusePass();
virtual ~ReverseRollFusePass() {}
virtual ~ReverseRollFusePass() = default;
protected:
void ApplyImpl(ir::Graph *graph) const override;
......
......@@ -31,7 +31,7 @@ namespace ir {
GET_IR_NODE(reshape2_op); \
GET_IR_NODE(reshape2_out);
ShuffleChannelDetectPass::ShuffleChannelDetectPass() {
ShuffleChannelDetectPass::ShuffleChannelDetectPass() { // NOLINT
AddOpCompat(OpCompat("reshape2"))
.AddInput("X")
.IsTensor()
......
......@@ -27,7 +27,7 @@ class Graph;
class ShuffleChannelDetectPass : public FusePassBase {
public:
ShuffleChannelDetectPass();
virtual ~ShuffleChannelDetectPass() {}
virtual ~ShuffleChannelDetectPass() = default;
protected:
void ApplyImpl(ir::Graph* graph) const override;
......
......@@ -67,7 +67,7 @@ SigmoidElementmulFusePattern::SigmoidElementmulFusePattern(
} // namespace patterns
SigmoidElementmulFusePass::SigmoidElementmulFusePass() {}
SigmoidElementmulFusePass::SigmoidElementmulFusePass() = default;
void SigmoidElementmulFusePass::ApplyImpl(ir::Graph* graph) const {
PADDLE_ENFORCE_NOT_NULL(
......
......@@ -28,7 +28,7 @@ namespace ir {
class Node;
TrtMapOpsToMatrixMultiplyPass::TrtMapOpsToMatrixMultiplyPass() {}
TrtMapOpsToMatrixMultiplyPass::TrtMapOpsToMatrixMultiplyPass() = default;
void TrtMapOpsToMatrixMultiplyPass::ApplyImpl(ir::Graph* graph) const {
PADDLE_ENFORCE_NOT_NULL(
......
......@@ -149,7 +149,7 @@ struct YoloBoxPattern : public PatternBase {
};
} // namespace patterns
YoloBoxFusePass::YoloBoxFusePass() {}
YoloBoxFusePass::YoloBoxFusePass() = default;
void YoloBoxFusePass::ApplyImpl(ir::Graph* graph) const {
PADDLE_ENFORCE_NOT_NULL(
......
......@@ -27,7 +27,7 @@ InterpreterCoreNoEventGarbageCollector::
}
InterpreterCoreNoEventGarbageCollector::
~InterpreterCoreNoEventGarbageCollector() {
~InterpreterCoreNoEventGarbageCollector() { // NOLINT
queue_.reset(nullptr);
}
......
......@@ -37,7 +37,7 @@ VariableScope::VariableScope(Scope* scope) {
"You have passed a nullptr to construct VariableScope."));
}
VariableScope::~VariableScope() {}
VariableScope::~VariableScope() = default;
Scope* VariableScope::GetMutableScope() const { return scope_; }
......
......@@ -40,7 +40,7 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
platform::errors::InvalidArgument("Op proto cannot be nullptr."));
}
~KernelArgsNameMakerByOpProto() override {}
~KernelArgsNameMakerByOpProto() override = default;
const paddle::small_vector<const char*>& GetInputArgsNames() override;
const paddle::small_vector<const char*>& GetOutputArgsNames() override;
......
......@@ -187,7 +187,7 @@ void ProgramProcessor::AddDepToBlockOp(const BlockDesc &block) {
}
}
ProgramProcessor::ProgramProcessor() {}
ProgramProcessor::ProgramProcessor() = default;
} // namespace framework
} // namespace paddle
......@@ -72,7 +72,7 @@ void ReaderBase::Start() {
}
}
ReaderBase::~ReaderBase() {}
ReaderBase::~ReaderBase() = default;
DecoratedReader::~DecoratedReader() {
VLOG(1) << "~DecoratedReader";
......
......@@ -114,7 +114,7 @@ struct VarIdToTypeIndexMapHolder {
}
private:
VarIdToTypeIndexMapHolder() {
VarIdToTypeIndexMapHolder() { // NOLINT
VarIdToTypeIndexMapInitializer::Init(&id_to_type_map_, &type_to_id_map_);
}
......
......@@ -131,7 +131,9 @@ AutoCastGuard::AutoCastGuard(std::shared_ptr<Tracer> tracer, AmpLevel level)
}
}
AutoCastGuard::~AutoCastGuard() { tracer_->SetAmpLevel(pre_amp_level_); }
AutoCastGuard::~AutoCastGuard() { // NOLINT
tracer_->SetAmpLevel(pre_amp_level_);
}
AmpOperators::AmpOperators()
: allow_ops_(new std::unordered_set<std::string>()),
......@@ -163,7 +165,7 @@ AmpOperators::AmpOperators()
<< unsupported_bf16_ops_->size();
}
AmpOperators::~AmpOperators() {}
AmpOperators::~AmpOperators() = default;
AmpOperators& AmpOperators::Instance() {
static AmpOperators instance;
......
......@@ -23,7 +23,7 @@ namespace paddle {
namespace inference {
namespace analysis {
Analyzer::Analyzer() {}
Analyzer::Analyzer() = default;
void Analyzer::Run(Argument *argument) { RunAnalysis(argument); }
......
......@@ -27,7 +27,7 @@ namespace paddle {
namespace inference {
namespace analysis {
PassRegistry::PassRegistry() {
PassRegistry::PassRegistry() { // NOLINT
// Register manually to avoid the trivial `USE_OP` like macro for easier use
// and link.
passes_.emplace("ir_analysis_pass",
......
......@@ -50,7 +50,7 @@ class EigenGpuStreamDevice : public Eigen::StreamInterface {
EigenGpuStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
Eigen::initializeDeviceProp();
}
~EigenGpuStreamDevice() override {}
~EigenGpuStreamDevice() override = default;
void Reinitialize(gpuStream_t cuda_stream,
phi::Allocator* allocator,
......
......@@ -29,7 +29,7 @@ namespace tensorrt {
class ActivationOpConverter : public OpConverter {
public:
ActivationOpConverter() {}
ActivationOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......
......@@ -21,7 +21,7 @@ namespace tensorrt {
class ElementwiseTensorOpConverter : public OpConverter {
public:
ElementwiseTensorOpConverter() {}
ElementwiseTensorOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......@@ -325,7 +325,7 @@ class ElementwiseTensorModOpConverter : public ElementwiseTensorOpConverter {
// https://github.com/PaddlePaddle/Paddle/blob/release/2.4/python/paddle/tensor/math.py#L420
class PowOpConverter : public OpConverter {
public:
PowOpConverter() {}
PowOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......
......@@ -21,7 +21,7 @@ namespace tensorrt {
class EqualOpConverter : public OpConverter {
public:
EqualOpConverter() {}
EqualOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......@@ -74,7 +74,7 @@ class EqualOpConverter : public OpConverter {
class NotEqualOpConverter : public OpConverter {
public:
NotEqualOpConverter() {}
NotEqualOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......
......@@ -20,7 +20,7 @@ namespace tensorrt {
class SquareOpConverter : public OpConverter {
public:
SquareOpConverter() {}
SquareOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......
......@@ -29,7 +29,7 @@ namespace tensorrt {
class TopKOpConverter : public OpConverter {
public:
TopKOpConverter() {}
TopKOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......
......@@ -29,7 +29,7 @@ namespace tensorrt {
class UnaryOpConverter : public OpConverter {
public:
UnaryOpConverter() {}
UnaryOpConverter() = default;
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
......
......@@ -23,7 +23,7 @@ namespace tensorrt {
class ExprWrapper {
public:
ExprWrapper() {}
ExprWrapper() = default;
ExprWrapper(const nvinfer1::IDimensionExpr* expr,
nvinfer1::IExprBuilder* expr_builder) {
this->expr = expr;
......
......@@ -35,7 +35,7 @@ namespace tensorrt {
// Just tell by the op_types.
struct SimpleOpTypeSetTeller : public Teller {
SimpleOpTypeSetTeller() {
SimpleOpTypeSetTeller() { // NOLINT
#if IS_TRT_VERSION_GE(7130)
// use TensorRT plugin
teller_set.insert("group_norm");
......@@ -3083,7 +3083,7 @@ struct SimpleOpTypeSetTeller : public Teller {
struct GenericPluginTeller : public Teller {
public:
GenericPluginTeller() {}
GenericPluginTeller() = default;
bool operator()(const framework::OpDesc& desc,
bool use_no_calib_int8 = false,
bool with_dynamic_shape = false) override {
......@@ -3125,7 +3125,7 @@ struct GenericPluginTeller : public Teller {
struct CustomPluginTeller : public Teller {
public:
CustomPluginTeller() {}
CustomPluginTeller() = default;
bool operator()(const framework::OpDesc& desc,
bool use_no_calib_int8 = false,
bool with_dynamic_shape = false) override {
......@@ -3178,7 +3178,7 @@ bool OpTeller::Tell(const framework::ir::Node* node,
return false;
}
OpTeller::OpTeller() {
OpTeller::OpTeller() { // NOLINT
tellers_.emplace_back(new tensorrt::SimpleOpTypeSetTeller);
tellers_.emplace_back(new tensorrt::GenericPluginTeller);
tellers_.emplace_back(new tensorrt::CustomPluginTeller);
......
......@@ -405,7 +405,7 @@ char const* EmbLayerNormPlugin::getPluginNamespace() const noexcept {
return mNamespace.c_str();
}
EmbLayerNormPluginCreator::EmbLayerNormPluginCreator() {}
EmbLayerNormPluginCreator::EmbLayerNormPluginCreator() = default;
char const* EmbLayerNormPluginCreator::getPluginName() const noexcept {
return EMB_LAYER_NORM_NAME;
......
......@@ -771,7 +771,7 @@ char const* EmbLayerNormVarSeqlenPluginBase::getPluginNamespace()
}
EmbLayerNormVarSeqlenPluginBaseCreator::
EmbLayerNormVarSeqlenPluginBaseCreator() {}
EmbLayerNormVarSeqlenPluginBaseCreator() = default;
char const* EmbLayerNormVarSeqlenPluginBaseCreator::getPluginName()
const noexcept {
......
......@@ -405,7 +405,7 @@ void RoiAlignPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
void RoiAlignPluginDynamic::destroy() TRT_NOEXCEPT {}
RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() {}
RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() = default;
void RoiAlignPluginDynamicCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
......
......@@ -36,7 +36,7 @@ StackPluginDynamic::StackPluginDynamic(void const* serial_data,
DeserializeValue(&serial_data, &serial_length, &with_fp16_);
}
StackPluginDynamic::~StackPluginDynamic() {}
StackPluginDynamic::~StackPluginDynamic() = default;
nvinfer1::IPluginV2DynamicExt* StackPluginDynamic::clone() const TRT_NOEXCEPT {
return new StackPluginDynamic(axis_, num_stack_, with_fp16_);
......@@ -230,7 +230,7 @@ int StackPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
return cudaGetLastError() != cudaSuccess;
}
StackPluginDynamicCreator::StackPluginDynamicCreator() {}
StackPluginDynamicCreator::StackPluginDynamicCreator() = default;
const char* StackPluginDynamicCreator::getPluginName() const TRT_NOEXCEPT {
return "stack_plugin";
......
......@@ -437,7 +437,7 @@ nvinfer1::IPluginV2Ext* YoloBoxPlugin::clone() const TRT_NOEXCEPT {
input_w_);
}
YoloBoxPluginCreator::YoloBoxPluginCreator() {}
YoloBoxPluginCreator::YoloBoxPluginCreator() = default;
void YoloBoxPluginCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
......
......@@ -34,7 +34,7 @@ class AttributeVisitor {
public:
ir::IrContext* ctx;
AttributeVisitor() { ctx = ir::IrContext::Instance(); }
~AttributeVisitor() {}
~AttributeVisitor() = default;
public:
virtual ir::Attribute operator()(int i) {
......
......@@ -127,7 +127,7 @@ class CUDAGraphAllocator
: underlying_allocator_(allocator) {}
public:
~CUDAGraphAllocator() override {}
~CUDAGraphAllocator() override = default;
static std::shared_ptr<Allocator> Create(
const std::shared_ptr<Allocator>& allocator) {
......@@ -1272,7 +1272,7 @@ AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
// delete m_ may cause core dump when the destructor of python in conflict with
// cpp.
AllocatorFacade::~AllocatorFacade() {}
AllocatorFacade::~AllocatorFacade() = default;
AllocatorFacade& AllocatorFacade::Instance() {
static AllocatorFacade* instance = new AllocatorFacade;
......
......@@ -38,7 +38,7 @@ namespace operators {
class CCommInitAllInferShape : public framework::InferShapeBase {
public:
~CCommInitAllInferShape() override {}
~CCommInitAllInferShape() override = default;
void operator()(framework::InferShapeContext* ctx) const override{};
};
......
......@@ -38,7 +38,7 @@ namespace operators {
class CCommInitMultiTrainerInferShape : public framework::InferShapeBase {
public:
~CCommInitMultiTrainerInferShape() override {}
~CCommInitMultiTrainerInferShape() override = default;
void operator()(framework::InferShapeContext* ctx) const override{};
};
......
......@@ -44,7 +44,7 @@ static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm() = default;
__device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
: first_(first), second_(second) {}
......
......@@ -113,7 +113,7 @@ class BeamSearchFunctor<phi::CPUContext, T> {
* The basic items help to sort.
*/
struct Item {
Item() {}
Item() = default;
Item(size_t offset, size_t id, float score)
: offset(offset), id(id), score(score) {}
// offset in the higher lod level.
......
......@@ -21,7 +21,7 @@ namespace operators {
namespace math {
struct Triple {
__device__ __forceinline__ Triple() {}
__device__ __forceinline__ Triple() = default;
__device__ __forceinline__ Triple(int o, int i, float s)
: offset(o), id(i), score(s) {}
......
......@@ -22,7 +22,7 @@ namespace paddle {
namespace operators {
namespace math {
Sampler::~Sampler() {}
Sampler::~Sampler() = default;
UniformSampler::UniformSampler(int64_t range, unsigned int seed)
: Sampler(range, seed), inv_range_(1.0 / (range + 1)) {
......
......@@ -36,7 +36,9 @@ void PyReader::ReadNext(paddle::framework::LoDTensorArray* out) {
if (!success) out->clear();
}
PyReader::~PyReader() { queue_->Close(); }
PyReader::~PyReader() { // NOLINT
queue_->Close();
}
void PyReader::Shutdown() { queue_->Close(); }
......
......@@ -47,7 +47,7 @@ std::unordered_map<uint32_t, uint64_t> CreateThreadIdMapping() {
}
} // namespace details
CudaTracer::CudaTracer() {}
CudaTracer::CudaTracer() = default;
void CudaTracer::PrepareTracing() {
PADDLE_ENFORCE_EQ(
......
......@@ -32,7 +32,7 @@ CustomTracer::CustomTracer(const std::string& dev_type) : dev_type_(dev_type) {
#endif
}
CustomTracer::~CustomTracer() {
CustomTracer::~CustomTracer() { // NOLINT
#ifdef PADDLE_WITH_CUSTOM_DEVICE
phi::DeviceManager::ProfilerFinalize(dev_type_, &collector_, context_);
#endif
......
......@@ -29,7 +29,7 @@ namespace ir {
// AbstractType, TypeStorage, AbstractAttribute, AttributeStorage, Dialect.
class IrContextImpl {
public:
IrContextImpl() {}
IrContextImpl() = default;
~IrContextImpl() {
std::lock_guard<ir::SpinLock> guard(destructor_lock_);
......
......@@ -66,7 +66,7 @@ struct ParametricStorageManager {
std::function<void(StorageBase *)> destroy_;
};
StorageManager::StorageManager() {}
StorageManager::StorageManager() = default;
StorageManager::~StorageManager() = default;
......
......@@ -53,7 +53,7 @@ using IntArray = experimental::IntArray;
class AbstractAutogradMeta {
public:
// No AbstractAutogradMeta should be created
virtual ~AbstractAutogradMeta() {}
virtual ~AbstractAutogradMeta() = default;
};
/**
......
......@@ -403,12 +403,7 @@ void Tensor::reset() {
/* Part 6: Operator overloading */
Tensor &Tensor::operator=(const Tensor &x) & {
impl_ = x.impl_;
autograd_meta_ = x.autograd_meta_;
name_ = x.name_;
return *this;
}
Tensor &Tensor::operator=(const Tensor &x) & = default;
Tensor &Tensor::operator=(Tensor &&x) & {
impl_ = std::move(x.impl_);
......
......@@ -109,7 +109,7 @@ namespace phi {
namespace dynload {
struct PathNode {
PathNode() {}
PathNode() = default;
std::string path = "";
};
......
......@@ -66,7 +66,7 @@ class EigenGpuStreamDevice : public Eigen::StreamInterface {
EigenGpuStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
Eigen::initializeDeviceProp();
}
~EigenGpuStreamDevice() override {}
~EigenGpuStreamDevice() override = default;
void Reinitialize(gpuStream_t cuda_stream,
Allocator* allocator,
......
......@@ -95,7 +95,7 @@ struct OneDNNContext::Impl {
p_mutex_.reset(new std::mutex());
}
~Impl() {}
~Impl() = default;
void ResetBlobMap(void* ptr) {
VLOG(4) << OneDNNContext::tls().get_curr_exec() << " " << ptr;
......
......@@ -27,7 +27,7 @@ template <class T>
class Point_ {
public:
// default constructor
Point_() {}
Point_() = default;
Point_(T _x, T _y) {}
Point_(const Point_& pt UNUSED) {}
......
......@@ -53,7 +53,7 @@ void CreateLstmGrad(phi::funcs::LstmMetaGrad<T>* lstm_grad) {
template <typename T>
struct GradCell {
virtual ~GradCell() {}
virtual ~GradCell() = default;
virtual void operator()(const CPUContext& dev_ctx UNUSED,
DenseTensor* gate_tensor UNUSED,
DenseTensor* state_tensor UNUSED,
......@@ -355,7 +355,7 @@ struct LSTMGradCell : GradCell<T> {
template <typename T, typename GradCellType>
struct GradLayer {
explicit GradLayer(const GradCellType& cell) : cell_(cell) {}
virtual ~GradLayer() {}
virtual ~GradLayer() = default;
void run_rnn_grad_function(
const CPUContext& dev_ctx,
const DenseTensor* input,
......@@ -690,7 +690,7 @@ struct SingleGradLayer : GradLayer<T, GradCellType> {
// explicit SingleGradLayer(GradCellType& cell) : cell_(cell) {}
explicit SingleGradLayer(const GradCellType& cell)
: GradLayer<T, GradCellType>(cell) {}
~SingleGradLayer() override {}
~SingleGradLayer() override = default;
void operator()(const CPUContext& dev_ctx,
const DenseTensor* input,
const DenseTensor* output,
......@@ -802,7 +802,7 @@ template <typename T, typename GradCellType>
struct BidirGradLayer : GradLayer<T, GradCellType> {
explicit BidirGradLayer(const GradCellType& cell)
: GradLayer<T, GradCellType>(cell) {}
~BidirGradLayer() override {}
~BidirGradLayer() override = default;
void operator()(const CPUContext& dev_ctx,
const DenseTensor* input,
const DenseTensor* output,
......
......@@ -34,7 +34,7 @@ namespace phi {
template <typename T>
struct Cell {
virtual ~Cell() {}
virtual ~Cell() = default;
virtual void operator()(const CPUContext* dev_ctx UNUSED,
DenseTensor* input UNUSED,
const DenseTensor* weight_hh UNUSED,
......@@ -208,7 +208,7 @@ struct LSTMCell : Cell<T> {
template <typename T, typename CellType>
struct Layer {
explicit Layer(const CellType& cell) : cell_(cell) {}
virtual ~Layer() {}
virtual ~Layer() = default;
void preprocess(const CPUContext& dev_ctx,
const DenseTensor& input,
const DenseTensor& weight,
......
......@@ -65,7 +65,7 @@ struct PointerWrapper {
const void* ins_addr[Size];
__device__ inline const void* operator[](int i) const { return ins_addr[i]; }
PointerWrapper() {}
PointerWrapper() = default;
PointerWrapper(const phi::GPUContext& ctx,
const std::vector<phi::DenseTensor>& ins,
const T** pre_alloced_host_ptr) {
......@@ -84,7 +84,7 @@ template <typename T, int Size>
struct PADDLE_ALIGN(256) AlignedPointerWrapper
: public PointerWrapper<T, Size> {
public:
AlignedPointerWrapper() {}
AlignedPointerWrapper() = default;
AlignedPointerWrapper(const phi::GPUContext& ctx,
const std::vector<phi::DenseTensor>& ins,
const T** pre_alloced_host_ptr) {
......@@ -98,7 +98,7 @@ struct PointerToPointer {
void** ins_addr{nullptr};
__device__ inline const void* operator[](int i) const { return ins_addr[i]; }
PointerToPointer() {}
PointerToPointer() = default;
PointerToPointer(const phi::GPUContext& ctx,
const std::vector<phi::DenseTensor>& ins,
const T** pre_alloced_host_ptr,
......@@ -186,9 +186,7 @@ struct PointerToPointerAndCol {
template <int MovSize>
struct alignas(MovSize) Packed {
__device__ Packed() {
// do nothing
}
__device__ Packed() = default;
union {
char buf[MovSize];
};
......@@ -621,7 +619,7 @@ struct PointerAndColArray
public:
funcs::ValueArray<IndexT, Size> val_array;
PointerAndColArray() {}
PointerAndColArray() = default;
PointerAndColArray(const phi::GPUContext& ctx,
const int out_col_num,
IndexT* out_cols,
......
......@@ -32,7 +32,7 @@ namespace phi {
template <typename MT, typename InT, typename OutT>
struct MaskedSelectGradFunctor {
HOSTDEVICE MaskedSelectGradFunctor() {}
HOSTDEVICE MaskedSelectGradFunctor() = default;
HOSTDEVICE inline void operator()(OutT* out,
const MT* mask,
......
......@@ -30,7 +30,7 @@ namespace phi {
template <typename MT, typename InT, typename OutT>
struct MaskedSelectFunctor {
HOSTDEVICE MaskedSelectFunctor() {}
HOSTDEVICE MaskedSelectFunctor() = default;
HOSTDEVICE inline void operator()(OutT* out,
const MT* mask,
......
......@@ -63,7 +63,7 @@ __device__ __forceinline__ double inline_pow(double base, double exponent) {
template <typename T>
struct NonzeroFunctor {
HOSTDEVICE explicit inline NonzeroFunctor() {}
HOSTDEVICE explicit inline NonzeroFunctor() = default;
HOSTDEVICE inline T operator()(const T x) const {
return static_cast<T>(static_cast<double>(x) != 0);
}
......@@ -71,7 +71,7 @@ struct NonzeroFunctor {
template <typename T>
struct AbsFunctor {
HOSTDEVICE explicit inline AbsFunctor() {}
HOSTDEVICE explicit inline AbsFunctor() = default;
HOSTDEVICE inline T operator()(const T x) const {
return static_cast<T>(inline_abs(x));
}
......
......@@ -177,9 +177,7 @@ typename std::enable_if<HasCanPackAs<T>::value == false, bool>::type CanPackAs(
template <typename T, int N>
struct alignas(sizeof(T) * N) Pack {
__device__ Pack() {
// do nothing
}
__device__ Pack() = default;
T elem[N];
};
......
......@@ -375,7 +375,7 @@ class CudnnBNAddReluTester {
SetUp();
}
~CudnnBNAddReluTester() {}
~CudnnBNAddReluTester() = default;
void CheckForward(float diff, bool is_relative_atol = false) {
LOG(INFO) << "[CheckForward, diff=" << diff
......
......@@ -235,7 +235,7 @@ class CudnnNormConvolutionTester {
SetUp();
}
~CudnnNormConvolutionTester() {}
~CudnnNormConvolutionTester() = default;
void CheckForward(float diff, bool is_relative_atol = false) {
phi::GPUContext *ctx = static_cast<phi::GPUContext *>(
......
......@@ -89,7 +89,7 @@ struct TestFusedDropoutActBias {
ctx = reinterpret_cast<phi::GPUContext *>(devicectx);
}
~TestFusedDropoutActBias() {}
~TestFusedDropoutActBias() = default;
void SetUp() {
const int n = rows * cols;
......
......@@ -95,7 +95,7 @@ struct TestFusedLayernormResidualDropoutBias {
ctx = reinterpret_cast<phi::GPUContext *>(devicectx);
}
~TestFusedLayernormResidualDropoutBias() {}
~TestFusedLayernormResidualDropoutBias() = default;
void SetUp() {
using U = LayerNormParamType<T>;
......
......@@ -35,7 +35,7 @@ struct DataRecord {
size_t batch_size{1};
size_t num_samples; // total number of samples
DataRecord() {
DataRecord() { // NOLINT
turns = new std::vector<std::vector<
int64_t>>[FLAGS_max_turn_num]; // turns data : FLAGS_max_turn_num
turns_mask = new std::vector<std::vector<
......@@ -48,7 +48,7 @@ struct DataRecord {
Load(path);
}
~DataRecord() {
~DataRecord() { // NOLINT
delete[] turns;
delete[] turns_mask;
}
......
......@@ -50,7 +50,7 @@ class DemoPredictor : public PaddlePredictor {
return nullptr;
}
~DemoPredictor() override {}
~DemoPredictor() override = default;
};
template <>
......