From ac80251a198e5d24198ef18ddc360761a99b660b Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Wed, 30 Aug 2023 16:02:33 +0800
Subject: [PATCH] [clang-tidy][task 5] enable `modernize-make-shared` and fix existing linter errors (#55807)

---
 .clang-tidy                                   |  2 +-
 .../eager_manual/forwards/add_n_fwd_func.cc   |  4 ++--
 .../forwards/conv2d_fwd_function.cc           |  4 ++--
 .../forwards/multiply_fwd_func.cc             | 11 +++++-----
 .../forwards/sync_batch_norm_fwd_func.cc      |  8 ++++----
 .../manual/eager_manual/nodes/conv2d_nodes.cc |  2 +-
 .../eager_manual/nodes/multiply_node.cc       |  2 +-
 .../forwards/fused_attention_fwd_func.cc      |  5 +++--
 ...as_dropout_residual_layer_norm_fwd_func.cc |  6 +++---
 .../forwards/fused_feedforward_fwd_func.cc    |  5 +++--
 .../forwards/fused_gate_attention_fwd_func.cc |  5 +++--
 .../forwards/fused_gemm_epilogue_fwd_func.cc  |  5 +++--
 .../auto_code_generator/eager_generator.cc    | 15 +++++++-------
 .../generator/eager_gen.py                    |  6 +++---
 paddle/fluid/pybind/eager_functions.cc        |  4 ++--
 paddle/fluid/pybind/imperative.cc             |  4 ++--
 .../performance_tests/benchmark_utils.cc      | 20 +++++--------------
 17 files changed, 51 insertions(+), 57 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 37d956eaa94..c0d5b09ac1f 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -170,7 +170,7 @@ modernize-avoid-c-arrays,
 -modernize-deprecated-headers,
 -modernize-deprecated-ios-base-aliases,
 modernize-loop-convert,
--modernize-make-shared,
+modernize-make-shared,
 modernize-make-unique,
 -modernize-pass-by-value,
 modernize-raw-string-literal,
diff --git a/paddle/fluid/eager/api/manual/eager_manual/forwards/add_n_fwd_func.cc b/paddle/fluid/eager/api/manual/eager_manual/forwards/add_n_fwd_func.cc
index d14832b80a1..7dca372a23b 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/forwards/add_n_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/forwards/add_n_fwd_func.cc
@@ -83,8 +83,8 @@ paddle::Tensor add_n_ad_func(const std::vector<paddle::Tensor>& x) {
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
 
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<AddNGradNodeFinal>(new AddNGradNodeFinal(1, 1));
+  auto grad_node = std::shared_ptr<AddNGradNodeFinal>(  // NOLINT
+      new AddNGradNodeFinal(1, 1));
 
   // Set forward's stack
   if (FLAGS_check_nan_inf) {
diff --git a/paddle/fluid/eager/api/manual/eager_manual/forwards/conv2d_fwd_function.cc b/paddle/fluid/eager/api/manual/eager_manual/forwards/conv2d_fwd_function.cc
index b7ca5a7c267..5ef6fcf9c3f 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/forwards/conv2d_fwd_function.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/forwards/conv2d_fwd_function.cc
@@ -137,8 +137,8 @@ paddle::Tensor conv2d_ad_func(const paddle::Tensor& input,
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
 
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<Conv2dGradNodeFinal>(new Conv2dGradNodeFinal(1, 2));
+  auto grad_node = std::shared_ptr<Conv2dGradNodeFinal>(  // NOLINT
+      new Conv2dGradNodeFinal(1, 2));
 
   // Set forward's stack
   if (FLAGS_check_nan_inf) {
diff --git a/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc b/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc
index d9b34643034..0a72ab810fc 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc
@@ -132,8 +132,8 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x,
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
 
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
+  auto grad_node = std::shared_ptr<MultiplyGradNode>(  // NOLINT
+      new MultiplyGradNode(1, 2));
   // Set for forward trace
   if (FLAGS_check_nan_inf) {
     grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
@@ -275,7 +275,8 @@ paddle::Tensor& multiply__ad_func(paddle::Tensor& x,  // NOLINT
         paddle::platform::TracerEventType::OperatorInner,
         1);
 
-    grad_node = std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
+    grad_node = std::shared_ptr<MultiplyGradNode>(  // NOLINT
+        new MultiplyGradNode(1, 2));
     // Set for forward trace
     if (FLAGS_check_nan_inf) {
       grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
@@ -462,8 +463,8 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x,
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
 
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
+  auto grad_node = std::shared_ptr<MultiplyGradNode>(  // NOLINT
+      new MultiplyGradNode(1, 2));
   // Set for forward trace
   if (FLAGS_check_nan_inf) {
     grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
diff --git a/paddle/fluid/eager/api/manual/eager_manual/forwards/sync_batch_norm_fwd_func.cc b/paddle/fluid/eager/api/manual/eager_manual/forwards/sync_batch_norm_fwd_func.cc
index a5b0c4d70b0..79b5c60c66b 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/forwards/sync_batch_norm_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/forwards/sync_batch_norm_fwd_func.cc
@@ -225,8 +225,8 @@ sync_batch_norm__ad_func(const paddle::Tensor& x,
                                      reserve_space_autograd_meta);
 
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<SyncBatchNormGradNode>(new SyncBatchNormGradNode(6, 5));
+  auto grad_node = std::shared_ptr<SyncBatchNormGradNode>(  // NOLINT
+      new SyncBatchNormGradNode(6, 5));
 
   // Set forward's stack
   if (FLAGS_check_nan_inf) {
@@ -567,8 +567,8 @@ sync_batch_norm__ad_func(const paddle::Tensor& x,
                                      reserve_space_autograd_meta);
 
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<SyncBatchNormGradNode>(new SyncBatchNormGradNode(6, 5));
+  auto grad_node = std::shared_ptr<SyncBatchNormGradNode>(  // NOLINT
+      new SyncBatchNormGradNode(6, 5));
   egr::Controller::Instance().PushBackForceSequentialNodes(grad_node.get());
   // SetAttributes if needed
   grad_node->SetAttributemomentum(momentum);
diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
index a7d00f8df18..438188ea4b7 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
@@ -123,7 +123,7 @@ Conv2dGradNodeFinal::operator()(
       1);
 
   // Node Construction
-  auto grad_node = std::shared_ptr<Conv2dDoubleGradNodeFinal>(
+  auto grad_node = std::shared_ptr<Conv2dDoubleGradNodeFinal>(  // NOLINT
       new Conv2dDoubleGradNodeFinal(2, 3));
   // SetAttributes if needed
   grad_node->SetAttributestrides(strides);
diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
index f7a90c43e7d..f3ae667777a 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
@@ -158,7 +158,7 @@ MultiplyGradNode::operator()(
       1);
 
   // Node Construction
-  auto grad_node = std::shared_ptr<MultiplyDoubleGradNode>(
+  auto grad_node = std::shared_ptr<MultiplyDoubleGradNode>(  // NOLINT
       new MultiplyDoubleGradNode(2, 3));
   // SetAttributes if needed
   grad_node->SetAttributeaxis(axis);
diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc
index 77ecc8a30e1..0f1192ae1bd 100644
--- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc
@@ -390,8 +390,9 @@ fused_attention_dygraph_function(
       p_autograd_CacheKVOut,
       p_autograd_Y);
   // Create GradOpNode
-  auto grad_node = std::shared_ptr<fused_attentionGradNodeCompat>(
-      new fused_attentionGradNodeCompat(20, 23));
+  auto grad_node =
+      std::shared_ptr<fused_attentionGradNodeCompat>(  // NOLINT
+          new fused_attentionGradNodeCompat(20, 23));
 
   bool pre_layer_norm = false;
   if (attrs.count("pre_layer_norm")) {
diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_bias_dropout_residual_layer_norm_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_bias_dropout_residual_layer_norm_fwd_func.cc
index 4b57d2e3c5b..1e61714525f 100644
--- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_bias_dropout_residual_layer_norm_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_bias_dropout_residual_layer_norm_fwd_func.cc
@@ -184,9 +184,9 @@ fused_bias_dropout_residual_layer_norm_dygraph_function(
       p_autograd_LnVariance,
       p_autograd_Y);
   // Create GradOpNode
-  auto grad_node =
-      std::shared_ptr<fused_bias_dropout_residual_layer_normGradNodeCompat>(
-          new fused_bias_dropout_residual_layer_normGradNodeCompat(5, 5));
+  auto grad_node = std::shared_ptr<  // NOLINT
+      fused_bias_dropout_residual_layer_normGradNodeCompat>(
+      new fused_bias_dropout_residual_layer_normGradNodeCompat(5, 5));
 
   // Set Attributes
   grad_node->SetAttrMap(std::move(attrs));
diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc
index 9f13579c5aa..e46e677c318 100644
--- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc
@@ -310,8 +310,9 @@ fused_feedforward_dygraph_function(
       p_autograd_Dropout1Out,
       p_autograd_Dropout2Out);
   // Create GradOpNode
-  auto grad_node = std::shared_ptr<fused_feedforwardGradNodeCompat>(
-      new fused_feedforwardGradNodeCompat(11, 11));
+  auto grad_node =
+      std::shared_ptr<fused_feedforwardGradNodeCompat>(  // NOLINT
+          new fused_feedforwardGradNodeCompat(11, 11));
 
   bool pre_layer_norm = false;
   if (attrs.count("pre_layer_norm")) {
diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc
index 546b60438fe..8c66a106311 100644
--- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc
@@ -301,8 +301,9 @@ fused_gate_attention_dygraph_function(
       p_autograd_GateOut,
       p_autograd_Out);
   // Create GradOpNode
-  auto grad_node = std::shared_ptr<fused_gate_attentionGradNodeCompat>(
-      new fused_gate_attentionGradNodeCompat(9, 12));
+  auto grad_node =
+      std::shared_ptr<fused_gate_attentionGradNodeCompat>(  // NOLINT
+          new fused_gate_attentionGradNodeCompat(9, 12));
 
   bool merge_qkv = true;
   if (attrs.count("merge_qkv")) {
diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gemm_epilogue_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gemm_epilogue_fwd_func.cc
index 2eb73276011..74e10cd0685 100644
--- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gemm_epilogue_fwd_func.cc
+++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gemm_epilogue_fwd_func.cc
@@ -102,8 +102,9 @@ paddle::Tensor fused_gemm_epilogue_dygraph_function(
   VLOG(6) << " Construct Grad for fused_gemm_epilogue ";
   egr::EagerUtils::PassStopGradient(false, p_autograd_Out);
   // Create GradOpNode
-  auto grad_node = std::shared_ptr<fused_gemm_epilogueGradNodeCompat>(
-      new fused_gemm_epilogueGradNodeCompat(1, 3));
+  auto grad_node =
+      std::shared_ptr<fused_gemm_epilogueGradNodeCompat>(  // NOLINT
+          new fused_gemm_epilogueGradNodeCompat(1, 3));
 
   // Set Attributes
   grad_node->SetAttrMap(std::move(attrs));
diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index 6cc98ed6fb3..d250989199a 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -827,9 +827,8 @@ static bool CollectGradInformationFromOpInfo(
     const std::string& in_name = op_proto.inputs()[0].name();
     ins[in_name] = {};
     for (size_t i = 0; i < NUM_CREATED_DUP_INPUTS; i++) {
-      ins[in_name].emplace_back(std::shared_ptr<paddle::imperative::VarBase>(
-          new paddle::imperative::VarBase("auto_" + in_name + "_" +
-                                          std::to_string(i))));
+      ins[in_name].emplace_back(std::make_shared<paddle::imperative::VarBase>(
+          "auto_" + in_name + "_" + std::to_string(i)));
       ins[in_name][i]->SetOverridedStopGradient(false);
      ins[in_name][i]->MutableVar()->GetMutable();
     }
@@ -852,8 +851,8 @@
      // but we only need to identify the slot name order,
      // therefore fill in 1 single input VarBase is enough in this scenario
 
-      ins[in_name] = {std::shared_ptr<paddle::imperative::VarBase>(
-          new paddle::imperative::VarBase("auto_" + in_name))};
+      ins[in_name] = {
+          std::make_shared<paddle::imperative::VarBase>("auto_" + in_name)};
      ins[in_name][0]->SetOverridedStopGradient(false);
      ins[in_name][0]->MutableVar()->GetMutable();
     }
@@ -870,8 +869,8 @@
    // We always create output VarBase regardless of its dispensability.
    // We dont know the exact number of outputs during code generation,
    // however, simply identifying the slot name order would be enough
-    outs[out_name] = {std::shared_ptr<paddle::imperative::VarBase>(
-        new paddle::imperative::VarBase("auto_" + out_name))};
+    outs[out_name] = {
+        std::make_shared<paddle::imperative::VarBase>("auto_" + out_name)};
    outs[out_name][0]->SetOverridedStopGradient(false);
    outs[out_name][0]->MutableVar()->GetMutable();
   }
@@ -1179,7 +1178,7 @@ static std::string GenerateGradNodeCreationContent(
   const char* GRAD_OP_NODE_TEMPLATE =
      "  auto grad_node = std::shared_ptr<%sGradNodeCompat>(new "
      "%sGradNodeCompat(%d, "
-      "%d));\n";
+      "%d));  // NOLINT\n";
   grad_node_creation_str += "  // Create GradOpNode\n";
   grad_node_creation_str += paddle::string::Sprintf(GRAD_OP_NODE_TEMPLATE,
                                                     op_type,
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index 9bbe26d0f8a..da4a9aab538 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -953,13 +953,13 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
         # Helper
         indent = GetIndent(2)
 
-        # NOTE(Aurelius74): DO NOT use make_shared here. Because some Node contains experimental::Scalar
+        # NOTE(Aurelius84): DO NOT use make_shared here. Because some Node contains experimental::Scalar
         # which contains "complex128" as data. "complex128" is memory-aligned manually. But make_shared
         # request MEMALIGN for allocation (Maybe).
         # See https://stackoverflow.com/questions/31228656/how-can-shared-ptr-disrupt-alignment
         # and https://github.com/MRtrix3/mrtrix3/issues/957
-        node_construction_str = f"{indent}auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"
-        node_assignment_str = f"{indent}grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"
+        node_construction_str = f"{indent}auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));  // NOLINT"
+        node_assignment_str = f"{indent}grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));  // NOLINT"
 
         # SetAttributes
         set_attributes_list = []
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index db6ce75c7d0..d03a20537ee 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -1187,8 +1187,8 @@ static PyObject* eager_api_to_uva_tensor(PyObject* self,
                                          PyObject* kwargs) {
   EAGER_TRY
   VLOG(4) << "Running in eager_api_to_uva_tensor.";
-  auto new_tensor = std::shared_ptr<paddle::Tensor>(
-      new paddle::Tensor(egr::Controller::Instance().GenerateUniqueName()));
+  auto new_tensor = std::make_shared<paddle::Tensor>(
+      egr::Controller::Instance().GenerateUniqueName());
 
   PyObject* obj = PyTuple_GET_ITEM(args, 0);
   auto array = py::cast(py::handle(obj));
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 2e67906f4f2..111e8ebdfdf 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -976,8 +976,8 @@ void BindImperative(py::module *m_ptr) {
       "to_uva_tensor",
       [](const py::object &obj, int device_id) {
         const auto &tracer = imperative::GetCurrentTracer();
-        auto new_tensor = std::shared_ptr<imperative::VarBase>(
-            new imperative::VarBase(tracer->GenerateUniqueName()));
+        auto new_tensor =
+            std::make_shared<imperative::VarBase>(tracer->GenerateUniqueName());
         auto array = obj.cast();
         if (py::isinstance>(array)) {
           SetUVATensorFromPyArray(new_tensor, array, device_id);
diff --git a/test/cpp/eager/performance_tests/benchmark_utils.cc b/test/cpp/eager/performance_tests/benchmark_utils.cc
index 83d14a6b45b..73c4404fca7 100644
--- a/test/cpp/eager/performance_tests/benchmark_utils.cc
+++ b/test/cpp/eager/performance_tests/benchmark_utils.cc
@@ -241,9 +241,7 @@ void benchmark_fluid_scale(const std::shared_ptr<imperative::VarBase>& X,
   for (size_t i = 0; i < max_num_runs; i++) {
     imperative::NameVarBaseMap ins = {{"X", {tmp_out}}};
     imperative::NameVarBaseMap outs = {
-        {"Out",
-         {std::shared_ptr<imperative::VarBase>(
-             new imperative::VarBase(true, "Out"))}}};
+        {"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
 
     tracer.TraceOp("scale", ins, outs, attrs, place, true);
 
@@ -277,9 +275,7 @@ void benchmark_fluid_matmul(const std::shared_ptr<imperative::VarBase>& X,
     framework::AttributeMap attrs;
     imperative::NameVarBaseMap ins = {{"X", {tmp_out}}, {"Y", {Y}}};
     imperative::NameVarBaseMap outs = {
-        {"Out",
-         {std::shared_ptr<imperative::VarBase>(
-             new imperative::VarBase(true, "Out"))}}};
+        {"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
 
     tracer.TraceOp("matmul_v2", ins, outs, attrs, place, true);
 
@@ -316,17 +312,13 @@ void benchmark_fluid_mlp(
   for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
     // Matmul0
     ins = {{"X", {input0}}, {"Y", {Ws[0]}}};
-    outs = {{"Out",
-             {std::shared_ptr<imperative::VarBase>(
-                 new imperative::VarBase(true, "Out"))}}};
+    outs = {{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
 
     tracer.TraceOp("matmul_v2", ins, outs, attrs, place, true);
 
     // EW-Add0
     ins = {{"X", outs["Out"]},
{"Y", {Bs[i]}}}; - outs = {{"Out", - {std::shared_ptr( - new imperative::VarBase(true, "Out"))}}}; + outs = {{"Out", {std::make_shared(true, "Out")}}}; tracer.TraceOp("elementwise_add", ins, outs, attrs, place, true); input0 = outs["Out"][0]; @@ -334,9 +326,7 @@ void benchmark_fluid_mlp( // ReduceSum ins = {{"X", {input0}}}; - outs = {{"Out", - {std::shared_ptr( - new imperative::VarBase(true, "Out"))}}}; + outs = {{"Out", {std::make_shared(true, "Out")}}}; attrs = {{"reduce_all", true}}; tracer.TraceOp("reduce_sum", ins, outs, attrs, place, true); -- GitLab