Unverified commit ac80251a, authored by Nyakku Shigure, committed by GitHub

[clang-tidy][task 5] enable `modernize-make-shared` and fix existing linter errors (#55807)

Parent e285234c
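For context, `modernize-make-shared` flags direct `std::shared_ptr<T>(new T(...))` constructions and suggests `std::make_shared<T>(...)`, which allocates the object and its control block together. A minimal sketch of the transform (the `Widget` type is illustrative, not from this commit):

    #include <memory>

    struct Widget {  // illustrative type, not from this commit
      Widget(int bwd_ins, int bwd_outs) {}
    };

    int main() {
      // Flagged by modernize-make-shared: separate allocations for the
      // Widget and for the shared_ptr control block.
      auto a = std::shared_ptr<Widget>(new Widget(1, 2));
      // Suggested fix: one combined allocation.
      auto b = std::make_shared<Widget>(1, 2);
    }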
@@ -170,7 +170,7 @@ modernize-avoid-c-arrays,
 -modernize-deprecated-headers,
 -modernize-deprecated-ios-base-aliases,
 modernize-loop-convert,
--modernize-make-shared,
+modernize-make-shared,
 modernize-make-unique,
 -modernize-pass-by-value,
 modernize-raw-string-literal,
......
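In the `Checks` list of `.clang-tidy`, a leading `-` disables a check, so the hunk above turns `modernize-make-shared` on simply by dropping its `-` prefix; neighbouring entries that keep the prefix (e.g. `-modernize-pass-by-value`) stay disabled. A schematic excerpt of that convention (shape only, not the full Paddle config):

    # A leading '-' disables a check inside the Checks glob list.
    Checks: >
      modernize-loop-convert,
      modernize-make-shared,
      -modernize-pass-by-value,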
@@ -83,8 +83,8 @@ paddle::Tensor add_n_ad_func(const std::vector<paddle::Tensor>& x) {
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<AddNGradNodeFinal>(new AddNGradNodeFinal(1, 1));
+  auto grad_node = std::shared_ptr<AddNGradNodeFinal>(  // NOLINT
+      new AddNGradNodeFinal(1, 1));
   // Set forward's stack
   if (FLAGS_check_nan_inf) {
......
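This hunk and the ones below deliberately keep the `shared_ptr<T>(new T(...))` form and silence the newly enabled check with `// NOLINT` instead of converting to `make_shared`; the generator NOTE further down in this commit explains why. The bare marker suppresses every check on its line; clang-tidy also accepts a check-scoped form, shown here as an alternative rather than what was merged:

    auto grad_node =
        std::shared_ptr<AddNGradNodeFinal>(  // NOLINT(modernize-make-shared)
            new AddNGradNodeFinal(1, 1));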
@@ -137,8 +137,8 @@ paddle::Tensor conv2d_ad_func(const paddle::Tensor& input,
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<Conv2dGradNodeFinal>(new Conv2dGradNodeFinal(1, 2));
+  auto grad_node = std::shared_ptr<Conv2dGradNodeFinal>(  // NOLINT
+      new Conv2dGradNodeFinal(1, 2));
   // Set forward's stack
   if (FLAGS_check_nan_inf) {
......
@@ -132,8 +132,8 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x,
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
+  auto grad_node = std::shared_ptr<MultiplyGradNode>(  // NOLINT
+      new MultiplyGradNode(1, 2));
   // Set for forward trace
   if (FLAGS_check_nan_inf) {
     grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
@@ -275,7 +275,8 @@ paddle::Tensor& multiply__ad_func(paddle::Tensor& x,  // NOLINT
         paddle::platform::TracerEventType::OperatorInner,
         1);
-    grad_node = std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
+    grad_node = std::shared_ptr<MultiplyGradNode>(  // NOLINT
+        new MultiplyGradNode(1, 2));
     // Set for forward trace
     if (FLAGS_check_nan_inf) {
       grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
@@ -462,8 +463,8 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x,
   egr::EagerUtils::PassStopGradient(false, out_autograd_meta);
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
+  auto grad_node = std::shared_ptr<MultiplyGradNode>(  // NOLINT
+      new MultiplyGradNode(1, 2));
   // Set for forward trace
   if (FLAGS_check_nan_inf) {
     grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
......
@@ -225,8 +225,8 @@ sync_batch_norm__ad_func(const paddle::Tensor& x,
                                     reserve_space_autograd_meta);
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<SyncBatchNormGradNode>(new SyncBatchNormGradNode(6, 5));
+  auto grad_node = std::shared_ptr<SyncBatchNormGradNode>(  // NOLINT
+      new SyncBatchNormGradNode(6, 5));
   // Set forward's stack
   if (FLAGS_check_nan_inf) {
@@ -567,8 +567,8 @@ sync_batch_norm__ad_func(const paddle::Tensor& x,
                                     reserve_space_autograd_meta);
   // Node Construction
-  auto grad_node =
-      std::shared_ptr<SyncBatchNormGradNode>(new SyncBatchNormGradNode(6, 5));
+  auto grad_node = std::shared_ptr<SyncBatchNormGradNode>(  // NOLINT
+      new SyncBatchNormGradNode(6, 5));
   egr::Controller::Instance().PushBackForceSequentialNodes(grad_node.get());
   // SetAttributes if needed
   grad_node->SetAttributemomentum(momentum);
......
@@ -123,7 +123,7 @@ Conv2dGradNodeFinal::operator()(
       1);
   // Node Construction
-  auto grad_node = std::shared_ptr<Conv2dDoubleGradNodeFinal>(
+  auto grad_node = std::shared_ptr<Conv2dDoubleGradNodeFinal>(  // NOLINT
       new Conv2dDoubleGradNodeFinal(2, 3));
   // SetAttributes if needed
   grad_node->SetAttributestrides(strides);
......
@@ -158,7 +158,7 @@ MultiplyGradNode::operator()(
       1);
   // Node Construction
-  auto grad_node = std::shared_ptr<MultiplyDoubleGradNode>(
+  auto grad_node = std::shared_ptr<MultiplyDoubleGradNode>(  // NOLINT
       new MultiplyDoubleGradNode(2, 3));
   // SetAttributes if needed
   grad_node->SetAttributeaxis(axis);
......
@@ -390,8 +390,9 @@ fused_attention_dygraph_function(
                                      p_autograd_CacheKVOut,
                                      p_autograd_Y);
     // Create GradOpNode
-    auto grad_node = std::shared_ptr<fused_attentionGradNodeCompat>(
-        new fused_attentionGradNodeCompat(20, 23));
+    auto grad_node =
+        std::shared_ptr<fused_attentionGradNodeCompat>(  // NOLINT
+            new fused_attentionGradNodeCompat(20, 23));
     bool pre_layer_norm = false;
     if (attrs.count("pre_layer_norm")) {
......
@@ -184,9 +184,9 @@ fused_bias_dropout_residual_layer_norm_dygraph_function(
                                      p_autograd_LnVariance,
                                      p_autograd_Y);
     // Create GradOpNode
-    auto grad_node =
-        std::shared_ptr<fused_bias_dropout_residual_layer_normGradNodeCompat>(
-            new fused_bias_dropout_residual_layer_normGradNodeCompat(5, 5));
+    auto grad_node = std::shared_ptr<  // NOLINT
+        fused_bias_dropout_residual_layer_normGradNodeCompat>(
+        new fused_bias_dropout_residual_layer_normGradNodeCompat(5, 5));
     // Set Attributes
     grad_node->SetAttrMap(std::move(attrs));
......
@@ -310,8 +310,9 @@ fused_feedforward_dygraph_function(
                                      p_autograd_Dropout1Out,
                                      p_autograd_Dropout2Out);
     // Create GradOpNode
-    auto grad_node = std::shared_ptr<fused_feedforwardGradNodeCompat>(
-        new fused_feedforwardGradNodeCompat(11, 11));
+    auto grad_node =
+        std::shared_ptr<fused_feedforwardGradNodeCompat>(  // NOLINT
+            new fused_feedforwardGradNodeCompat(11, 11));
     bool pre_layer_norm = false;
     if (attrs.count("pre_layer_norm")) {
......
@@ -301,8 +301,9 @@ fused_gate_attention_dygraph_function(
                                      p_autograd_GateOut,
                                      p_autograd_Out);
     // Create GradOpNode
-    auto grad_node = std::shared_ptr<fused_gate_attentionGradNodeCompat>(
-        new fused_gate_attentionGradNodeCompat(9, 12));
+    auto grad_node =
+        std::shared_ptr<fused_gate_attentionGradNodeCompat>(  // NOLINT
+            new fused_gate_attentionGradNodeCompat(9, 12));
     bool merge_qkv = true;
     if (attrs.count("merge_qkv")) {
......
@@ -102,8 +102,9 @@ paddle::Tensor fused_gemm_epilogue_dygraph_function(
     VLOG(6) << " Construct Grad for fused_gemm_epilogue ";
     egr::EagerUtils::PassStopGradient(false, p_autograd_Out);
     // Create GradOpNode
-    auto grad_node = std::shared_ptr<fused_gemm_epilogueGradNodeCompat>(
-        new fused_gemm_epilogueGradNodeCompat(1, 3));
+    auto grad_node =
+        std::shared_ptr<fused_gemm_epilogueGradNodeCompat>(  // NOLINT
+            new fused_gemm_epilogueGradNodeCompat(1, 3));
     // Set Attributes
     grad_node->SetAttrMap(std::move(attrs));
......
@@ -827,9 +827,8 @@ static bool CollectGradInformationFromOpInfo(
   const std::string& in_name = op_proto.inputs()[0].name();
   ins[in_name] = {};
   for (size_t i = 0; i < NUM_CREATED_DUP_INPUTS; i++) {
-    ins[in_name].emplace_back(std::shared_ptr<paddle::imperative::VarBase>(
-        new paddle::imperative::VarBase("auto_" + in_name + "_" +
-                                        std::to_string(i))));
+    ins[in_name].emplace_back(std::make_shared<paddle::imperative::VarBase>(
+        "auto_" + in_name + "_" + std::to_string(i)));
     ins[in_name][i]->SetOverridedStopGradient(false);
     ins[in_name][i]->MutableVar()->GetMutable<phi::DenseTensor>();
   }
@@ -852,8 +851,8 @@ static bool CollectGradInformationFromOpInfo(
     // but we only need to identify the slot name order,
     // therefore fill in 1 single input VarBase is enough in this scenario
-    ins[in_name] = {std::shared_ptr<paddle::imperative::VarBase>(
-        new paddle::imperative::VarBase("auto_" + in_name))};
+    ins[in_name] = {
+        std::make_shared<paddle::imperative::VarBase>("auto_" + in_name)};
     ins[in_name][0]->SetOverridedStopGradient(false);
     ins[in_name][0]->MutableVar()->GetMutable<phi::DenseTensor>();
   }
@@ -870,8 +869,8 @@ static bool CollectGradInformationFromOpInfo(
     // We always create output VarBase regardless of its dispensability.
     // We dont know the exact number of outputs during code generation,
     // however, simply identifying the slot name order would be enough
-    outs[out_name] = {std::shared_ptr<paddle::imperative::VarBase>(
-        new paddle::imperative::VarBase("auto_" + out_name))};
+    outs[out_name] = {
+        std::make_shared<paddle::imperative::VarBase>("auto_" + out_name)};
     outs[out_name][0]->SetOverridedStopGradient(false);
     outs[out_name][0]->MutableVar()->GetMutable<phi::DenseTensor>();
   }
@@ -1179,7 +1178,7 @@ static std::string GenerateGradNodeCreationContent(
   const char* GRAD_OP_NODE_TEMPLATE =
       " auto grad_node = std::shared_ptr<%sGradNodeCompat>(new "
       "%sGradNodeCompat(%d, "
-      "%d));\n";
+      "%d));  // NOLINT\n";
   grad_node_creation_str += " // Create GradOpNode\n";
   grad_node_creation_str += paddle::string::Sprintf(GRAD_OP_NODE_TEMPLATE,
                                                     op_type,
......
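For illustration, with a hypothetical op type `fused_foo` and slot counts 2 and 3 substituted into the `%s`/`%d` placeholders, `paddle::string::Sprintf` expands the concatenated template above into generated source along the lines of:

    auto grad_node = std::shared_ptr<fused_fooGradNodeCompat>(new fused_fooGradNodeCompat(2, 3));  // NOLINT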
@@ -953,13 +953,13 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
         # Helper
         indent = GetIndent(2)
-        # NOTE(Aurelius74): DO NOT use make_shared here. Because some Node contains experimental::Scalar
+        # NOTE(Aurelius84): DO NOT use make_shared here. Because some Node contains experimental::Scalar
         # which contains "complex128" as data. "complex128" is memory-aligned manually. But make_shared
         # request MEMALIGN for allocation (Maybe).
         # See https://stackoverflow.com/questions/31228656/how-can-shared-ptr-disrupt-alignment
         # and https://github.com/MRtrix3/mrtrix3/issues/957
-        node_construction_str = f"{indent}auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"
-        node_assignment_str = f"{indent}grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"
+        node_construction_str = f"{indent}auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));  // NOLINT"
+        node_assignment_str = f"{indent}grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));  // NOLINT"
         # SetAttributes
         set_attributes_list = []
......
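The NOTE above is the reason most of this commit adds `// NOLINT` rather than converting grad-node construction to `make_shared`: `std::make_shared` folds the object into one combined allocation made through the default allocator, which on pre-C++17 toolchains is only guaranteed to align to `alignof(std::max_align_t)`, while a plain `new T` goes through `T`'s own (possibly class-specific or aligned) `operator new`. A sketch of the failure mode, with a hypothetical over-aligned stand-in for the manually aligned complex128 payload:

    #include <memory>

    struct alignas(64) Payload {  // hypothetical stand-in, not a Paddle type
      double re;
      double im;
    };

    int main() {
      // 'new Payload' must honour alignas(64): via aligned operator new
      // in C++17, or via a class-specific operator new before that.
      auto a = std::shared_ptr<Payload>(new Payload{});
      // make_shared places Payload inside a combined node; pre-C++17
      // implementations may align that node only to max_align_t,
      // silently violating alignas(64).
      auto b = std::make_shared<Payload>();
    }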
@@ -1187,8 +1187,8 @@ static PyObject* eager_api_to_uva_tensor(PyObject* self,
                                          PyObject* kwargs) {
   EAGER_TRY
   VLOG(4) << "Running in eager_api_to_uva_tensor.";
-  auto new_tensor = std::shared_ptr<paddle::Tensor>(
-      new paddle::Tensor(egr::Controller::Instance().GenerateUniqueName()));
+  auto new_tensor = std::make_shared<paddle::Tensor>(
+      egr::Controller::Instance().GenerateUniqueName());
   PyObject* obj = PyTuple_GET_ITEM(args, 0);
   auto array = py::cast<py::array>(py::handle(obj));
......
@@ -976,8 +976,8 @@ void BindImperative(py::module *m_ptr) {
       "to_uva_tensor",
       [](const py::object &obj, int device_id) {
         const auto &tracer = imperative::GetCurrentTracer();
-        auto new_tensor = std::shared_ptr<imperative::VarBase>(
-            new imperative::VarBase(tracer->GenerateUniqueName()));
+        auto new_tensor =
+            std::make_shared<imperative::VarBase>(tracer->GenerateUniqueName());
         auto array = obj.cast<py::array>();
         if (py::isinstance<py::array_t<int32_t>>(array)) {
           SetUVATensorFromPyArray<int32_t>(new_tensor, array, device_id);
......
@@ -241,9 +241,7 @@ void benchmark_fluid_scale(const std::shared_ptr<imperative::VarBase>& X,
   for (size_t i = 0; i < max_num_runs; i++) {
     imperative::NameVarBaseMap ins = {{"X", {tmp_out}}};
     imperative::NameVarBaseMap outs = {
-        {"Out",
-         {std::shared_ptr<imperative::VarBase>(
-             new imperative::VarBase(true, "Out"))}}};
+        {"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
     tracer.TraceOp<VarBase>("scale", ins, outs, attrs, place, true);
@@ -277,9 +275,7 @@ void benchmark_fluid_matmul(const std::shared_ptr<imperative::VarBase>& X,
     framework::AttributeMap attrs;
     imperative::NameVarBaseMap ins = {{"X", {tmp_out}}, {"Y", {Y}}};
     imperative::NameVarBaseMap outs = {
-        {"Out",
-         {std::shared_ptr<imperative::VarBase>(
-             new imperative::VarBase(true, "Out"))}}};
+        {"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
     tracer.TraceOp<VarBase>("matmul_v2", ins, outs, attrs, place, true);
@@ -316,17 +312,13 @@ void benchmark_fluid_mlp(
   for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
     // Matmul0
     ins = {{"X", {input0}}, {"Y", {Ws[0]}}};
-    outs = {{"Out",
-             {std::shared_ptr<imperative::VarBase>(
-                 new imperative::VarBase(true, "Out"))}}};
+    outs = {{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
     tracer.TraceOp<VarBase>("matmul_v2", ins, outs, attrs, place, true);
     // EW-Add0
     ins = {{"X", outs["Out"]}, {"Y", {Bs[i]}}};
-    outs = {{"Out",
-             {std::shared_ptr<imperative::VarBase>(
-                 new imperative::VarBase(true, "Out"))}}};
+    outs = {{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
     tracer.TraceOp<VarBase>("elementwise_add", ins, outs, attrs, place, true);
     input0 = outs["Out"][0];
@@ -334,9 +326,7 @@
     // ReduceSum
    ins = {{"X", {input0}}};
-    outs = {{"Out",
-             {std::shared_ptr<imperative::VarBase>(
-                 new imperative::VarBase(true, "Out"))}}};
+    outs = {{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
     attrs = {{"reduce_all", true}};
     tracer.TraceOp<VarBase>("reduce_sum", ins, outs, attrs, place, true);
......
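Where the conversion is safe, as in these benchmarks, `make_shared` forwards its arguments straight to the constructor (so `(true, "Out")` becomes `VarBase(true, "Out")`) and also closes a pre-C++17 leak window when the pointer is built inside a larger expression. A minimal sketch with a hypothetical `VarLike` type:

    #include <memory>
    #include <string>

    struct VarLike {  // hypothetical stand-in for imperative::VarBase
      VarLike(bool persistable, std::string name) {}
    };

    int MayThrow() { return 0; }
    void Consume(std::shared_ptr<VarLike>, int) {}

    int main() {
      // Before C++17, argument evaluation could interleave: MayThrow()
      // running between 'new VarLike(...)' and the shared_ptr constructor
      // would leak the raw pointer. make_shared closes that window.
      Consume(std::make_shared<VarLike>(true, "Out"), MayThrow());
    }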