diff --git a/.clang-tidy b/.clang-tidy
index c31fb9ba8f1af6319b8ab49a6131b9d3c4760749..a4871eddb7fb333620f4100d3cd18382c1dc8626 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -180,7 +180,7 @@ modernize-redundant-void-arg,
 -modernize-shrink-to-fit,
 -modernize-unary-static-assert,
 -modernize-use-bool-literals,
--modernize-use-emplace,
+modernize-use-emplace,
 -modernize-use-equals-default,
 -modernize-use-equals-delete,
 -modernize-use-noexcept,
diff --git a/paddle/fluid/distributed/auto_parallel/spmd_rules/common.cc b/paddle/fluid/distributed/auto_parallel/spmd_rules/common.cc
index a0f46e1c462990d8b343ec21887e05e791968d7d..8c71bf111a948fa67cf8b8dfbd9f8c1fa170d807 100644
--- a/paddle/fluid/distributed/auto_parallel/spmd_rules/common.cc
+++ b/paddle/fluid/distributed/auto_parallel/spmd_rules/common.cc
@@ -203,8 +203,7 @@ GetAxesDimsMappingPair(const std::vector& tensor_axes,
   std::vector>> res;
   size_t ntensor = specs.size();
   for (size_t i = 0; i < ntensor; ++i) {
-    res.emplace_back(std::pair>(
-        tensor_axes[i], specs[i].dims_mapping()));
+    res.emplace_back(tensor_axes[i], specs[i].dims_mapping());
   }
   return res;
 }
diff --git a/paddle/fluid/distributed/collective/reducer.cc b/paddle/fluid/distributed/collective/reducer.cc
index fbd7c7ebefb42a19ec04154d33503a89745ce881..c3262b8db8d5688441ec6f9575da8683a0c8d5f1 100644
--- a/paddle/fluid/distributed/collective/reducer.cc
+++ b/paddle/fluid/distributed/collective/reducer.cc
@@ -609,8 +609,8 @@ void EagerReducer::InitializeDenseGroups(
     p_group->length_.push_back(size);
 
     // for concat operator
-    p_group->origin_shapes_.push_back(IntArray(tensor.shape()));
-    p_group->dense_tensors_.push_back(phi::DenseTensor());
+    p_group->origin_shapes_.emplace_back(tensor.shape());
+    p_group->dense_tensors_.emplace_back();
 
     const auto &dtype = tensor.dtype();
     const auto &inner_place = tensor.impl()->place();
diff --git a/paddle/fluid/distributed/ps/table/graph/graph_node.cc b/paddle/fluid/distributed/ps/table/graph/graph_node.cc
index c505d49ab06a73113d8d77da789358d24a84d7c8..8be6734992ee9cf71f6a3952c485a90385cfa0ac 100644
--- a/paddle/fluid/distributed/ps/table/graph/graph_node.cc
+++ b/paddle/fluid/distributed/ps/table/graph/graph_node.cc
@@ -114,7 +114,7 @@ void FeatureNode::recover_from_buffer(char* buffer) {
     memcpy(str, buffer, feat_len);
     buffer += feat_len;
     str[feat_len] = '\0';
-    feature.push_back(std::string(str));
+    feature.push_back(str);  // NOLINT
   }
 }
 }  // namespace distributed
diff --git a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
index 2f24f303f2eb047d3dbbf0d6684a67a81517bbd7..7d96e0f49d1adff789f327c795c3046ea06cf045 100644
--- a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
+++ b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
@@ -88,7 +88,7 @@ int32_t SSDSparseTable::PullSparse(float* pull_values,
                                    _real_local_shard_num);
   for (size_t i = 0; i < num; ++i) {
     int shard_id = (keys[i] % _sparse_table_shard_num) % _avg_local_shard_num;
-    task_keys[shard_id].push_back({keys[i], i});
+    task_keys[shard_id].emplace_back(keys[i], i);
   }
 
   std::atomic missed_keys{0};
@@ -202,8 +202,8 @@ int32_t SSDSparseTable::PullSparsePtr(int shard_id,
       auto itr = local_shard.find(key);
       if (itr == local_shard.end()) {
         cur_ctx->batch_index.push_back(i);
-        cur_ctx->batch_keys.push_back(rocksdb::Slice(
-            (char*)&(pull_keys[i]), sizeof(uint64_t)));  // NOLINT
+        cur_ctx->batch_keys.emplace_back(
+            reinterpret_cast(&(pull_keys[i])), sizeof(uint64_t));
         if (cur_ctx->batch_keys.size() == 1024) {
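// Editorial sketch (not part of the patch): the hunks above show the basic
// modernize-use-emplace rewrite that this change enables in .clang-tidy.
// push_back takes an already-constructed value (a std::pair, rocksdb::Slice,
// braced temporary, ...) and copies or moves it into the container, while
// emplace_back forwards its arguments to the element's constructor so the
// element is built directly in the vector's storage. The snippet below is a
// self-contained illustration; the names echo the patch but are not repository
// code.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<uint64_t, size_t>> task_keys;
  uint64_t key = 42;
  size_t pos = 7;

  // push_back: a temporary pair is constructed first, then moved in.
  task_keys.push_back(std::pair<uint64_t, size_t>(key, pos));
  // emplace_back: the arguments go straight to the pair constructor in place.
  task_keys.emplace_back(key, pos);
  return task_keys.size() == 2 ? 0 : 1;
}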
cur_ctx->batch_values.resize(cur_ctx->batch_keys.size()); cur_ctx->status.resize(cur_ctx->batch_keys.size()); @@ -334,7 +334,7 @@ int32_t SSDSparseTable::PushSparse(const uint64_t* keys, _real_local_shard_num); for (size_t i = 0; i < num; ++i) { int shard_id = (keys[i] % _sparse_table_shard_num) % _avg_local_shard_num; - task_keys[shard_id].push_back({keys[i], i}); + task_keys[shard_id].emplace_back(keys[i], i); } for (int shard_id = 0; shard_id < _real_local_shard_num; ++shard_id) { tasks[shard_id] = @@ -440,7 +440,7 @@ int32_t SSDSparseTable::PushSparse(const uint64_t* keys, _real_local_shard_num); for (size_t i = 0; i < num; ++i) { int shard_id = (keys[i] % _sparse_table_shard_num) % _avg_local_shard_num; - task_keys[shard_id].push_back({keys[i], i}); + task_keys[shard_id].emplace_back(keys[i], i); } for (int shard_id = 0; shard_id < _real_local_shard_num; ++shard_id) { tasks[shard_id] = @@ -1658,11 +1658,10 @@ int32_t SSDSparseTable::LoadWithString( // ssd or mem if (_value_accesor->SaveSSD(data_buffer_ptr)) { tmp_key.emplace_back(key); - ssd_keys.emplace_back(std::make_pair( - reinterpret_cast(&tmp_key.back()), sizeof(uint64_t))); - ssd_values.emplace_back( - std::make_pair(reinterpret_cast(data_buffer_ptr), - value_size * sizeof(float))); + ssd_keys.emplace_back(reinterpret_cast(&tmp_key.back()), + sizeof(uint64_t)); + ssd_values.emplace_back(reinterpret_cast(data_buffer_ptr), + value_size * sizeof(float)); data_buffer_ptr += feature_value_size; if (static_cast(ssd_keys.size()) == FLAGS_pserver_load_batch_size) { diff --git a/paddle/fluid/framework/attribute_test.cc b/paddle/fluid/framework/attribute_test.cc index 3c3f333d203426f0c559c9c80b58485409c97afd..83b9f099d2bd78d1a0966e2423bce6ac54f9f381 100644 --- a/paddle/fluid/framework/attribute_test.cc +++ b/paddle/fluid/framework/attribute_test.cc @@ -297,7 +297,7 @@ TEST(Attribute, ProtoAttrToAttribute_scalars) { std::vector scalars; for (int i = 0; i < 10; i++) { - scalars.push_back(paddle::experimental::Scalar(i)); + scalars.emplace_back(i); } std::vector proto_scalars; proto_scalars.reserve(scalars.size()); diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc index b5584e94e8fc495db0ef42b128b89f5cb105b376..45d29c6c2ea9ba594b77eadde6ea5e5361b3023a 100644 --- a/paddle/fluid/framework/data_feed.cc +++ b/paddle/fluid/framework/data_feed.cc @@ -1084,13 +1084,13 @@ void MultiSlotInMemoryDataFeed::Init( feed_vec_.resize(use_slots_.size()); const int kEstimatedFeasignNumPerSlot = 5; // Magic Number for (size_t i = 0; i < all_slot_num; i++) { - batch_float_feasigns_.push_back(std::vector()); - batch_uint64_feasigns_.push_back(std::vector()); + batch_float_feasigns_.emplace_back(); + batch_uint64_feasigns_.emplace_back(); batch_float_feasigns_[i].reserve(default_batch_size_ * kEstimatedFeasignNumPerSlot); batch_uint64_feasigns_[i].reserve(default_batch_size_ * kEstimatedFeasignNumPerSlot); - offset_.push_back(std::vector()); + offset_.emplace_back(); offset_[i].reserve(default_batch_size_ + 1); // Each lod info will prepend a zero } @@ -1224,7 +1224,7 @@ bool MultiSlotInMemoryDataFeed::ParseOneInstanceFromPipe(Record* instance) { } FeatureFeasign f; f.float_feasign_ = feasign; - instance->float_feasigns_.push_back(FeatureItem(f, idx)); + instance->float_feasigns_.emplace_back(f, idx); } } else if (all_slots_type_[i][0] == 'u') { // uint64 for (int j = 0; j < num; ++j) { @@ -1236,7 +1236,7 @@ bool MultiSlotInMemoryDataFeed::ParseOneInstanceFromPipe(Record* instance) { } FeatureFeasign f; f.uint64_feasign_ = 
feasign; - instance->uint64_feasigns_.push_back(FeatureItem(f, idx)); + instance->uint64_feasigns_.emplace_back(f, idx); } } pos = endptr - str; @@ -1297,7 +1297,7 @@ bool MultiSlotInMemoryDataFeed::ParseOneInstance(Record* instance) { } FeatureFeasign f; f.float_feasign_ = feasign; - instance->float_feasigns_.push_back(FeatureItem(f, idx)); + instance->float_feasigns_.emplace_back(f, idx); } } else if (all_slots_type_[i][0] == 'u') { // uint64 for (int j = 0; j < num; ++j) { @@ -1307,7 +1307,7 @@ bool MultiSlotInMemoryDataFeed::ParseOneInstance(Record* instance) { } FeatureFeasign f; f.uint64_feasign_ = feasign; - instance->uint64_feasigns_.push_back(FeatureItem(f, idx)); + instance->uint64_feasigns_.emplace_back(f, idx); } } pos = endptr - str; @@ -2093,13 +2093,13 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) { feed_vec_.resize(used_slots_info_.size()); const int kEstimatedFeasignNumPerSlot = 5; // Magic Number for (size_t i = 0; i < all_slot_num; i++) { - batch_float_feasigns_.push_back(std::vector()); - batch_uint64_feasigns_.push_back(std::vector()); + batch_float_feasigns_.emplace_back(); + batch_uint64_feasigns_.emplace_back(); batch_float_feasigns_[i].reserve(default_batch_size_ * kEstimatedFeasignNumPerSlot); batch_uint64_feasigns_[i].reserve(default_batch_size_ * kEstimatedFeasignNumPerSlot); - offset_.push_back(std::vector()); + offset_.emplace_back(); offset_[i].reserve(default_batch_size_ + 1); // Each lod info will prepend a zero } diff --git a/paddle/fluid/framework/data_set.cc b/paddle/fluid/framework/data_set.cc index c35e15b279311800d8d0c64a81a7a67bd1d74486..4243ca6175f861aa63df22ded97cda34965cabeb 100644 --- a/paddle/fluid/framework/data_set.cc +++ b/paddle/fluid/framework/data_set.cc @@ -510,8 +510,8 @@ void DatasetImpl::LoadIntoMemory() { #endif } else { for (int64_t i = 0; i < thread_num_; ++i) { - load_threads.push_back(std::thread( - &paddle::framework::DataFeed::LoadIntoMemory, readers_[i].get())); + load_threads.emplace_back(&paddle::framework::DataFeed::LoadIntoMemory, + readers_[i].get()); } for (std::thread& t : load_threads) { t.join(); @@ -534,16 +534,16 @@ void DatasetImpl::PreLoadIntoMemory() { CHECK(static_cast(preload_thread_num_) == preload_readers_.size()); preload_threads_.clear(); for (int64_t i = 0; i < preload_thread_num_; ++i) { - preload_threads_.push_back( - std::thread(&paddle::framework::DataFeed::LoadIntoMemory, - preload_readers_[i].get())); + preload_threads_.emplace_back( + &paddle::framework::DataFeed::LoadIntoMemory, + preload_readers_[i].get()); } } else { CHECK(static_cast(thread_num_) == readers_.size()); preload_threads_.clear(); for (int64_t i = 0; i < thread_num_; ++i) { - preload_threads_.push_back(std::thread( - &paddle::framework::DataFeed::LoadIntoMemory, readers_[i].get())); + preload_threads_.emplace_back( + &paddle::framework::DataFeed::LoadIntoMemory, readers_[i].get()); } } VLOG(3) << "DatasetImpl::PreLoadIntoMemory() end"; @@ -849,7 +849,7 @@ void MultiSlotDataset::GlobalShuffle(int thread_num) { } VLOG(3) << "start global shuffle threads, num = " << thread_num; for (int i = 0; i < thread_num; ++i) { - global_shuffle_threads.push_back(std::thread(global_shuffle_func)); + global_shuffle_threads.emplace_back(global_shuffle_func); } for (std::thread& t : global_shuffle_threads) { t.join(); @@ -1618,7 +1618,7 @@ void MultiSlotDataset::GetRandomData( for (auto slot : slots_to_replace) { auto range = rand_rec.feas_.equal_range(slot); for (auto it = range.first; it != range.second; ++it) { - 
new_rec.uint64_feasigns_.push_back({it->second, it->first}); + new_rec.uint64_feasigns_.emplace_back(it->second, it->first); debug_push_cnt += 1; } } diff --git a/paddle/fluid/framework/dist_multi_trainer.cc b/paddle/fluid/framework/dist_multi_trainer.cc index 3a5787a665cfc4517c40dd79f19bd6a8f17c971c..5e11de0ea9d1c9e92a60fb173f9418f52b26f491 100644 --- a/paddle/fluid/framework/dist_multi_trainer.cc +++ b/paddle/fluid/framework/dist_multi_trainer.cc @@ -88,7 +88,7 @@ void DistMultiTrainer::InitDumpEnv() { } } for (int i = 0; i < dump_thread_num_; i++) { - dump_thread_.push_back(std::thread([this, i] { DumpWork(i); })); + dump_thread_.emplace_back([this, i] { DumpWork(i); }); } } @@ -131,11 +131,10 @@ void DistMultiTrainer::InitOtherEnv(const ProgramDesc &main_program) { void DistMultiTrainer::Run() { for (int thidx = 0; thidx < thread_num_; ++thidx) { if (!debug_) { - threads_.push_back( - std::thread(&DeviceWorker::TrainFiles, workers_[thidx].get())); + threads_.emplace_back(&DeviceWorker::TrainFiles, workers_[thidx].get()); } else { - threads_.push_back(std::thread(&DeviceWorker::TrainFilesWithProfiler, - workers_[thidx].get())); + threads_.emplace_back(&DeviceWorker::TrainFilesWithProfiler, + workers_[thidx].get()); } } } diff --git a/paddle/fluid/framework/downpour_worker.cc b/paddle/fluid/framework/downpour_worker.cc index 34e5fcdf980b46211b645e9d3ee1e8e8c2888d13..7d16226bcfb58f89944c1c9407e783c1c948bf9d 100644 --- a/paddle/fluid/framework/downpour_worker.cc +++ b/paddle/fluid/framework/downpour_worker.cc @@ -107,14 +107,14 @@ void DownpourWorker::Initialize(const TrainerDesc& desc) { uint64_t dest_table = copy_table_config_.dest_sparse_tables(i); VLOG(3) << "copy_sparse_tables_ push back " << src_table << "->" << dest_table; - copy_sparse_tables_.push_back(std::make_pair(src_table, dest_table)); + copy_sparse_tables_.emplace_back(src_table, dest_table); } for (int i = 0; i < copy_table_config_.src_dense_tables_size(); ++i) { uint64_t src_table = copy_table_config_.src_dense_tables(i); uint64_t dest_table = copy_table_config_.dest_dense_tables(i); VLOG(3) << "copy_dense_tables_ push back " << src_table << "->" << dest_table; - copy_dense_tables_.push_back(std::make_pair(src_table, dest_table)); + copy_dense_tables_.emplace_back(src_table, dest_table); } for (auto& m : copy_table_config_.table_denpendency_map()) { if (sparse_key_names_.find(m.key()) != sparse_key_names_.end()) { diff --git a/paddle/fluid/framework/downpour_worker_opt.cc b/paddle/fluid/framework/downpour_worker_opt.cc index 539673ba39e8c5668042b4d826c9c3d8897eacc5..7810ad32c98ac1ba49e8389ebc464be7f14996ea 100644 --- a/paddle/fluid/framework/downpour_worker_opt.cc +++ b/paddle/fluid/framework/downpour_worker_opt.cc @@ -168,14 +168,14 @@ void DownpourWorkerOpt::Initialize(const TrainerDesc& desc) { uint64_t dest_table = copy_table_config_.dest_sparse_tables(i); VLOG(3) << "copy_sparse_tables_ push back " << src_table << "->" << dest_table; - copy_sparse_tables_.push_back(std::make_pair(src_table, dest_table)); + copy_sparse_tables_.emplace_back(src_table, dest_table); } for (int i = 0; i < copy_table_config_.src_dense_tables_size(); ++i) { uint64_t src_table = copy_table_config_.src_dense_tables(i); uint64_t dest_table = copy_table_config_.dest_dense_tables(i); VLOG(3) << "copy_dense_tables_ push back " << src_table << "->" << dest_table; - copy_dense_tables_.push_back(std::make_pair(src_table, dest_table)); + copy_dense_tables_.emplace_back(src_table, dest_table); } for (auto& m : 
copy_table_config_.table_denpendency_map()) { if (sparse_key_names_.find(m.key()) != sparse_key_names_.end()) { diff --git a/paddle/fluid/framework/io/fs.cc b/paddle/fluid/framework/io/fs.cc index 3d5f956c26b962d97428c554b2ca18837046b366..a39147a97cf7e1ccd8db2594a763534060ea6369 100644 --- a/paddle/fluid/framework/io/fs.cc +++ b/paddle/fluid/framework/io/fs.cc @@ -179,7 +179,7 @@ std::vector localfs_list(const std::string& path) { std::vector list; while (reader.getline(&*pipe)) { - list.push_back(reader.get()); + list.emplace_back(reader.get()); } return list; diff --git a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc index a96d94cf38c9db54f64d28c31215b05e08a97384..1c2b35c691a08ac3bcdae26d14042834c364b965 100644 --- a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc @@ -174,8 +174,8 @@ int EmbeddingEltwiseLayerNormFusePass::BuildFusion( return; } std::vector> ins; - ins.push_back(std::make_pair(lookup_table1_x, lookup_table1_w)); - ins.push_back(std::make_pair(lookup_table2_x, lookup_table2_w)); + ins.emplace_back(lookup_table1_x, lookup_table1_w); + ins.emplace_back(lookup_table2_x, lookup_table2_w); start_pattern_in_nodes.push_back(ins); start_pattern_out_node.push_back(eltwise_add_out); @@ -294,7 +294,7 @@ int EmbeddingEltwiseLayerNormFusePass::BuildFusion( for (size_t k = 0; k < end_pattern_elt_out.size(); ++k) { if (tmp == end_pattern_elt_out[k]) { - fusion_ids.push_back(std::make_pair(i, std::make_pair(k, js))); + fusion_ids.emplace_back(i, std::make_pair(k, js)); break; } } diff --git a/paddle/fluid/framework/multi_trainer.cc b/paddle/fluid/framework/multi_trainer.cc index 923b8c406c0a5f19fcfd3e671d02f207a5830488..c1ffe1f46e877c1d53f95833d3b5fbe76a9e8697 100644 --- a/paddle/fluid/framework/multi_trainer.cc +++ b/paddle/fluid/framework/multi_trainer.cc @@ -107,7 +107,7 @@ void MultiTrainer::InitDumpEnv() { } } for (int i = 0; i < dump_thread_num_; i++) { - dump_thread_.push_back(std::thread([this, i] { DumpWork(i); })); + dump_thread_.emplace_back([this, i] { DumpWork(i); }); } } diff --git a/paddle/fluid/framework/new_executor/executor_statistics.cc b/paddle/fluid/framework/new_executor/executor_statistics.cc index fcb3f5edae998e8f38e49eb270b76bb403ab42df..14436e9c76358596214840133b3b5d8ee725730e 100644 --- a/paddle/fluid/framework/new_executor/executor_statistics.cc +++ b/paddle/fluid/framework/new_executor/executor_statistics.cc @@ -174,31 +174,31 @@ int StatisticsEngine::Init(const platform::NodeTrees& trees) { void StatisticsEngine::InitStdEvents() { name2idx_["Total"] = names_.size(); - names_.push_back("Total"); + names_.emplace_back("Total"); name2idx_["PythonEnd"] = names_.size(); - names_.push_back("PythonEnd"); + names_.emplace_back("PythonEnd"); name2idx_["CplusplusEnd"] = names_.size(); - names_.push_back("CplusplusEnd"); + names_.emplace_back("CplusplusEnd"); name2idx_["RunOp"] = names_.size(); - names_.push_back("RunOp"); + names_.emplace_back("RunOp"); name2idx_["LaunchKernel"] = names_.size(); - names_.push_back("LaunchKernel"); + names_.emplace_back("LaunchKernel"); name2idx_["OpCompute"] = names_.size(); - names_.push_back("OpCompute"); + names_.emplace_back("OpCompute"); name2idx_["OpInfershape"] = names_.size(); - names_.push_back("OpInfershape"); + names_.emplace_back("OpInfershape"); name2idx_["DataTransform"] = names_.size(); - names_.push_back("DataTransform"); + 
names_.emplace_back("DataTransform"); name2idx_["GarbageCollect"] = names_.size(); - names_.push_back("GarbageCollect"); + names_.emplace_back("GarbageCollect"); name2idx_["CalcNextOp"] = names_.size(); - names_.push_back("CalcNextOp"); + names_.emplace_back("CalcNextOp"); name2idx_["AllocateDeviceMem"] = names_.size(); - names_.push_back("AllocateDeviceMem"); + names_.emplace_back("AllocateDeviceMem"); name2idx_["FreeDeviceMem"] = names_.size(); - names_.push_back("FreeDeviceMem"); + names_.emplace_back("FreeDeviceMem"); name2idx_["ThreadpoolAddTask"] = names_.size(); - names_.push_back("ThreadpoolAddTask"); + names_.emplace_back("ThreadpoolAddTask"); size_t n = names_.size(); filters_.resize(n); diff --git a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc index d4606c4bef003d7503f66819c70caa8f5b387fa0..1dc918bf51966de1678789a7ec898387e03381ad 100644 --- a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc +++ b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc @@ -299,8 +299,8 @@ std::shared_ptr TransferLayout(const std::string& var_name, VLOG(3) << "Create Variable " << *new_var_name << " locally, which pointer is " << ptr << "Variable Type " << var_type; - var_scope->MutableDataTransferAddedVars().push_back( - std::make_pair(*new_var_name, var_type)); + var_scope->MutableDataTransferAddedVars().emplace_back(*new_var_name, + var_type); var_scope->AddVar(*new_var_name, nullptr); // 2. Construct VariableNameMap @@ -347,8 +347,8 @@ std::shared_ptr TransferDtype(const std::string& var_name, VLOG(3) << "Create Variable " << *new_var_name << " locally, which pointer is " << ptr << "Variable Type " << var_type; - var_scope->MutableDataTransferAddedVars().push_back( - std::make_pair(*new_var_name, var_type)); + var_scope->MutableDataTransferAddedVars().emplace_back(*new_var_name, + var_type); var_scope->AddVar(*new_var_name, nullptr); // 2. Construct VariableNameMap @@ -398,8 +398,8 @@ std::shared_ptr TransferDevice(const std::string& var_name, VLOG(3) << "Create Variable " << *new_var_name << " locally, which pointer is " << ptr << "Variable Type " << var_type; - var_scope->MutableDataTransferAddedVars().push_back( - std::make_pair(*new_var_name, var_type)); + var_scope->MutableDataTransferAddedVars().emplace_back(*new_var_name, + var_type); var_scope->AddVar(*new_var_name, nullptr); // 2. 
Construct VariableNameMap diff --git a/paddle/fluid/framework/op_def_api.cc b/paddle/fluid/framework/op_def_api.cc index b62f17987e651bc842296b9b5120837baa986545..1204c95dedc198f6b42b040a63b7765d554c64fa 100644 --- a/paddle/fluid/framework/op_def_api.cc +++ b/paddle/fluid/framework/op_def_api.cc @@ -61,9 +61,9 @@ const proto::OpDef& GetOpDef(const std::string& op_name) { } if (op_def.type() != op_name) { LOG(WARNING) << op_name << ".pbtxt has error type :" << op_def.type(); - ops_definition.emplace(std::make_pair(op_name, proto::OpDef())); + ops_definition.emplace(op_name, proto::OpDef()); } else { - ops_definition.emplace(std::make_pair(op_name, std::move(op_def))); + ops_definition.emplace(op_name, std::move(op_def)); } } } diff --git a/paddle/fluid/framework/op_desc_test.cc b/paddle/fluid/framework/op_desc_test.cc index 69ee71404a025abea66922ffc8679d265d37e973..174612e6b3f738bcff2a0932af9a9490fc75d00f 100644 --- a/paddle/fluid/framework/op_desc_test.cc +++ b/paddle/fluid/framework/op_desc_test.cc @@ -30,7 +30,7 @@ TEST(OpDesc, SetScalarsAttr) { std::vector scalars; for (int i = 0; i < 4; i++) { - scalars.push_back(paddle::experimental::Scalar(i)); + scalars.emplace_back(i); } opdesc.SetPlainAttr("scalars", scalars); ASSERT_EQ(opdesc.GetAttrType("scalars"), paddle::framework::proto::SCALARS); diff --git a/paddle/fluid/framework/pipeline_trainer.cc b/paddle/fluid/framework/pipeline_trainer.cc index bf3a0ea31cf25ea8dc7043422aafc26e88a4d47b..4566927e068ca607bf0d8f636cb5554e7eaa3eee 100644 --- a/paddle/fluid/framework/pipeline_trainer.cc +++ b/paddle/fluid/framework/pipeline_trainer.cc @@ -64,8 +64,7 @@ void PipelineTrainer::InitDumpEnv() { // TODO(sandyhouse): should make it as a config dump_thread_num_ = 1; for (int i = 0; i < dump_thread_num_; i++) { - dump_thread_.push_back( - std::thread(std::bind(&TrainerBase::DumpWork, this, i))); + dump_thread_.emplace_back(std::bind(&TrainerBase::DumpWork, this, i)); } } diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc index c3153a80a02b7ecbc10efdf2e6b881ae146bee2a..502eeb59114d0ef7557713022c94c959eead0471 100644 --- a/paddle/fluid/imperative/reducer.cc +++ b/paddle/fluid/imperative/reducer.cc @@ -377,7 +377,7 @@ void Reducer::InitializeDenseGroups( p_group->length_.push_back(size); // for concat operator - p_group->dense_tensors_.push_back(phi::DenseTensor()); + p_group->dense_tensors_.emplace_back(); // check the dtype and place, it must be same. 
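// Editorial sketch (not part of the patch): calls such as
// push_back(phi::DenseTensor()) in the reducer hunks above become an
// argument-free emplace_back(), which default-constructs the new element
// directly inside the container instead of building a temporary and moving it.
// DenseTensorLike below is a hypothetical stand-in type, not the real
// phi::DenseTensor.
#include <vector>

struct DenseTensorLike {
  DenseTensorLike() = default;
};

int main() {
  std::vector<DenseTensorLike> dense_tensors;
  dense_tensors.push_back(DenseTensorLike());  // temporary, then move
  dense_tensors.emplace_back();                // default-constructed in place
  return dense_tensors.size() == 2 ? 0 : 1;
}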
const auto &dtype = var->DataType(); diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 929dcb34a2be26500a25948ebabf2ed4838c7c49..037b4a32211a718b888f34f90b8aaa84fcc0b0db 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -2338,7 +2338,7 @@ void AnalysisPredictor::StatisticShapeRangeInfo() { auto ShapeMaxFreq = [](const std::map &m) -> int32_t { std::vector> counter; - for (auto &it : m) counter.push_back(it); + for (auto &it : m) counter.emplace_back(it); std::sort(counter.begin(), counter.end(), [](std::pair &a, diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index 28353150c265c65c27152b03ea8ede981f7a418e..05ce09cca1a42dfea51735f8f62747a5856c4a41 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -373,7 +373,7 @@ CreatePaddlePredictor( std::vector flags; if (config.fraction_of_gpu_memory >= 0.0f || config.fraction_of_gpu_memory <= 0.95f) { - flags.push_back("dummpy"); + flags.emplace_back("dummpy"); std::string flag = "--fraction_of_gpu_memory_to_use=" + num2str(config.fraction_of_gpu_memory); flags.push_back(flag); diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index cb94cf4a5a5be82c16b91d56b4fec0efebb2cd61..42a0606fa146b4b2d164f840ac64ee9eeeea113f 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -393,7 +393,7 @@ void CpuPassStrategy::EnableMKLDNN() { void CpuPassStrategy::EnableMkldnnQuantizer() { #ifdef PADDLE_WITH_MKLDNN if (!use_mkldnn_quantizer_) { - passes_.push_back("cpu_quantize_placement_pass"); + passes_.emplace_back("cpu_quantize_placement_pass"); } use_mkldnn_quantizer_ = true; #else @@ -404,12 +404,12 @@ void CpuPassStrategy::EnableMkldnnQuantizer() { void CpuPassStrategy::EnableMkldnnBfloat16() { #ifdef PADDLE_WITH_MKLDNN if (!use_mkldnn_bfloat16_) { - passes_.push_back("fc_mkldnn_pass"); - passes_.push_back("fc_act_mkldnn_fuse_pass"); + passes_.emplace_back("fc_mkldnn_pass"); + passes_.emplace_back("fc_act_mkldnn_fuse_pass"); - passes_.push_back("cpu_bfloat16_placement_pass"); - passes_.push_back("cpu_bfloat16_pass"); - passes_.push_back("cpu_quantize_squash_pass"); + passes_.emplace_back("cpu_bfloat16_placement_pass"); + passes_.emplace_back("cpu_bfloat16_pass"); + passes_.emplace_back("cpu_quantize_squash_pass"); } use_mkldnn_bfloat16_ = true; #else @@ -421,60 +421,60 @@ void CpuPassStrategy::EnableMkldnnInt8() { #ifdef PADDLE_WITH_MKLDNN if (!use_mkldnn_int8_) { passes_.clear(); - passes_.push_back("simplify_with_basic_ops_pass"); - passes_.push_back("quant_dequant_mkldnn_pass"); - passes_.push_back("mkldnn_placement_pass"); - passes_.push_back("constant_folding_pass"); - passes_.push_back("squeeze2_transpose2_onednn_fuse_pass"); - passes_.push_back("layer_norm_fuse_pass"); - passes_.push_back("attention_lstm_fuse_pass"); - passes_.push_back("seqconv_eltadd_relu_fuse_pass"); - passes_.push_back("fc_lstm_fuse_pass"); - passes_.push_back("mul_lstm_fuse_pass"); - passes_.push_back("fc_gru_fuse_pass"); - passes_.push_back("mul_gru_fuse_pass"); - passes_.push_back("multi_gru_fuse_pass"); - passes_.push_back("multi_gru_seq_fuse_pass"); - passes_.push_back("seq_concat_fc_fuse_pass"); - passes_.push_back("gpu_cpu_squeeze2_matmul_fuse_pass"); - passes_.push_back("gpu_cpu_reshape2_matmul_fuse_pass"); - 
passes_.push_back("gpu_cpu_flatten2_matmul_fuse_pass"); - passes_.push_back("matmul_v2_scale_fuse_pass"); - passes_.push_back("squared_mat_sub_fuse_pass"); - passes_.push_back("is_test_pass"); - passes_.push_back("gpu_cpu_map_matmul_v2_to_mul_pass"); - passes_.push_back("gpu_cpu_map_matmul_v2_to_matmul_pass"); - passes_.push_back("matmul_scale_fuse_pass"); - passes_.push_back("gpu_cpu_map_matmul_to_mul_pass"); - passes_.push_back("repeated_fc_relu_fuse_pass"); - passes_.push_back("depthwise_conv_mkldnn_pass"); - passes_.push_back("conv_bn_fuse_pass"); - passes_.push_back("conv_eltwiseadd_bn_fuse_pass"); - passes_.push_back("conv_affine_channel_mkldnn_fuse_pass"); - passes_.push_back("conv_transpose_bn_fuse_pass"); - passes_.push_back("conv_transpose_eltwiseadd_bn_fuse_pass"); - passes_.push_back("conv_bias_mkldnn_fuse_pass"); - passes_.push_back("conv_transpose_bias_mkldnn_fuse_pass"); - passes_.push_back("conv_elementwise_add_mkldnn_fuse_pass"); - passes_.push_back("conv_activation_mkldnn_fuse_pass"); - passes_.push_back("fc_fuse_pass"); - passes_.push_back("repeated_fc_relu_fuse_pass"); - passes_.push_back("fc_mkldnn_pass"); - passes_.push_back("fc_act_mkldnn_fuse_pass"); - passes_.push_back("matmul_transpose_reshape_mkldnn_fuse_pass"); - passes_.push_back("batch_norm_act_fuse_pass"); - passes_.push_back("softplus_activation_onednn_fuse_pass"); - passes_.push_back("compute_propagate_scales_mkldnn_pass"); - passes_.push_back("scale_matmul_fuse_pass"); - passes_.push_back("reshape_transpose_matmul_mkldnn_fuse_pass"); - passes_.push_back("matmul_elementwise_add_mkldnn_fuse_pass"); - passes_.push_back("operator_scale_onednn_fuse_pass"); - passes_.push_back("operator_unsqueeze2_onednn_fuse_pass"); - passes_.push_back("operator_reshape2_onednn_fuse_pass"); - passes_.push_back("cpu_quantize_placement_pass"); - passes_.push_back("cpu_quantize_pass"); - passes_.push_back("cpu_quantize_squash_pass"); - passes_.push_back("quant_transpose2_dequant_onednn_fuse_pass"); + passes_.emplace_back("simplify_with_basic_ops_pass"); + passes_.emplace_back("quant_dequant_mkldnn_pass"); + passes_.emplace_back("mkldnn_placement_pass"); + passes_.emplace_back("constant_folding_pass"); + passes_.emplace_back("squeeze2_transpose2_onednn_fuse_pass"); + passes_.emplace_back("layer_norm_fuse_pass"); + passes_.emplace_back("attention_lstm_fuse_pass"); + passes_.emplace_back("seqconv_eltadd_relu_fuse_pass"); + passes_.emplace_back("fc_lstm_fuse_pass"); + passes_.emplace_back("mul_lstm_fuse_pass"); + passes_.emplace_back("fc_gru_fuse_pass"); + passes_.emplace_back("mul_gru_fuse_pass"); + passes_.emplace_back("multi_gru_fuse_pass"); + passes_.emplace_back("multi_gru_seq_fuse_pass"); + passes_.emplace_back("seq_concat_fc_fuse_pass"); + passes_.emplace_back("gpu_cpu_squeeze2_matmul_fuse_pass"); + passes_.emplace_back("gpu_cpu_reshape2_matmul_fuse_pass"); + passes_.emplace_back("gpu_cpu_flatten2_matmul_fuse_pass"); + passes_.emplace_back("matmul_v2_scale_fuse_pass"); + passes_.emplace_back("squared_mat_sub_fuse_pass"); + passes_.emplace_back("is_test_pass"); + passes_.emplace_back("gpu_cpu_map_matmul_v2_to_mul_pass"); + passes_.emplace_back("gpu_cpu_map_matmul_v2_to_matmul_pass"); + passes_.emplace_back("matmul_scale_fuse_pass"); + passes_.emplace_back("gpu_cpu_map_matmul_to_mul_pass"); + passes_.emplace_back("repeated_fc_relu_fuse_pass"); + passes_.emplace_back("depthwise_conv_mkldnn_pass"); + passes_.emplace_back("conv_bn_fuse_pass"); + passes_.emplace_back("conv_eltwiseadd_bn_fuse_pass"); + 
passes_.emplace_back("conv_affine_channel_mkldnn_fuse_pass"); + passes_.emplace_back("conv_transpose_bn_fuse_pass"); + passes_.emplace_back("conv_transpose_eltwiseadd_bn_fuse_pass"); + passes_.emplace_back("conv_bias_mkldnn_fuse_pass"); + passes_.emplace_back("conv_transpose_bias_mkldnn_fuse_pass"); + passes_.emplace_back("conv_elementwise_add_mkldnn_fuse_pass"); + passes_.emplace_back("conv_activation_mkldnn_fuse_pass"); + passes_.emplace_back("fc_fuse_pass"); + passes_.emplace_back("repeated_fc_relu_fuse_pass"); + passes_.emplace_back("fc_mkldnn_pass"); + passes_.emplace_back("fc_act_mkldnn_fuse_pass"); + passes_.emplace_back("matmul_transpose_reshape_mkldnn_fuse_pass"); + passes_.emplace_back("batch_norm_act_fuse_pass"); + passes_.emplace_back("softplus_activation_onednn_fuse_pass"); + passes_.emplace_back("compute_propagate_scales_mkldnn_pass"); + passes_.emplace_back("scale_matmul_fuse_pass"); + passes_.emplace_back("reshape_transpose_matmul_mkldnn_fuse_pass"); + passes_.emplace_back("matmul_elementwise_add_mkldnn_fuse_pass"); + passes_.emplace_back("operator_scale_onednn_fuse_pass"); + passes_.emplace_back("operator_unsqueeze2_onednn_fuse_pass"); + passes_.emplace_back("operator_reshape2_onednn_fuse_pass"); + passes_.emplace_back("cpu_quantize_placement_pass"); + passes_.emplace_back("cpu_quantize_pass"); + passes_.emplace_back("cpu_quantize_squash_pass"); + passes_.emplace_back("quant_transpose2_dequant_onednn_fuse_pass"); } use_mkldnn_int8_ = true; #else diff --git a/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc b/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc index 11a909cfb3bc3adeaf9ec68d10917abce1be7720..4dfbf8e754e452f339a79813c8171441352aac10 100644 --- a/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc +++ b/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc @@ -122,7 +122,7 @@ static std::vector DimsExprs2VecExprWrapper( ) { std::vector x_dims_wrap; for (int i = 0; i < x_dims.nbDims; i++) { - x_dims_wrap.push_back(ExprWrapper(x_dims.d[i], &expr_builder)); + x_dims_wrap.emplace_back(x_dims.d[i], &expr_builder); } return x_dims_wrap; } @@ -643,7 +643,7 @@ nvinfer1::DimsExprs Conv2dFusionInferMeta( std::vector paddings_wrap; for (size_t i = 0; i < paddings.size(); ++i) { - paddings_wrap.emplace_back(ExprWrapper(paddings[i], &expr_builder)); + paddings_wrap.emplace_back(paddings[i], &expr_builder); } UpdatePaddingAndDilation(&paddings_wrap, diff --git a/paddle/fluid/inference/utils/table_printer.cc b/paddle/fluid/inference/utils/table_printer.cc index 70ab1b17dbfb2aea1cda91b3e3332e205f553297..e75d62d135896de1b63d579ee5eb20ab2694a53f 100644 --- a/paddle/fluid/inference/utils/table_printer.cc +++ b/paddle/fluid/inference/utils/table_printer.cc @@ -92,7 +92,7 @@ void TablePrinter::InsertRow(const std::vector& row) { size_t max_height = 0; for (size_t i = 0; i < row.size(); ++i) { - table_row.emplace_back(std::vector()); + table_row.emplace_back(); std::stringstream ss(row[i]); std::string line; size_t max_width = 0; @@ -113,7 +113,7 @@ void TablePrinter::InsertRow(const std::vector& row) { void TablePrinter::InsetDivider() { heights_.emplace_back(1); - data_.emplace_back(std::vector>()); + data_.emplace_back(); } void TablePrinter::CalcLayout() { diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc index 0f367bb689334f96ff7a36196fc9a83c31fd7e3a..c4ecc721da95e1add95cc97104ec0cf0ede486d7 100644 --- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc +++ 
b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc @@ -260,7 +260,8 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, op_item->dyn_cast(); std::unique_ptr op_info_parser; if (op_info_interface) { - op_info_parser.reset(new OpYamlInfoParser(op_info_interface.GetOpInfo())); + op_info_parser = + std::make_unique(op_info_interface.GetOpInfo()); } std::string kernel_fn_str; @@ -328,7 +329,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, ctx, phi::TransToPhiPlace(kernel_key.backend()), result_type.dyn_cast()); - op_output_types.push_back(allocated_selected_rows_dtype); + op_output_types.emplace_back(allocated_selected_rows_dtype); } else { PADDLE_THROW(phi::errors::Unimplemented( "Result type only support DenseTensorType and VectorType")); @@ -343,7 +344,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, for (size_t i = 0; i < op_item->num_operands(); ++i) { auto cur_in = op_item->operand_source(i); if (!cur_in) { - vec_inputs.push_back(ir::OpResult()); + vec_inputs.emplace_back(); continue; } PADDLE_ENFORCE_EQ(map_value_pair.count(cur_in), diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index 72b80440b58f80c2db5761c6afbe0425abe12bb4..2f6aaa13fd26abd6abf38912b6bf35226777bd0c 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -388,7 +388,7 @@ std::vector OpTranscriber::GenerateOperationInput( if (legacy_input_vars.empty()) { if (info.optional) { - op_inputs.push_back(ir::OpResult(nullptr)); + op_inputs.emplace_back(nullptr); continue; } } @@ -484,7 +484,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, "Op %s arg %s should be optional if it can be empty", op_desc.Type(), legacy_output_name); - op_output_types.push_back(ir::Type(nullptr)); + op_output_types.emplace_back(nullptr); continue; } @@ -521,7 +521,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, << info.type_name << " " << legacy_output_name << " " << legacy_output_vars.size(); if (legacy_output_vars.empty()) { - op_output_types.push_back(ir::Type(nullptr)); + op_output_types.emplace_back(nullptr); continue; } @@ -548,7 +548,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, std::vector types; for (const auto& var_name : legacy_output_vars) { if (var_name == kEmptyVarName) { - types.push_back(ir::Type(nullptr)); + types.emplace_back(nullptr); arg_to_idx[var_name] = cur_output_idx; continue; } diff --git a/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc b/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc index 4b6b6b0292c3d369d2f6ae4866746aab3feb39f2..0c5bfe7bd1a90ecd5ff45ea9008ea3e542ce48f6 100644 --- a/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc +++ b/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc @@ -146,7 +146,7 @@ void VirtualMemoryAutoGrowthBestFitAllocator::ExtendAndMerge(size_t size) { allocations_.push_back(std::move(allocateptr)); // hold allocation if (all_blocks_.empty()) { - all_blocks_.push_back(Block(ptr, size, true)); + all_blocks_.emplace_back(ptr, size, true); free_blocks_.emplace(std::make_pair(size, ptr), all_blocks_.begin()); return; } @@ -165,7 +165,7 @@ void VirtualMemoryAutoGrowthBestFitAllocator::ExtendAndMerge(size_t size) { block_it); } else { // do not merge - all_blocks_.push_front(Block(ptr, size, true)); + all_blocks_.emplace_back(ptr, size, true); 
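// Editorial sketch (not part of the patch): besides the emplace changes, the
// pd_op_to_kernel_pass.cc hunk above replaces op_info_parser.reset(new ...)
// with std::make_unique, which keeps the allocation and the ownership transfer
// in a single expression. ParserLike is a hypothetical stand-in for
// OpYamlInfoParser.
#include <memory>
#include <string>
#include <utility>

struct ParserLike {
  explicit ParserLike(std::string info) : info_(std::move(info)) {}
  std::string info_;
};

int main() {
  std::unique_ptr<ParserLike> op_info_parser;
  op_info_parser.reset(new ParserLike("op_info"));           // old style
  op_info_parser = std::make_unique<ParserLike>("op_info");  // preferred
  return op_info_parser ? 0 : 1;
}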
free_blocks_.emplace(std::make_pair(size, ptr), all_blocks_.begin()); } } else { @@ -222,7 +222,7 @@ void VirtualMemoryAutoGrowthBestFitAllocator::ExtendAndMerge(size_t size) { block_it); } else { // do not merge - all_blocks_.push_back(Block(ptr, size, true)); + all_blocks_.emplace_back(ptr, size, true); auto block_it = all_blocks_.end(); block_it--; free_blocks_.emplace(std::make_pair(size, ptr), block_it); diff --git a/paddle/fluid/memory/malloc_test.cu b/paddle/fluid/memory/malloc_test.cu index 9a8ab9324f1c29bff2774811c7b212694e659402..ceea040c9fc0c292e60b0678a68329ac1041a8b3 100644 --- a/paddle/fluid/memory/malloc_test.cu +++ b/paddle/fluid/memory/malloc_test.cu @@ -193,8 +193,8 @@ TEST(Malloc, GPUContextMultiThreadMultiStream) { .get()); ctx->PartialInitWithAllocator(); dev_ctx.emplace_back(std::move(ctx)); - threads.push_back(std::thread( - MultiStreamCompute, &data[i], &second_data[i], std::cref(*dev_ctx[i]))); + threads.emplace_back( + MultiStreamCompute, &data[i], &second_data[i], std::cref(*dev_ctx[i])); } for (int i = 0; i < NUM_STREAMS; ++i) { diff --git a/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu b/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu index c4e87bef953d8f2529c59a479e521bdcbe313cf8..1a3823767ad6339a20eaf27402e5959ff22c261a 100644 --- a/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu +++ b/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu @@ -289,8 +289,7 @@ class StreamSafeCUDAAllocTest : public ::testing::Test { void MultiThreadMultiStreamRun() { std::vector threads; for (size_t i = 0; i < stream_num_; ++i) { - threads.push_back( - std::thread(&StreamSafeCUDAAllocTest::SingleStreamRun, this, i)); + threads.emplace_back(&StreamSafeCUDAAllocTest::SingleStreamRun, this, i); } for (size_t i = 0; i < stream_num_; ++i) { threads[i].join(); diff --git a/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc index 7d7c34a9f226645ef23429b8cbb7116a9a6db0ff..90ecbe4506d98b5fb7c2717ad44bbdc9f025c135 100644 --- a/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc +++ b/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc @@ -124,7 +124,7 @@ class MultiGRUHandler { // Create attributes for each oneDNN gru for (int i = 0; i < 2 * layers_; ++i) { - attrs_.push_back(dnnl::primitive_attr()); + attrs_.emplace_back(); } if (is_int8) { diff --git a/paddle/fluid/operators/math/tree2col.cc b/paddle/fluid/operators/math/tree2col.cc index 1bf20c9cc75a153d0c7cd467bd9bc053fd4d7ab5..77eb7f68ec1c210ce5c9a377ff82feb3a0f4c5b7 100644 --- a/paddle/fluid/operators/math/tree2col.cc +++ b/paddle/fluid/operators/math/tree2col.cc @@ -26,8 +26,8 @@ std::vector Tree2ColUtil::construct_patch( std::unordered_map visited; std::vector patch; - stack.push(TreeNode(root, 1, 1, 0)); - patch.emplace_back(TreeNode(root, 1, 1, 0)); + stack.emplace(root, 1, 1, 0); + patch.emplace_back(root, 1, 1, 0); visited[root] = true; while (!stack.empty()) { @@ -39,8 +39,8 @@ std::vector Tree2ColUtil::construct_patch( size_t v = tr[node][i]; if (!visited[v] && static_cast(u.get_depth()) + 1 < max_depth) { visited[v] = true; - stack.push(TreeNode(v, i, sz, u.get_depth() + 1)); - patch.push_back(TreeNode(v, i + 1, sz, u.get_depth() + 1)); + stack.emplace(v, i, sz, u.get_depth() + 1); + patch.emplace_back(v, i + 1, sz, u.get_depth() + 1); end = false; } } diff --git a/paddle/fluid/platform/profiler/dump/test_serialization_logger.cc b/paddle/fluid/platform/profiler/dump/test_serialization_logger.cc index 
cc99cff78541286c0d5dc4013b78a86a4be29b0a..8cfcbc15b88ad2c4750892cd0f3ff10e57576c38 100644 --- a/paddle/fluid/platform/profiler/dump/test_serialization_logger.cc +++ b/paddle/fluid/platform/profiler/dump/test_serialization_logger.cc @@ -44,104 +44,104 @@ TEST(SerializationLoggerTest, dump_case0) { std::list device_events; std::list mem_events; std::list op_supplement_events; - host_events.push_back(HostTraceEvent(std::string("dataloader#1"), - TracerEventType::Dataloader, - 1000, - 10000, - 10, - 10)); - host_events.push_back(HostTraceEvent( - std::string("op1"), TracerEventType::Operator, 11000, 20000, 10, 10)); - host_events.push_back(HostTraceEvent( - std::string("op2"), TracerEventType::Operator, 21000, 30000, 10, 10)); - host_events.push_back(HostTraceEvent( - std::string("op3"), TracerEventType::Operator, 31000, 40000, 10, 11)); - mem_events.push_back(MemTraceEvent(11500, - 0x1000, - TracerMemEventType::Allocate, - 10, - 10, - 50, - "GPU:0", - 50, - 50, - 100, - 100)); - mem_events.push_back(MemTraceEvent(11900, - 0x1000, - TracerMemEventType::Free, - 10, - 10, - -50, - "GPU:0", - 0, - 50, - 100, - 100)); + host_events.emplace_back(std::string("dataloader#1"), + TracerEventType::Dataloader, + 1000, + 10000, + 10, + 10); + host_events.emplace_back( + std::string("op1"), TracerEventType::Operator, 11000, 20000, 10, 10); + host_events.emplace_back( + std::string("op2"), TracerEventType::Operator, 21000, 30000, 10, 10); + host_events.emplace_back( + std::string("op3"), TracerEventType::Operator, 31000, 40000, 10, 11); + mem_events.emplace_back(11500, + 0x1000, + TracerMemEventType::Allocate, + 10, + 10, + 50, + "GPU:0", + 50, + 50, + 100, + 100); + mem_events.emplace_back(11900, + 0x1000, + TracerMemEventType::Free, + 10, + 10, + -50, + "GPU:0", + 0, + 50, + 100, + 100); std::map>> input_shapes; std::map> dtypes; input_shapes[std::string("X")].push_back(std::vector{1, 2, 3}); input_shapes[std::string("X")].push_back(std::vector{4, 5, 6, 7}); - dtypes[std::string("X")].push_back(std::string("int8")); - dtypes[std::string("X")].push_back(std::string("float32")); + dtypes[std::string("X")].emplace_back("int8"); + dtypes[std::string("X")].emplace_back("float32"); AttributeMap attrs; - op_supplement_events.push_back(OperatorSupplementEvent( - 11600, "op1", input_shapes, dtypes, "op1()", attrs, 0, 10, 10)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0)); - device_events.push_back(DeviceTraceEvent(std::string("kernel1"), - TracerEventType::Kernel, - 40000, - 55000, - 0, - 10, - 10, - 1, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel2"), - TracerEventType::Kernel, - 70000, - 95000, - 0, - 10, - 10, - 2, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel3"), - TracerEventType::Kernel, - 60000, - 65000, - 0, - 10, - 11, - 3, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("memcpy1"), - TracerEventType::Memcpy, - 56000, - 59000, - 0, - 10, - 10, - 4, - MemcpyEventInfo())); - 
device_events.push_back(DeviceTraceEvent(std::string("memset1"), - TracerEventType::Memset, - 66000, - 69000, - 0, - 10, - 11, - 5, - MemsetEventInfo())); + op_supplement_events.emplace_back( + 11600, "op1", input_shapes, dtypes, "op1()", attrs, 0, 10, 10); + runtime_events.emplace_back( + std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0); + runtime_events.emplace_back( + std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0); + runtime_events.emplace_back( + std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0); + runtime_events.emplace_back( + std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0); + runtime_events.emplace_back( + std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0); + device_events.emplace_back(std::string("kernel1"), + TracerEventType::Kernel, + 40000, + 55000, + 0, + 10, + 10, + 1, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel2"), + TracerEventType::Kernel, + 70000, + 95000, + 0, + 10, + 10, + 2, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel3"), + TracerEventType::Kernel, + 60000, + 65000, + 0, + 10, + 11, + 3, + KernelEventInfo()); + device_events.emplace_back(std::string("memcpy1"), + TracerEventType::Memcpy, + 56000, + 59000, + 0, + 10, + 10, + 4, + MemcpyEventInfo()); + device_events.emplace_back(std::string("memset1"), + TracerEventType::Memset, + 66000, + 69000, + 0, + 10, + 11, + 5, + MemsetEventInfo()); SerializationLogger logger("test_serialization_logger_case0.pb"); logger.LogMetaInfo(std::string("1.0.2"), 0); NodeTrees tree(host_events, @@ -182,61 +182,61 @@ TEST(SerializationLoggerTest, dump_case1) { std::list device_events; std::list mem_events; std::list op_supplement_events; - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0)); - device_events.push_back(DeviceTraceEvent(std::string("kernel1"), - TracerEventType::Kernel, - 40000, - 55000, - 0, - 10, - 10, - 1, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel2"), - TracerEventType::Kernel, - 70000, - 95000, - 0, - 10, - 10, - 2, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel3"), - TracerEventType::Kernel, - 60000, - 65000, - 0, - 10, - 11, - 3, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("memcpy1"), - TracerEventType::Memcpy, - 56000, - 59000, - 0, - 10, - 10, - 4, - MemcpyEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("memset1"), - TracerEventType::Memset, - 66000, - 69000, - 0, - 10, - 11, - 5, - MemsetEventInfo())); + runtime_events.emplace_back( + std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0); + runtime_events.emplace_back( + std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0); + runtime_events.emplace_back( + std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0); + runtime_events.emplace_back( + std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0); + runtime_events.emplace_back( + std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0); + device_events.emplace_back(std::string("kernel1"), + TracerEventType::Kernel, + 
40000, + 55000, + 0, + 10, + 10, + 1, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel2"), + TracerEventType::Kernel, + 70000, + 95000, + 0, + 10, + 10, + 2, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel3"), + TracerEventType::Kernel, + 60000, + 65000, + 0, + 10, + 11, + 3, + KernelEventInfo()); + device_events.emplace_back(std::string("memcpy1"), + TracerEventType::Memcpy, + 56000, + 59000, + 0, + 10, + 10, + 4, + MemcpyEventInfo()); + device_events.emplace_back(std::string("memset1"), + TracerEventType::Memset, + 66000, + 69000, + 0, + 10, + 11, + 5, + MemsetEventInfo()); SerializationLogger logger("test_serialization_logger_case1.pb"); logger.LogMetaInfo(std::string("1.0.2"), 0); NodeTrees tree(host_events, diff --git a/paddle/fluid/platform/profiler/test_event_node.cc b/paddle/fluid/platform/profiler/test_event_node.cc index e1dfc36d99db5f363934107d15f48490099bfa10..252d21fcaf3611157069fb2f4c90d26bc6065013 100644 --- a/paddle/fluid/platform/profiler/test_event_node.cc +++ b/paddle/fluid/platform/profiler/test_event_node.cc @@ -42,104 +42,104 @@ TEST(NodeTreesTest, LogMe_case0) { std::list device_events; std::list mem_events; std::list op_supplement_events; - host_events.push_back(HostTraceEvent(std::string("dataloader#1"), - TracerEventType::Dataloader, - 1000, - 10000, - 10, - 10)); - host_events.push_back(HostTraceEvent( - std::string("op1"), TracerEventType::Operator, 11000, 20000, 10, 10)); - host_events.push_back(HostTraceEvent( - std::string("op2"), TracerEventType::Operator, 21000, 30000, 10, 10)); - host_events.push_back(HostTraceEvent( - std::string("op3"), TracerEventType::Operator, 31000, 40000, 10, 11)); - mem_events.push_back(MemTraceEvent(11500, - 0x1000, - TracerMemEventType::Allocate, - 10, - 10, - 50, - "GPU:0", - 50, - 50, - 100, - 100)); - mem_events.push_back(MemTraceEvent(11900, - 0x1000, - TracerMemEventType::Free, - 10, - 10, - -50, - "GPU:0", - 0, - 50, - 100, - 100)); + host_events.emplace_back(std::string("dataloader#1"), + TracerEventType::Dataloader, + 1000, + 10000, + 10, + 10); + host_events.emplace_back( + std::string("op1"), TracerEventType::Operator, 11000, 20000, 10, 10); + host_events.emplace_back( + std::string("op2"), TracerEventType::Operator, 21000, 30000, 10, 10); + host_events.emplace_back( + std::string("op3"), TracerEventType::Operator, 31000, 40000, 10, 11); + mem_events.emplace_back(11500, + 0x1000, + TracerMemEventType::Allocate, + 10, + 10, + 50, + "GPU:0", + 50, + 50, + 100, + 100); + mem_events.emplace_back(11900, + 0x1000, + TracerMemEventType::Free, + 10, + 10, + -50, + "GPU:0", + 0, + 50, + 100, + 100); std::map>> input_shapes; std::map> dtypes; input_shapes[std::string("X")].push_back(std::vector{1, 2, 3}); input_shapes[std::string("X")].push_back(std::vector{4, 5, 6, 7}); - dtypes[std::string("X")].push_back(std::string("int8")); - dtypes[std::string("X")].push_back(std::string("float32")); + dtypes[std::string("X")].emplace_back("int8"); + dtypes[std::string("X")].emplace_back("float32"); AttributeMap attrs; - op_supplement_events.push_back(OperatorSupplementEvent( - 11600, "op1", input_shapes, dtypes, "op1()", attrs, 0, 10, 10)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0)); - 
runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0)); - device_events.push_back(DeviceTraceEvent(std::string("kernel1"), - TracerEventType::Kernel, - 40000, - 55000, - 0, - 10, - 10, - 1, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel2"), - TracerEventType::Kernel, - 70000, - 95000, - 0, - 10, - 10, - 2, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel3"), - TracerEventType::Kernel, - 60000, - 65000, - 0, - 10, - 11, - 3, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("memcpy1"), - TracerEventType::Memcpy, - 56000, - 59000, - 0, - 10, - 10, - 4, - MemcpyEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("memset1"), - TracerEventType::Memset, - 66000, - 69000, - 0, - 10, - 11, - 5, - MemsetEventInfo())); + op_supplement_events.emplace_back( + 11600, "op1", input_shapes, dtypes, "op1()", attrs, 0, 10, 10); + runtime_events.emplace_back( + std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0); + runtime_events.emplace_back( + std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0); + runtime_events.emplace_back( + std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0); + runtime_events.emplace_back( + std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0); + runtime_events.emplace_back( + std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0); + device_events.emplace_back(std::string("kernel1"), + TracerEventType::Kernel, + 40000, + 55000, + 0, + 10, + 10, + 1, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel2"), + TracerEventType::Kernel, + 70000, + 95000, + 0, + 10, + 10, + 2, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel3"), + TracerEventType::Kernel, + 60000, + 65000, + 0, + 10, + 11, + 3, + KernelEventInfo()); + device_events.emplace_back(std::string("memcpy1"), + TracerEventType::Memcpy, + 56000, + 59000, + 0, + 10, + 10, + 4, + MemcpyEventInfo()); + device_events.emplace_back(std::string("memset1"), + TracerEventType::Memset, + 66000, + 69000, + 0, + 10, + 11, + 5, + MemsetEventInfo()); ChromeTracingLogger logger("test_nodetrees_logme_case0.json"); logger.LogMetaInfo(std::string("1.0.2"), 0); NodeTrees tree(host_events, @@ -180,61 +180,61 @@ TEST(NodeTreesTest, LogMe_case1) { std::list device_events; std::list mem_events; std::list op_supplement_events; - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0)); - device_events.push_back(DeviceTraceEvent(std::string("kernel1"), - TracerEventType::Kernel, - 40000, - 55000, - 0, - 10, - 10, - 1, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel2"), - TracerEventType::Kernel, - 70000, - 95000, - 0, - 10, - 10, - 2, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel3"), - TracerEventType::Kernel, - 60000, - 65000, - 0, - 10, - 11, - 3, - KernelEventInfo())); - 
device_events.push_back(DeviceTraceEvent(std::string("memcpy1"), - TracerEventType::Memcpy, - 56000, - 59000, - 0, - 10, - 10, - 4, - MemcpyEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("memset1"), - TracerEventType::Memset, - 66000, - 69000, - 0, - 10, - 11, - 5, - MemsetEventInfo())); + runtime_events.emplace_back( + std::string("cudalaunch1"), 15000, 17000, 10, 10, 1, 0); + runtime_events.emplace_back( + std::string("cudalaunch2"), 25000, 35000, 10, 10, 2, 0); + runtime_events.emplace_back( + std::string("cudalaunch3"), 33000, 37000, 10, 11, 3, 0); + runtime_events.emplace_back( + std::string("cudaMemcpy1"), 18000, 19000, 10, 10, 4, 0); + runtime_events.emplace_back( + std::string("cudaMemset1"), 38000, 39000, 10, 11, 5, 0); + device_events.emplace_back(std::string("kernel1"), + TracerEventType::Kernel, + 40000, + 55000, + 0, + 10, + 10, + 1, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel2"), + TracerEventType::Kernel, + 70000, + 95000, + 0, + 10, + 10, + 2, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel3"), + TracerEventType::Kernel, + 60000, + 65000, + 0, + 10, + 11, + 3, + KernelEventInfo()); + device_events.emplace_back(std::string("memcpy1"), + TracerEventType::Memcpy, + 56000, + 59000, + 0, + 10, + 10, + 4, + MemcpyEventInfo()); + device_events.emplace_back(std::string("memset1"), + TracerEventType::Memset, + 66000, + 69000, + 0, + 10, + 11, + 5, + MemsetEventInfo()); ChromeTracingLogger logger("test_nodetrees_logme_case1.json"); logger.LogMetaInfo(std::string("1.0.2"), 0); NodeTrees tree(host_events, @@ -269,36 +269,36 @@ TEST(NodeTreesTest, HandleTrees_case0) { std::list device_events; std::list mem_events; std::list op_supplement_events; - host_events.push_back(HostTraceEvent( - std::string("op1"), TracerEventType::Operator, 10000, 100000, 10, 10)); - host_events.push_back(HostTraceEvent( - std::string("op2"), TracerEventType::Operator, 30000, 70000, 10, 10)); - host_events.push_back(HostTraceEvent( - std::string("op3"), TracerEventType::Operator, 2000, 120000, 10, 11)); - mem_events.push_back(MemTraceEvent(11500, - 0x1000, - TracerMemEventType::Allocate, - 10, - 10, - 50, - "GPU:0", - 50, - 50, - 100, - 100)); - mem_events.push_back(MemTraceEvent(11900, - 0x1000, - TracerMemEventType::Free, - 10, - 10, - -50, - "GPU:0", - 0, - 50, - 100, - 100)); + host_events.emplace_back( + std::string("op1"), TracerEventType::Operator, 10000, 100000, 10, 10); + host_events.emplace_back( + std::string("op2"), TracerEventType::Operator, 30000, 70000, 10, 10); + host_events.emplace_back( + std::string("op3"), TracerEventType::Operator, 2000, 120000, 10, 11); + mem_events.emplace_back(11500, + 0x1000, + TracerMemEventType::Allocate, + 10, + 10, + 50, + "GPU:0", + 50, + 50, + 100, + 100); + mem_events.emplace_back(11900, + 0x1000, + TracerMemEventType::Free, + 10, + 10, + -50, + "GPU:0", + 0, + 50, + 100, + 100); AttributeMap attrs; - op_supplement_events.push_back(OperatorSupplementEvent( + op_supplement_events.emplace_back( 11600, "op1", std::map>>(), @@ -307,40 +307,40 @@ TEST(NodeTreesTest, HandleTrees_case0) { attrs, 0, 10, - 10)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch1"), 15000, 25000, 10, 10, 1, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch2"), 35000, 45000, 10, 10, 2, 0)); - runtime_events.push_back(RuntimeTraceEvent( - std::string("cudalaunch3"), 10000, 55000, 10, 11, 3, 0)); - device_events.push_back(DeviceTraceEvent(std::string("kernel1"), - 
TracerEventType::Kernel, - 40000, - 55000, - 0, - 10, - 10, - 1, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel2"), - TracerEventType::Kernel, - 70000, - 95000, - 0, - 10, - 10, - 2, - KernelEventInfo())); - device_events.push_back(DeviceTraceEvent(std::string("kernel3"), - TracerEventType::Kernel, - 60000, - 75000, - 0, - 10, - 11, - 3, - KernelEventInfo())); + 10); + runtime_events.emplace_back( + std::string("cudalaunch1"), 15000, 25000, 10, 10, 1, 0); + runtime_events.emplace_back( + std::string("cudalaunch2"), 35000, 45000, 10, 10, 2, 0); + runtime_events.emplace_back( + std::string("cudalaunch3"), 10000, 55000, 10, 11, 3, 0); + device_events.emplace_back(std::string("kernel1"), + TracerEventType::Kernel, + 40000, + 55000, + 0, + 10, + 10, + 1, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel2"), + TracerEventType::Kernel, + 70000, + 95000, + 0, + 10, + 10, + 2, + KernelEventInfo()); + device_events.emplace_back(std::string("kernel3"), + TracerEventType::Kernel, + 60000, + 75000, + 0, + 10, + 11, + 3, + KernelEventInfo()); ChromeTracingLogger logger("test_nodetrees_handletrees_case0.json"); logger.LogMetaInfo(std::string("1.0.2"), 0); NodeTrees tree(host_events, diff --git a/paddle/ir/core/ir_printer.cc b/paddle/ir/core/ir_printer.cc index e9229b86245a124f9a7c769b3348122a9947207b..f8549433f75c70c47c01101e508b3c1e9e54f71e 100644 --- a/paddle/ir/core/ir_printer.cc +++ b/paddle/ir/core/ir_printer.cc @@ -257,7 +257,7 @@ void IrPrinter::PrintOperandsType(const Operation* op) { if (op_operand) { op_operand_types.push_back(op_operand.type()); } else { - op_operand_types.push_back(Type()); + op_operand_types.emplace_back(); } } os << " ("; @@ -278,7 +278,7 @@ void IrPrinter::PrintOpReturnType(const Operation* op) { if (op_result) { op_result_types.push_back(op_result.type()); } else { - op_result_types.push_back(Type(nullptr)); + op_result_types.emplace_back(nullptr); } } PrintInterleave( diff --git a/paddle/phi/api/lib/op_meta_info.cc b/paddle/phi/api/lib/op_meta_info.cc index 90335269536f15f82c48c172c0abc1c3d281aa4f..fcf6cd29f85cb5a5a06515e8b0822225bfe24df9 100644 --- a/paddle/phi/api/lib/op_meta_info.cc +++ b/paddle/phi/api/lib/op_meta_info.cc @@ -86,13 +86,13 @@ PADDLE_API void AssignTensorImpl(const Tensor& src, Tensor* dst) { void CustomOpKernelContext::EmplaceBackInput(Tensor&& input) { size_t index = inputs_.size(); inputs_.emplace_back(input); - input_range_.emplace_back(std::make_pair(index, index + 1)); + input_range_.emplace_back(index, index + 1); } void CustomOpKernelContext::EmplaceBackInputs( const std::vector& inputs) { size_t index = inputs_.size(); - input_range_.emplace_back(std::make_pair(index, index + inputs.size())); + input_range_.emplace_back(index, index + inputs.size()); inputs_.insert(inputs_.end(), std::make_move_iterator(inputs.begin()), std::make_move_iterator(inputs.end())); @@ -101,13 +101,13 @@ void CustomOpKernelContext::EmplaceBackInputs( void CustomOpKernelContext::EmplaceBackOutput(Tensor&& output) { size_t index = outputs_.size(); outputs_.emplace_back(output); - output_range_.emplace_back(std::make_pair(index, index + 1)); + output_range_.emplace_back(index, index + 1); } void CustomOpKernelContext::EmplaceBackOutputs( const std::vector& outputs) { size_t index = outputs_.size(); - output_range_.emplace_back(std::make_pair(index, index + outputs.size())); + output_range_.emplace_back(index, index + outputs.size()); outputs_.insert(outputs_.end(), std::make_move_iterator(outputs.begin()), 
std::make_move_iterator(outputs.end())); diff --git a/paddle/utils/string/string_helper_test.cc b/paddle/utils/string/string_helper_test.cc index 68382e692d6cfc28a65d6a934fd5cb67dc14515e..631edca2408cbcf589e415cca7477a571c5b1212 100644 --- a/paddle/utils/string/string_helper_test.cc +++ b/paddle/utils/string/string_helper_test.cc @@ -41,8 +41,8 @@ TEST(StringHelper, FormatStringAppend) { TEST(StringHelper, JoinStrings) { std::vector v; - v.push_back("hello"); - v.push_back("world"); + v.emplace_back("hello"); + v.emplace_back("world"); std::string result = paddle::string::join_strings(v, ' '); EXPECT_EQ(result, "hello world"); diff --git a/test/cpp/fluid/reader/reader_blocking_queue_test.cc b/test/cpp/fluid/reader/reader_blocking_queue_test.cc index 85439b4bb1d6b84c1806eef9f7548e3f7134d0f0..75e5d12777190a32f3bb47739712005fcf1d8652 100644 --- a/test/cpp/fluid/reader/reader_blocking_queue_test.cc +++ b/test/cpp/fluid/reader/reader_blocking_queue_test.cc @@ -146,18 +146,18 @@ void MultiSenderMultiReceiver(const size_t queue_cap, size_t sender_num = to_send.size(); std::vector senders; for (size_t s_idx = 0; s_idx < sender_num; ++s_idx) { - senders.emplace_back(std::thread([&, s_idx] { + senders.emplace_back([&, s_idx] { for (size_t elem : to_send[s_idx]) { std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap)); EXPECT_TRUE(q.Send(elem)); } - })); + }); } std::vector receivers; std::mutex mu; std::vector> res; for (size_t r_idx = 0; r_idx < receiver_num; ++r_idx) { - receivers.emplace_back(std::thread([&] { + receivers.emplace_back([&] { std::vector receiver_res; while (true) { std::this_thread::sleep_for( @@ -170,7 +170,7 @@ void MultiSenderMultiReceiver(const size_t queue_cap, } std::lock_guard lock(mu); res.push_back(receiver_res); - })); + }); } for (auto& t : senders) { t.join(); diff --git a/test/cpp/phi/kernels/test_fused_adam_kernel.cc b/test/cpp/phi/kernels/test_fused_adam_kernel.cc index 5202dc3a9468b81775706639ac118b2d9bb8f339..7084b85ba73882ebd977053687df9cbff8a7ed80 100644 --- a/test/cpp/phi/kernels/test_fused_adam_kernel.cc +++ b/test/cpp/phi/kernels/test_fused_adam_kernel.cc @@ -89,7 +89,7 @@ static auto ToMutableTensorPtrVector( static auto ToMetaTensorVector(const std::vector &tensors) { std::vector results; for (auto &t : tensors) { - results.push_back(t); + results.emplace_back(t); } return results; }
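The thread-creation hunks in this patch (data_set.cc, dist_multi_trainer.cc, reader_blocking_queue_test.cc, and others) apply the same idea to std::vector<std::thread>: the callable and its arguments are passed straight to emplace_back instead of being wrapped in a std::thread(...) temporary first. A minimal, self-contained sketch of that pattern; the Work function below is illustrative and not taken from the repository:

#include <iostream>
#include <thread>
#include <vector>

void Work(int id) { std::cout << "worker " << id << '\n'; }

int main() {
  std::vector<std::thread> threads;
  threads.push_back(std::thread(Work, 0));  // temporary std::thread, then move
  threads.emplace_back(Work, 1);            // std::thread constructed in place
  threads.emplace_back([] { Work(2); });    // lambdas work the same way
  for (auto& t : threads) {
    t.join();
  }
  return 0;
}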
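For containers of std::string, such as passes_ in paddle_pass_builder.cc and names_ in executor_statistics.cc, emplace_back("literal") constructs the std::string in place from the character literal, whereas push_back first materializes a temporary std::string and moves it in. A short sketch under that assumption:

#include <string>
#include <vector>

int main() {
  std::vector<std::string> passes;
  passes.push_back("cpu_quantize_placement_pass");     // temporary std::string, then move
  passes.emplace_back("cpu_quantize_placement_pass");  // std::string built in place
  return passes.size() == 2 ? 0 : 1;
}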
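The same argument-forwarding style applies to associative containers: op_def_api.cc now calls ops_definition.emplace(op_name, ...) directly instead of wrapping the key and value in std::make_pair. A hedged sketch with a hypothetical OpDefLike value type standing in for proto::OpDef:

#include <map>
#include <string>
#include <utility>

struct OpDefLike {};

int main() {
  std::map<std::string, OpDefLike> ops_definition;
  ops_definition.emplace(std::make_pair(std::string("relu"), OpDefLike{}));  // old style
  ops_definition.emplace("sigmoid", OpDefLike{});  // key and value forwarded directly
  return ops_definition.size() == 2 ? 0 : 1;
}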