Unverified commit dcaca0f4, authored by gouzil and committed by GitHub

[clang-tidy] enable bugprone-exception-escape check (#56692)

Parent c0f5dac6
@@ -8,7 +8,7 @@ bugprone-argument-comment,
 bugprone-copy-constructor-init,
 -bugprone-dangling-handle,
 -bugprone-dynamic-static-initializers,
--bugprone-exception-escape,
+bugprone-exception-escape,
 -bugprone-fold-init-type,
 -bugprone-forwarding-reference-overload,
 -bugprone-inaccurate-erase,
......
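A note on the mechanics: in the `Checks` list of a `.clang-tidy` file a leading `-` disables a check, so the hunk above enables `bugprone-exception-escape` simply by dropping the minus. The check flags functions that must not throw — destructors, move constructors, move assignment operators, `main()`, `swap()`, and anything marked `noexcept` — whenever an exception can still propagate out of them. A minimal illustrative sketch, not taken from this patch:

```cpp
#include <vector>

struct Holder {
  std::vector<int> data_;
  // Destructors are implicitly noexcept since C++11; resize() may throw
  // std::bad_alloc, so an escaping exception would reach std::terminate().
  // clang-tidy reports bugprone-exception-escape on this destructor.
  ~Holder() { data_.resize(data_.size() + 1); }
};

int main() {  // main() is also on the check's no-throw list
  Holder h;
  return 0;
}
```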
@@ -72,7 +72,7 @@ Flatten::Flatten(const std::vector<DimTrans*>& dims)
   all_dim_trans.emplace_back(this);
 }
-Flatten::~Flatten() {
+Flatten::~Flatten() { // NOLINT
   input_dims_.assign(input_dims_.size(), nullptr);
   std::vector<DimTrans*>().swap(input_dims_);
 }
......
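The hunk above is representative of most of this patch: the destructor calls `vector::assign()`, which clang-tidy cannot prove non-throwing, and since destructors are implicitly noexcept the check fires. The patch opts to suppress with `// NOLINT`; the alternative shape, sketched below under the assumption that swallowing the failure is acceptable, is to contain the exception inside the destructor:

```cpp
#include <exception>
#include <iostream>
#include <vector>

struct Buffer {
  std::vector<int> data_;
  ~Buffer() {
    try {
      data_.assign(data_.size(), 0);  // may throw std::bad_alloc in general
    } catch (const std::exception& e) {
      // An exception escaping a destructor would call std::terminate(),
      // so log and swallow it here instead.
      std::cerr << "cleanup failed: " << e.what() << '\n';
    }
  }
};

int main() {
  Buffer b;
  b.data_.resize(8);
  return 0;
}
```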
@@ -3299,7 +3299,7 @@ static void DygraphCodeGeneration(const std::string& output_dir,
 } // namespace framework
 } // namespace paddle
-int main(int argc, char* argv[]) {
+int main(int argc, char* argv[]) { // NOLINT
   if (argc != 3) {
     std::cerr << "argc must be 3" << std::endl;
     return -1;
......
@@ -27,7 +27,7 @@
 #include "pybind11/pytypes.h"
 namespace egr {
-GradNodePyLayer::~GradNodePyLayer() {
+GradNodePyLayer::~GradNodePyLayer() { // NOLINT
   pybind11::gil_scoped_acquire gil;
   Py_XDECREF(ctx_);
 }
......
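Several of the flagged destructors in this patch first take the Python GIL and then drop a reference. `pybind11::gil_scoped_acquire`'s constructor is not declared noexcept, so the check conservatively assumes it can throw out of the implicitly-noexcept destructor; the `// NOLINT` records that this risk is accepted. A hedged sketch of the pattern (`PyRef` is a hypothetical name, and the interpreter is assumed to be initialized):

```cpp
#include <pybind11/pybind11.h>

class PyRef {
 public:
  explicit PyRef(PyObject* ptr) : ptr_(ptr) { Py_XINCREF(ptr_); }
  ~PyRef() {  // NOLINT(bugprone-exception-escape)
    // gil_scoped_acquire is not noexcept, so the analysis assumes it may
    // throw; Py_XDECREF itself is a no-throw C API call.
    pybind11::gil_scoped_acquire gil;
    Py_XDECREF(ptr_);
  }

 private:
  PyObject* ptr_;
};
```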
@@ -916,7 +916,8 @@ static void RegisterOperatorKernel(
   OperatorWithKernel::OpKernelFunc op_kernel_func;
   if (kernel_func) {
     VLOG(3) << "Register custom operator " << name << " with kernel func";
-    op_kernel_func = [kernel_func, inputs, outputs, attrs, inplace_map](
+    op_kernel_func =
+        [kernel_func, inputs, outputs, attrs, inplace_map]( // NOLINT
             const framework::ExecutionContext& ctx) {
           VLOG(3) << "Custom Operator: run custom kernel func in lambda.";
           RunKernelFunc(ctx, kernel_func, inputs, outputs, attrs, inplace_map);
@@ -1027,12 +1028,12 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // InferShape
   if (infer_shape_func == nullptr) {
     // use default InferShape
-    info.infer_shape_ =
-        [op_inputs, op_outputs, op_inplace_map](InferShapeContext* ctx) {
+    info.infer_shape_ = [op_inputs, op_outputs, op_inplace_map]( // NOLINT
+                            InferShapeContext* ctx) {
       RunDefaultInferShapeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
     };
   } else {
-    info.infer_shape_ = [op_inputs,
+    info.infer_shape_ = [op_inputs, // NOLINT
                         op_outputs,
                         op_attrs,
                         op_inplace_map,
@@ -1051,12 +1052,12 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Infer Dtype
   if (infer_dtype_func == nullptr) {
     // use default InferDtype
-    info.infer_var_type_ =
-        [op_inputs, op_outputs, op_inplace_map](InferVarTypeContext* ctx) {
+    info.infer_var_type_ = [op_inputs, op_outputs, op_inplace_map]( // NOLINT
+                               InferVarTypeContext* ctx) {
       RunDefaultInferDtypeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
     };
   } else {
-    info.infer_var_type_ = [op_inputs,
+    info.infer_var_type_ = [op_inputs, // NOLINT
                            op_outputs,
                            op_attrs,
                            op_inplace_map,
@@ -1115,7 +1116,10 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // GradOpDescMaker
   info.grad_op_maker_ =
-      [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
+      [grad_op_name, // NOLINT
+       grad_op_inputs,
+       grad_op_outputs,
+       is_double_grad](
           const OpDesc& fwd_op,
           const std::unordered_set<std::string>& no_grad_set,
           std::unordered_map<std::string, std::string>* grad_to_var,
@@ -1133,7 +1137,10 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // GradOpBaseMaker
   info.dygraph_grad_op_maker_ =
-      [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
+      [grad_op_name, // NOLINT
+       grad_op_inputs,
+       grad_op_outputs,
+       is_double_grad](
          const std::string& type,
          const imperative::NameVarBaseMap& var_base_map_in,
          const imperative::NameVarBaseMap& var_base_map_out,
@@ -1173,7 +1180,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Grad InferShape
   if (grad_infer_shape_fn == nullptr) {
-    grad_info.infer_shape_ = [grad_op_inputs,
+    grad_info.infer_shape_ = [grad_op_inputs, // NOLINT
                              grad_op_outputs,
                              is_double_grad](InferShapeContext* ctx) {
       // 1. if forward input exists, gradient's shape is same with forward
@@ -1211,7 +1218,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
       }
     };
   } else {
-    grad_info.infer_shape_ = [grad_op_inputs,
+    grad_info.infer_shape_ = [grad_op_inputs, // NOLINT
                              grad_op_outputs,
                              grad_op_attrs,
                              grad_op_inplace_map,
@@ -1230,7 +1237,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Grad InferDtype
   if (grad_infer_dtype_fn != nullptr) {
     grad_info.infer_var_type_ =
-        [grad_op_inputs,
+        [grad_op_inputs, // NOLINT
          grad_op_outputs,
          grad_op_attrs,
          grad_op_inplace_map,
......
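The lambda edits in this file look like pure reformatting, but they serve the suppression: a `// NOLINT` comment silences only diagnostics emitted for its own line, and the reports for these lambdas point at the capture list, so the captures are reflowed to put the comment on the reported line. A small sketch of the placement rule (`do_work` is a hypothetical helper):

```cpp
#include <functional>
#include <iostream>

void do_work(int state, int arg) { std::cout << state + arg << '\n'; }

int main() {
  int state = 42;
  // A NOLINT comment suppresses only diagnostics on its own line; for a
  // lambda the report lands on the capture list, hence the reflow in the
  // patch. NOLINTNEXTLINE is the line-above variant.
  std::function<void(int)> fn = [state](  // NOLINT
                                    int arg) { do_work(state, arg); };
  fn(1);
  return 0;
}
```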
@@ -72,7 +72,7 @@ class SingleStreamGuard {
     }
   }
-  ~SingleStreamGuard() {
+  ~SingleStreamGuard() { // NOLINT
     if (!is_changed) {
       return;
     }
......
@@ -33,7 +33,7 @@ PADDLE_DEFINE_EXPORTED_bool(
 namespace paddle {
 namespace framework {
-Scope::~Scope() { DropKids(); }
+Scope::~Scope() { DropKids(); } // NOLINT
 Scope& Scope::NewScope() const {
   Scope* child = new Scope(this);
......
@@ -2581,7 +2581,7 @@ bool AnalysisPredictor::SaveTrtCalibToDisk() {
 }
 #endif
-AnalysisPredictor::~AnalysisPredictor() {
+AnalysisPredictor::~AnalysisPredictor() { // NOLINT
 #ifdef PADDLE_WITH_TENSORRT
   if (config_.tensorrt_engine_enabled() &&
       config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
......
@@ -391,7 +391,7 @@ void MemoryMapAllocationPool::Clear() {
   memory_map_allocations_.clear();
 }
-MemoryMapAllocationPool::~MemoryMapAllocationPool() { Clear(); }
+MemoryMapAllocationPool::~MemoryMapAllocationPool() { Clear(); } // NOLINT
 } // namespace allocation
 } // namespace memory
......
@@ -591,7 +591,7 @@ MemEvenRecorder::RecordMemEvent::RecordMemEvent(const Place &place,
   PushMemEvent(start_ns_, end_ns_, bytes_, place_, alloc_in_);
 }
-MemEvenRecorder::RecordMemEvent::~RecordMemEvent() {
+MemEvenRecorder::RecordMemEvent::~RecordMemEvent() { // NOLINT
   phi::DeviceTracer *tracer = phi::GetDeviceTracer();
   end_ns_ = PosixInNsec();
......
@@ -60,7 +60,7 @@ ChromeTracingLogger::ChromeTracingLogger(const char* filename_cstr) {
   StartLog();
 }
-ChromeTracingLogger::~ChromeTracingLogger() {
+ChromeTracingLogger::~ChromeTracingLogger() { // NOLINT
   EndLog();
   output_file_stream_.close();
 }
......
@@ -150,7 +150,7 @@ std::unique_ptr<ProfilerResult> DeserializationReader::Parse() {
   return std::unique_ptr<ProfilerResult>(profiler_result_ptr);
 }
-DeserializationReader::~DeserializationReader() {
+DeserializationReader::~DeserializationReader() { // NOLINT
   delete node_trees_proto_;
   input_file_stream_.close();
 }
......
@@ -371,7 +371,7 @@ SerializationLogger::SerializationLogger(const char* filename_cstr) {
   OpenFile();
 }
-SerializationLogger::~SerializationLogger() {
+SerializationLogger::~SerializationLogger() { // NOLINT
   if (!output_file_stream_) {
     delete node_trees_proto_;
     return;
......
@@ -106,7 +106,7 @@ class EagerNumpyAllocation : public phi::Allocation {
             "The underlying PyObject pointer of numpy array cannot be None"));
     Py_INCREF(arr_);
   }
-  ~EagerNumpyAllocation() override {
+  ~EagerNumpyAllocation() override { // NOLINT
     py::gil_scoped_acquire gil;
     Py_DECREF(arr_);
   }
......
@@ -477,7 +477,7 @@ GenerateOpFunctions() {
   return std::make_tuple(op_function_list, bind_function_list);
 }
-int main(int argc, char* argv[]) {
+int main(int argc, char* argv[]) { // NOLINT
   if (argc != 2) {
     std::cerr << "argc must be 2" << std::endl;
     return -1;
......
@@ -1834,7 +1834,7 @@ void PyVoidHook::operator()() {
 PyObjectHolder::PyObjectHolder(PyObject* ptr) { ptr_ = ptr; }
-PyObjectHolder::~PyObjectHolder() {
+PyObjectHolder::~PyObjectHolder() { // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_XDECREF(ptr_);
 }
@@ -1860,7 +1860,7 @@ void PyObjectHolder::dec_ref() {
 PackHook::PackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
-PackHook::~PackHook() {
+PackHook::~PackHook() { // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_DECREF(hook_);
 }
@@ -1899,7 +1899,7 @@ void* PackHook::operator()(void* py_tensor) {
 UnPackHook::UnPackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
-UnPackHook::~UnPackHook() {
+UnPackHook::~UnPackHook() { // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_DECREF(hook_);
 }
......
@@ -88,7 +88,7 @@ class PyVariableWrapperHook : public imperative::VariableWrapperHook {
     Py_INCREF(py_func_);
   }
-  ~PyVariableWrapperHook() override {
+  ~PyVariableWrapperHook() override { // NOLINT
     py::gil_scoped_acquire gil;
     Py_DECREF(py_func_);
   }
......
@@ -40,7 +40,7 @@
 //   },
 //   ...
 // }
-int main(int argc, char **argv) {
+int main(int argc, char **argv) { // NOLINT
   paddle::framework::InitDefaultKernelSignatureMap();
   auto &kernel_signature_map = phi::DefaultKernelSignatureMap::Instance();
   auto &kernel_factory = phi::KernelFactory::Instance();
......
@@ -28,7 +28,7 @@ struct ParametricStorageManager {
   explicit ParametricStorageManager(std::function<void(StorageBase *)> destroy)
       : destroy_(destroy) {}
-  ~ParametricStorageManager() {
+  ~ParametricStorageManager() { // NOLINT
     for (const auto &instance : parametric_instances_) {
       destroy_(instance.second);
     }
......
@@ -43,7 +43,7 @@ MasterDaemon::MasterDaemon(SocketType socket, int nranks, int timeout)
   _background_thread = std::thread{&MasterDaemon::run, this};
 }
-MasterDaemon::~MasterDaemon() {
+MasterDaemon::~MasterDaemon() { // NOLINT
   VLOG(4) << ("begin to destruct MasterDaemon");
   StopByControlFd();
   _background_thread.join();
......
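This destructor is flagged for a different reason than the pybind11 ones: `std::thread::join()` throws `std::system_error` (for example on a non-joinable thread or a deadlock), and nothing here proves it cannot. A hedged sketch of the same shape:

```cpp
#include <thread>

class Daemon {
 public:
  Daemon() : worker_([] { /* background loop would run here */ }) {}
  ~Daemon() {  // NOLINT(bugprone-exception-escape)
    // join() is not noexcept: it throws std::system_error on errors such
    // as joining a detached thread, so the check fires on this destructor.
    if (worker_.joinable()) {
      worker_.join();
    }
  }

 private:
  std::thread worker_;
};

int main() {
  Daemon d;
  return 0;
}
```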
@@ -21,7 +21,7 @@ SparseCooTensor::SparseCooTensor() {
   this->SetMember(non_zero_indices, non_zero_elements, {1}, true);
 }
-SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) {
+SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) { // NOLINT
   this->non_zero_elements_ = other.non_zero_elements_;
   this->non_zero_indices_ = other.non_zero_indices_;
   this->coalesced_ = other.coalesced_;
......
@@ -48,7 +48,7 @@ StringTensor& StringTensor::operator=(const StringTensor& other) {
   return *this;
 }
-StringTensor& StringTensor::operator=(StringTensor&& other) {
+StringTensor& StringTensor::operator=(StringTensor&& other) { // NOLINT
   meta_ = std::move(other.meta_);
   std::swap(holder_, other.holder_);
   return *this;
......
@@ -183,7 +183,8 @@ DenseTensorMeta& DenseTensorMeta::operator=(const DenseTensorMeta& other) {
   return *this;
 }
-DenseTensorMeta& DenseTensorMeta::operator=(DenseTensorMeta&& other) {
+DenseTensorMeta& DenseTensorMeta::operator=( // NOLINT
+    DenseTensorMeta&& other) {
   is_scalar = other.is_scalar;
   use_gpudnn = other.use_gpudnn;
   dims = std::move(other.dims);
......
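Move constructors and move assignment operators are on the check's list because standard containers only prefer moves over copies during reallocation when the move is noexcept (via `std::move_if_noexcept`); a potentially-throwing move either degrades to copying or risks termination. The three operators above copy or non-trivially assign some members rather than moving them, which is what makes them potentially throwing. A simplified sketch of a flagged move assignment:

```cpp
#include <string>
#include <utility>

struct Meta {
  std::string name_;
  Meta() = default;
  Meta& operator=(Meta&& other) {  // NOLINT(bugprone-exception-escape)
    // Copying the string may allocate and throw std::bad_alloc;
    // name_ = std::move(other.name_) would be non-throwing instead.
    name_ = other.name_;
    return *this;
  }
};

int main() {
  Meta a, b;
  b.name_ = "tensor";
  a = std::move(b);
  return 0;
}
```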
@@ -23,7 +23,7 @@ limitations under the License. */
 DECLARE_bool(enable_gpu_memory_usage_log);
 #endif
-int main(int argc, char** argv) {
+int main(int argc, char** argv) { // NOLINT
   paddle::memory::allocation::UseAllocatorStrategyGFlag();
   testing::InitGoogleTest(&argc, argv);
   std::vector<char*> new_argv;
......
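`main()` is inspected because an exception escaping it ends the process via `std::terminate()`, with no guarantee of stack unwinding. The patch uses `// NOLINT` in these code generators and test drivers; an alternative shape, sketched here with a hypothetical `RealMain` wrapper, is a catch-all at the top level:

```cpp
#include <exception>
#include <iostream>

static int RealMain(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "argc must be 2" << std::endl;
    return -1;
  }
  return 0;
}

int main(int argc, char** argv) {
  try {
    return RealMain(argc, argv);
  } catch (const std::exception& e) {
    // Catch here so failures produce a diagnostic instead of terminate().
    std::cerr << "fatal: " << e.what() << std::endl;
    return 1;
  }
}
```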