Unverified · Commit dcaca0f4 authored by gouzil, committed by GitHub

[clang-tidy] enable bugprone-exception-escape check (#56692)

Parent c0f5dac6
@@ -8,7 +8,7 @@ bugprone-argument-comment,
 bugprone-copy-constructor-init,
 -bugprone-dangling-handle,
 -bugprone-dynamic-static-initializers,
--bugprone-exception-escape,
+bugprone-exception-escape,
 -bugprone-fold-init-type,
 -bugprone-forwarding-reference-overload,
 -bugprone-inaccurate-erase,
......
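Note: in the Checks list of a .clang-tidy file, a leading "-" disables the named check, so dropping the "-" in front of bugprone-exception-escape is what turns the check on. The check reports functions that are not allowed to throw (destructors, move constructors, move assignment operators, main(), swap() functions, and anything declared noexcept) but can still let an exception escape; in those contexts an escaping exception ends in std::terminate(). A minimal sketch (hypothetical class, not from this commit) of code the check would flag:

    #include <stdexcept>

    class FileHolder {
     public:
      ~FileHolder() {  // bugprone-exception-escape: an exception may escape
        if (!Flush()) {
          // Destructors are implicitly noexcept in C++11 and later, so this
          // throw reaches std::terminate() at run time.
          throw std::runtime_error("flush failed");
        }
      }

     private:
      bool Flush();  // assumed fallible
    };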
@@ -72,7 +72,7 @@ Flatten::Flatten(const std::vector<DimTrans*>& dims)
   all_dim_trans.emplace_back(this);
 }
-Flatten::~Flatten() {
+Flatten::~Flatten() {  // NOLINT
   input_dims_.assign(input_dims_.size(), nullptr);
   std::vector<DimTrans*>().swap(input_dims_);
 }
......
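This hunk shows the fix pattern used throughout the commit: rather than redesigning the destructors, a NOLINT comment is appended on the line clang-tidy reports. A bare // NOLINT silences every check on that line; clang-tidy also accepts a check-specific form that avoids masking unrelated warnings (sketch, hypothetical function):

    ~Widget() {  // NOLINT(bugprone-exception-escape)
      Release();  // not marked noexcept, so the check assumes it can throw
    }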
@@ -3299,7 +3299,7 @@ static void DygraphCodeGeneration(const std::string& output_dir,
 }  // namespace framework
 }  // namespace paddle
-int main(int argc, char* argv[]) {
+int main(int argc, char* argv[]) {  // NOLINT
   if (argc != 3) {
     std::cerr << "argc must be 3" << std::endl;
     return -1;
......
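main() is on the check's must-not-throw list because an exception escaping main() terminates the process via std::terminate(), with stack unwinding left implementation-defined. The code-generator binaries in this commit simply suppress the warning; the non-suppression alternative would be a top-level try/catch, roughly like this (a sketch with a hypothetical RunGenerator helper, not what this commit does):

    #include <exception>
    #include <iostream>

    int RunGenerator(int argc, char* argv[]);  // hypothetical: holds the real logic

    int main(int argc, char* argv[]) {
      try {
        return RunGenerator(argc, argv);
      } catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
        return -1;
      }
    }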
@@ -27,7 +27,7 @@
 #include "pybind11/pytypes.h"
 namespace egr {
-GradNodePyLayer::~GradNodePyLayer() {
+GradNodePyLayer::~GradNodePyLayer() {  // NOLINT
   pybind11::gil_scoped_acquire gil;
   Py_XDECREF(ctx_);
 }
......
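The Python-interop destructors are a recurring case in this commit (besides GradNodePyLayer above, also EagerNumpyAllocation, PyObjectHolder, PackHook, UnPackHook, and PyVariableWrapperHook below): the check conservatively treats any call to a function not marked noexcept as potentially throwing, and neither pybind11::gil_scoped_acquire's constructor nor the CPython C API carries noexcept, so every destructor that takes the GIL is flagged. The shape is always the same (hypothetical holder shown):

    ~PyRefHolder() {  // NOLINT -- taking the GIL is not provably non-throwing
      pybind11::gil_scoped_acquire gil;  // required before touching Python objects
      Py_XDECREF(ptr_);                  // safe on nullptr
    }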
@@ -916,11 +916,12 @@ static void RegisterOperatorKernel(
   OperatorWithKernel::OpKernelFunc op_kernel_func;
   if (kernel_func) {
     VLOG(3) << "Register custom operator " << name << " with kernel func";
-    op_kernel_func = [kernel_func, inputs, outputs, attrs, inplace_map](
-                         const framework::ExecutionContext& ctx) {
-      VLOG(3) << "Custom Operator: run custom kernel func in lambda.";
-      RunKernelFunc(ctx, kernel_func, inputs, outputs, attrs, inplace_map);
-    };
+    op_kernel_func =
+        [kernel_func, inputs, outputs, attrs, inplace_map](  // NOLINT
+            const framework::ExecutionContext& ctx) {
+          VLOG(3) << "Custom Operator: run custom kernel func in lambda.";
+          RunKernelFunc(ctx, kernel_func, inputs, outputs, attrs, inplace_map);
+        };
   } else {
     VLOG(3) << "Register custom operator " << name
             << " with raw op kernel func";
@@ -1027,12 +1028,12 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // InferShape
   if (infer_shape_func == nullptr) {
     // use default InferShape
-    info.infer_shape_ =
-        [op_inputs, op_outputs, op_inplace_map](InferShapeContext* ctx) {
-          RunDefaultInferShapeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
-        };
+    info.infer_shape_ = [op_inputs, op_outputs, op_inplace_map](  // NOLINT
+                            InferShapeContext* ctx) {
+      RunDefaultInferShapeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
+    };
   } else {
-    info.infer_shape_ = [op_inputs,
+    info.infer_shape_ = [op_inputs,  // NOLINT
                          op_outputs,
                          op_attrs,
                          op_inplace_map,
@@ -1051,12 +1052,12 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Infer Dtype
   if (infer_dtype_func == nullptr) {
     // use default InferDtype
-    info.infer_var_type_ =
-        [op_inputs, op_outputs, op_inplace_map](InferVarTypeContext* ctx) {
-          RunDefaultInferDtypeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
-        };
+    info.infer_var_type_ = [op_inputs, op_outputs, op_inplace_map](  // NOLINT
+                               InferVarTypeContext* ctx) {
+      RunDefaultInferDtypeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
+    };
   } else {
-    info.infer_var_type_ = [op_inputs,
+    info.infer_var_type_ = [op_inputs,  // NOLINT
                             op_outputs,
                             op_attrs,
                             op_inplace_map,
@@ -1115,7 +1116,10 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // GradOpDescMaker
   info.grad_op_maker_ =
-      [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
+      [grad_op_name,  // NOLINT
+       grad_op_inputs,
+       grad_op_outputs,
+       is_double_grad](
           const OpDesc& fwd_op,
           const std::unordered_set<std::string>& no_grad_set,
           std::unordered_map<std::string, std::string>* grad_to_var,
@@ -1133,7 +1137,10 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // GradOpBaseMaker
   info.dygraph_grad_op_maker_ =
-      [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
+      [grad_op_name,  // NOLINT
+       grad_op_inputs,
+       grad_op_outputs,
+       is_double_grad](
           const std::string& type,
           const imperative::NameVarBaseMap& var_base_map_in,
           const imperative::NameVarBaseMap& var_base_map_out,
@@ -1173,7 +1180,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Grad InferShape
   if (grad_infer_shape_fn == nullptr) {
-    grad_info.infer_shape_ = [grad_op_inputs,
+    grad_info.infer_shape_ = [grad_op_inputs,  // NOLINT
                               grad_op_outputs,
                               is_double_grad](InferShapeContext* ctx) {
       // 1. if forward input exists, gradient's shape is same with forward
@@ -1211,7 +1218,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
       }
     };
   } else {
-    grad_info.infer_shape_ = [grad_op_inputs,
+    grad_info.infer_shape_ = [grad_op_inputs,  // NOLINT
                               grad_op_outputs,
                               grad_op_attrs,
                               grad_op_inplace_map,
@@ -1230,7 +1237,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Grad InferDtype
   if (grad_infer_dtype_fn != nullptr) {
     grad_info.infer_var_type_ =
-        [grad_op_inputs,
+        [grad_op_inputs,  // NOLINT
          grad_op_outputs,
          grad_op_attrs,
          grad_op_inplace_map,
......
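A note on the lambda changes in this file: a NOLINT comment only suppresses diagnostics reported on the physical line it occupies, so each lambda is reflowed just enough for the comment to land on the reported line (the capture list) while staying inside clang-format's column limit; the lambda bodies themselves are unchanged. Reduced to the pattern:

    info.infer_shape_ = [op_inputs, op_outputs, op_inplace_map](  // NOLINT
                            InferShapeContext* ctx) {
      RunDefaultInferShapeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
    };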
@@ -72,7 +72,7 @@ class SingleStreamGuard {
     }
   }
-  ~SingleStreamGuard() {
+  ~SingleStreamGuard() {  // NOLINT
     if (!is_changed) {
       return;
     }
......
@@ -33,7 +33,7 @@ PADDLE_DEFINE_EXPORTED_bool(
 namespace paddle {
 namespace framework {
-Scope::~Scope() { DropKids(); }
+Scope::~Scope() { DropKids(); }  // NOLINT
 Scope& Scope::NewScope() const {
   Scope* child = new Scope(this);
......
@@ -2581,7 +2581,7 @@ bool AnalysisPredictor::SaveTrtCalibToDisk() {
 }
 #endif
-AnalysisPredictor::~AnalysisPredictor() {
+AnalysisPredictor::~AnalysisPredictor() {  // NOLINT
 #ifdef PADDLE_WITH_TENSORRT
   if (config_.tensorrt_engine_enabled() &&
       config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
......
@@ -391,7 +391,7 @@ void MemoryMapAllocationPool::Clear() {
   memory_map_allocations_.clear();
 }
-MemoryMapAllocationPool::~MemoryMapAllocationPool() { Clear(); }
+MemoryMapAllocationPool::~MemoryMapAllocationPool() { Clear(); }  // NOLINT
 }  // namespace allocation
 }  // namespace memory
......
@@ -591,7 +591,7 @@ MemEvenRecorder::RecordMemEvent::RecordMemEvent(const Place &place,
   PushMemEvent(start_ns_, end_ns_, bytes_, place_, alloc_in_);
 }
-MemEvenRecorder::RecordMemEvent::~RecordMemEvent() {
+MemEvenRecorder::RecordMemEvent::~RecordMemEvent() {  // NOLINT
   phi::DeviceTracer *tracer = phi::GetDeviceTracer();
   end_ns_ = PosixInNsec();
......
@@ -60,7 +60,7 @@ ChromeTracingLogger::ChromeTracingLogger(const char* filename_cstr) {
   StartLog();
 }
-ChromeTracingLogger::~ChromeTracingLogger() {
+ChromeTracingLogger::~ChromeTracingLogger() {  // NOLINT
   EndLog();
   output_file_stream_.close();
 }
......
@@ -150,7 +150,7 @@ std::unique_ptr<ProfilerResult> DeserializationReader::Parse() {
   return std::unique_ptr<ProfilerResult>(profiler_result_ptr);
 }
-DeserializationReader::~DeserializationReader() {
+DeserializationReader::~DeserializationReader() {  // NOLINT
   delete node_trees_proto_;
   input_file_stream_.close();
 }
......
@@ -371,7 +371,7 @@ SerializationLogger::SerializationLogger(const char* filename_cstr) {
   OpenFile();
 }
-SerializationLogger::~SerializationLogger() {
+SerializationLogger::~SerializationLogger() {  // NOLINT
   if (!output_file_stream_) {
     delete node_trees_proto_;
     return;
......
@@ -106,7 +106,7 @@ class EagerNumpyAllocation : public phi::Allocation {
         "The underlying PyObject pointer of numpy array cannot be None"));
     Py_INCREF(arr_);
   }
-  ~EagerNumpyAllocation() override {
+  ~EagerNumpyAllocation() override {  // NOLINT
     py::gil_scoped_acquire gil;
     Py_DECREF(arr_);
   }
......
@@ -477,7 +477,7 @@ GenerateOpFunctions() {
   return std::make_tuple(op_function_list, bind_function_list);
 }
-int main(int argc, char* argv[]) {
+int main(int argc, char* argv[]) {  // NOLINT
   if (argc != 2) {
     std::cerr << "argc must be 2" << std::endl;
     return -1;
......
@@ -1834,7 +1834,7 @@ void PyVoidHook::operator()() {
 PyObjectHolder::PyObjectHolder(PyObject* ptr) { ptr_ = ptr; }
-PyObjectHolder::~PyObjectHolder() {
+PyObjectHolder::~PyObjectHolder() {  // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_XDECREF(ptr_);
 }
@@ -1860,7 +1860,7 @@ void PyObjectHolder::dec_ref() {
 PackHook::PackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
-PackHook::~PackHook() {
+PackHook::~PackHook() {  // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_DECREF(hook_);
 }
@@ -1899,7 +1899,7 @@ void* PackHook::operator()(void* py_tensor) {
 UnPackHook::UnPackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
-UnPackHook::~UnPackHook() {
+UnPackHook::~UnPackHook() {  // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_DECREF(hook_);
 }
......
@@ -88,7 +88,7 @@ class PyVariableWrapperHook : public imperative::VariableWrapperHook {
     Py_INCREF(py_func_);
   }
-  ~PyVariableWrapperHook() override {
+  ~PyVariableWrapperHook() override {  // NOLINT
     py::gil_scoped_acquire gil;
     Py_DECREF(py_func_);
   }
......
@@ -40,7 +40,7 @@
 //   },
 //   ...
 // }
-int main(int argc, char **argv) {
+int main(int argc, char **argv) {  // NOLINT
   paddle::framework::InitDefaultKernelSignatureMap();
   auto &kernel_signature_map = phi::DefaultKernelSignatureMap::Instance();
   auto &kernel_factory = phi::KernelFactory::Instance();
......
@@ -28,7 +28,7 @@ struct ParametricStorageManager {
   explicit ParametricStorageManager(std::function<void(StorageBase *)> destroy)
       : destroy_(destroy) {}
-  ~ParametricStorageManager() {
+  ~ParametricStorageManager() {  // NOLINT
     for (const auto &instance : parametric_instances_) {
       destroy_(instance.second);
     }
......
@@ -43,7 +43,7 @@ MasterDaemon::MasterDaemon(SocketType socket, int nranks, int timeout)
   _background_thread = std::thread{&MasterDaemon::run, this};
 }
-MasterDaemon::~MasterDaemon() {
+MasterDaemon::~MasterDaemon() {  // NOLINT
   VLOG(4) << ("begin to destruct MasterDaemon");
   StopByControlFd();
   _background_thread.join();
......
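MasterDaemon's destructor is a genuinely throwing case: std::thread::join() throws std::system_error when the thread is not joinable or a deadlock is detected. The commit keeps the existing shutdown sequence and suppresses the warning; an alternative that satisfies the check would absorb the error instead (hypothetical sketch, not what this commit does):

    #include <iostream>
    #include <system_error>
    #include <thread>

    struct Daemon {
      std::thread worker_;
      ~Daemon() {
        try {
          if (worker_.joinable()) worker_.join();
        } catch (const std::system_error& e) {
          // join() can throw; log and swallow so the destructor stays non-throwing
          std::cerr << "join failed: " << e.what() << std::endl;
        }
      }
    };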
@@ -21,7 +21,7 @@ SparseCooTensor::SparseCooTensor() {
   this->SetMember(non_zero_indices, non_zero_elements, {1}, true);
 }
-SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) {
+SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) {  // NOLINT
   this->non_zero_elements_ = other.non_zero_elements_;
   this->non_zero_indices_ = other.non_zero_indices_;
   this->coalesced_ = other.coalesced_;
......
@@ -48,7 +48,7 @@ StringTensor& StringTensor::operator=(const StringTensor& other) {
   return *this;
 }
-StringTensor& StringTensor::operator=(StringTensor&& other) {
+StringTensor& StringTensor::operator=(StringTensor&& other) {  // NOLINT
   meta_ = std::move(other.meta_);
   std::swap(holder_, other.holder_);
   return *this;
......
@@ -183,7 +183,8 @@ DenseTensorMeta& DenseTensorMeta::operator=(const DenseTensorMeta& other) {
   return *this;
 }
-DenseTensorMeta& DenseTensorMeta::operator=(DenseTensorMeta&& other) {
+DenseTensorMeta& DenseTensorMeta::operator=(  // NOLINT
+    DenseTensorMeta&& other) {
   is_scalar = other.is_scalar;
   use_gpudnn = other.use_gpudnn;
   dims = std::move(other.dims);
......
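The last group covers move operations. bugprone-exception-escape expects move constructors and move assignment operators not to throw, and the moves of SparseCooTensor, StringTensor, and DenseTensorMeta assign members through potentially throwing operations without being declared noexcept, hence the suppressions. The check-clean alternative is a move that is genuinely noexcept (sketch, hypothetical type):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Meta {
      std::vector<int64_t> dims;
      Meta& operator=(Meta&& other) noexcept {
        dims = std::move(other.dims);  // vector move assignment steals the buffer
        return *this;
      }
    };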
@@ -23,7 +23,7 @@ limitations under the License. */
 DECLARE_bool(enable_gpu_memory_usage_log);
 #endif
-int main(int argc, char** argv) {
+int main(int argc, char** argv) {  // NOLINT
   paddle::memory::allocation::UseAllocatorStrategyGFlag();
   testing::InitGoogleTest(&argc, argv);
   std::vector<char*> new_argv;
......