diff --git a/.clang-tidy b/.clang-tidy
index 5a4baae37c4216c13a9f9d08b61b942b4e42a531..972128176449487301f6da54961e6a8511c72247 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -53,7 +53,7 @@ bugprone-use-after-move,
 -clang-analyzer-core.CallAndMessage,
 -clang-analyzer-core.DivideZero,
 -clang-analyzer-core.DynamicTypePropagation,
--clang-analyzer-core.NonNullParamChecker,
+clang-analyzer-core.NonNullParamChecker,
 -clang-analyzer-core.NonnilStringConstants,
 -clang-analyzer-core.NullDereference,
 -clang-analyzer-core.StackAddrEscapeBase,
@@ -154,7 +154,7 @@ clang-analyzer-unix.Vfork,
 cppcoreguidelines-avoid-c-arrays,
 -cppcoreguidelines-avoid-goto,
 cppcoreguidelines-c-copy-assignment-signature,
--cppcoreguidelines-explicit-virtual-functions,
+cppcoreguidelines-explicit-virtual-functions,
 -cppcoreguidelines-init-variables,
 -cppcoreguidelines-narrowing-conversions,
 -cppcoreguidelines-no-malloc,
diff --git a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
index 1493913f5b2a8ff5f1aeef396cc6c531edc87c55..d39cb285517f2cc398f646f581b8adc985f56752 100644
--- a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
+++ b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
@@ -386,8 +386,7 @@ void *Alloc(const platform::CUDAPinnedPlace &place,
   if (ptr == nullptr) {
     LOG(WARNING) << "cudaHostAlloc Cannot allocate " << size
                  << " bytes in CUDAPinnedPlace";
-  }
-  if (FLAGS_init_allocated_mem) {
+  } else if (FLAGS_init_allocated_mem) {
     memset(ptr, 0xEF, size);
   }
   return ptr;
diff --git a/paddle/phi/kernels/cpu/rnn_functor.h b/paddle/phi/kernels/cpu/rnn_functor.h
index d711998184d9325fc41f3328b16b61a7fef5b621..87b533799ba902e154a2fe3bfc5fe6afcbed2082 100644
--- a/paddle/phi/kernels/cpu/rnn_functor.h
+++ b/paddle/phi/kernels/cpu/rnn_functor.h
@@ -14,6 +14,7 @@
 
 #pragma once
 
+#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/tensor_utils.h"
@@ -344,6 +345,12 @@ void RnnFunc(const Context& dev_ctx,
   auto last_h_unbind = Unbind(*last_h);
   std::vector<DenseTensor> init_c_unbind, last_c_unbind;
   if (is_lstm(cell_type)) {
+    PADDLE_ENFORCE_NOT_NULL(
+        init_c,
+        paddle::platform::errors::InvalidArgument("init_c contains no data."));
+    PADDLE_ENFORCE_NOT_NULL(
+        last_c,
+        paddle::platform::errors::InvalidArgument("last_c contains no data."));
     init_c_unbind = Unbind(*init_c);
     last_c_unbind = Unbind(*last_c);
   }
diff --git a/paddle/phi/kernels/cpu/rnn_kernel.cc b/paddle/phi/kernels/cpu/rnn_kernel.cc
index b2e7dd19fafd86ba1ef71f9096df05bb1068e879..a861f6696473ebd17953eff0a7f64138fc8dc684 100644
--- a/paddle/phi/kernels/cpu/rnn_kernel.cc
+++ b/paddle/phi/kernels/cpu/rnn_kernel.cc
@@ -706,7 +706,7 @@ struct SingleLayer : public Layer {
                   DenseTensor* cell_value,
                   DenseTensor* cell_act_value,
                   const std::string& mode,
-                  bool is_test) {
+                  bool is_test) override {
     this->RunIter(dev_ctx,
                   input,
                   vec,
@@ -745,7 +745,7 @@ struct BidirLayer : public Layer {
                   DenseTensor* cell_value,
                   DenseTensor* cell_act_value,
                   const std::string& mode,
-                  bool is_test) {
+                  bool is_test) override {
     std::vector<DenseTensor> output_vec(2);
     DenseTensor forward_input_w, forward_cell_value, forward_cell_act_value;
     DenseTensor backward_input_w, backward_cell_value, backward_cell_act_value;