Unverified commit 0f5148fb authored by gouzil and committed by GitHub

[clang-tidy] Enable the cppcoreguidelines-avoid-c-arrays check (#56208)

Parent 2c307457
@@ -151,7 +151,7 @@ bugprone-unused-raii,
 -clang-analyzer-valist.Uninitialized,
 -clang-analyzer-valist.Unterminated,
 -clang-analyzer-valist.ValistBase,
--cppcoreguidelines-avoid-c-arrays,
+cppcoreguidelines-avoid-c-arrays,
 -cppcoreguidelines-avoid-goto,
 -cppcoreguidelines-c-copy-assignment-signature,
 -cppcoreguidelines-explicit-virtual-functions,
......
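In a .clang-tidy Checks list a leading "-" disables a check, so dropping the prefix above enables cppcoreguidelines-avoid-c-arrays repository-wide. The check warns on C-style array declarations and suggests std::array (or std::vector). The hunks that follow respond in one of two ways, sketched below with illustrative identifiers: migrate to std::array where the call sites allow it, or keep the C array and suppress the diagnostic with a NOLINT comment.

```cpp
#include <array>

int legacy_buf[4];                // flagged: cppcoreguidelines-avoid-c-arrays
std::array<int, 4> modern_buf{};  // remedy 1: migrate to std::array
const char kAttr[] = "Input";     // NOLINT  (remedy 2: suppress in place)
```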
@@ -30,12 +30,12 @@ PHI_DECLARE_bool(use_mkldnn);
 namespace paddle {
 namespace operators {
 const char ConditionalOp::kInputs[] = "Input";    // NOLINT
 const char ConditionalOp::kOutputs[] = "Out";     // NOLINT
 const char ConditionalOp::kCondition[] = "Cond";  // NOLINT
 const char ConditionalOp::kScope[] = "Scope";     // NOLINT
-const char ConditionalOp::kSkipEagerDeletionVars[] =
-    "skip_eager_deletion_vars";  // NOLINT
+const char ConditionalOp::kSkipEagerDeletionVars[] =  // NOLINT
+    "skip_eager_deletion_vars";
 using Executor = framework::Executor;
 using ExecutorPrepareContext = framework::ExecutorPrepareContext;
......
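The only change in this hunk (and the next) is where the NOLINT sits. clang-tidy anchors the avoid-c-arrays diagnostic at the line where the declaration begins, and a plain // NOLINT silences only diagnostics reported on its own line, so once clang-format wraps the initializer onto a second line the suppression has to move up to the declaration line. A sketch with a hypothetical identifier:

```cpp
// Works: the diagnostic is reported where the declaration starts.
const char kLongAttribute[] =  // NOLINT
    "a_value_long_enough_to_wrap";

// A NOLINT left on the continuation line instead would sit on the wrong
// line for the diagnostic, and the warning would still fire above it.
```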
@@ -41,14 +41,14 @@ const char RecurrentBase::kStates[] = "states";  // NOLINT
 const char RecurrentBase::kStepBlock[] = "sub_block";  // NOLINT
 const char RecurrentBase::kReverse[] = "reverse";      // NOLINT
 const char RecurrentBase::kIsTrain[] = "is_train";     // NOLINT
-const char RecurrentBase::kSkipEagerDeletionVars[] =
-    "skip_eager_deletion_vars";  // NOLINT
+const char RecurrentBase::kSkipEagerDeletionVars[] =  // NOLINT
+    "skip_eager_deletion_vars";
 #define GRAD_SUFFIX "@GRAD"
 const char RecurrentBase::kInputGrads[] = "inputs" GRAD_SUFFIX;      // NOLINT
 const char RecurrentBase::kOutputGrads[] = "outputs" GRAD_SUFFIX;    // NOLINT
 const char RecurrentBase::kParamGrads[] = "parameters" GRAD_SUFFIX;  // NOLINT
-const char RecurrentBase::kInitStateGrads[] =
-    "initial_states" GRAD_SUFFIX;  // NOLINT
+const char RecurrentBase::kInitStateGrads[] =  // NOLINT
+    "initial_states" GRAD_SUFFIX;
 static void ClearStepScopes(const platform::DeviceContext &dev_ctx,
                             framework::Scope *parent_scope,
......
@@ -701,7 +701,7 @@ void AutoInitStringTensorByStringTensor(
   InitStringTensorWithStringTensor(py_tensor_ptr, src_tensor, place, act_name);
 }
-PyDoc_STRVAR(
+PyDoc_STRVAR(  // NOLINT
     TensorDoc,
     R"DOC(Tensor($self, /, value, place, persistable, zero_copy, name, stop_gradient, dims, dtype, type)
 --
......
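The PyDoc_STRVAR uses need suppression because the CPython macro itself declares a C string array, which is exactly what the newly enabled check flags. A sketch of the expansion, slightly simplified from the CPython headers (docstring-enabled variant):

```cpp
// Approximate expansion of PyDoc_STRVAR(TensorDoc, "...") -- the macro
// declares a static C array of char, so avoid-c-arrays fires here:
static const char TensorDoc[] = "...";
// Hence the NOLINT is attached to the PyDoc_STRVAR( line itself.
```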
@@ -1273,7 +1273,7 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyMethodDef variable_functions[] = {
+PyMethodDef variable_functions[] = {  // NOLINT
     // TODO(jiabin): Remove scale when we have final state tests
     {"scale",
      (PyCFunction)(void (*)())eager_api_scale,
......
@@ -1833,7 +1833,7 @@ static PyObject* tensor__eq__method(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyMethodDef math_op_patch_methods[] = {
+PyMethodDef math_op_patch_methods[] = {  // NOLINT
     {"__add__",
      (PyCFunction)(void (*)())tensor__add__method,
      METH_VARARGS | METH_KEYWORDS,
......
@@ -101,7 +101,8 @@ Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj) {
   }
 }
-PyDoc_STRVAR(tensor_method_numpy__doc__, R"DOC(numpy($self, /)
+PyDoc_STRVAR(tensor_method_numpy__doc__,  // NOLINT
+             R"DOC(numpy($self, /)
 --
 Returns a numpy array shows the value of current Tensor.
@@ -128,8 +129,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   EAGER_TRY
   auto& api = pybind11::detail::npy_api::get();
   if (!self->tensor.impl()) {
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
     py_dims[0] = 0;
     py_strides[0] = 0;
@@ -148,8 +149,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   auto tensor_dims = self->tensor.shape();
   auto numpy_dtype = TensorDtype2NumpyDtype(self->tensor.type());
   auto sizeof_dtype = phi::SizeOf(self->tensor.type());
-  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
   size_t py_rank = tensor_dims.size();
   size_t numel = 1;
   if (py_rank == 0) {
@@ -419,8 +420,8 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
   if (!self->tensor.impl() || !self->tensor.impl()->initialized()) {
     VLOG(6) << "The StringTensor is uninitialized. Return the empty string "
                "numpy array.";
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
     py_dims[0] = 0;
     py_strides[0] = 0;
@@ -597,7 +598,8 @@ static PyObject* tensor_method_copy_(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method_clone__doc__, R"DOC(clone($self, /)
+PyDoc_STRVAR(tensor_method_clone__doc__,  // NOLINT
+             R"DOC(clone($self, /)
 --
 Returns a new Tensor, which is clone of origin Tensor, and it remains in the current graph.
@@ -672,7 +674,7 @@ static PyObject* tensor_retain_grads(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_clear_gradient__doc__,
+PyDoc_STRVAR(tensor_clear_gradient__doc__,  // NOLINT
              R"DOC(clear_gradient($self, set_to_zero=True, /)
 --
@@ -895,7 +897,8 @@ static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method_detach__doc__, R"DOC(detach($self, /)
+PyDoc_STRVAR(tensor_method_detach__doc__,  // NOLINT
+             R"DOC(detach($self, /)
 --
 Returns a new Tensor, detached from the current graph.
@@ -1291,8 +1294,8 @@ static PyObject* tensor__getitem_from_offset(TensorObject* self,
   if (tensor.dtype() == proto_type) {                            \
     auto numpy_dtype = TensorDtype2NumpyDtype(proto_type);       \
     T b = paddle::pybind::TensorGetElement<T>(tensor, offset);   \
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];      \
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];   \
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];    /* NOLINT */ \
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank]; /* NOLINT */ \
     auto& api = pybind11::detail::npy_api::get();                \
     PyObject* array = api.PyArray_NewFromDescr_(                 \
         api.PyArray_Type_,                                       \
@@ -1638,7 +1641,7 @@ static PyObject* tensor_inplace_assign(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method__register_reduce_hook__doc__,
+PyDoc_STRVAR(tensor_method__register_reduce_hook__doc__,  // NOLINT
              R"DOC(_register_backward_hook($self, hook, /)
 --
@@ -2027,7 +2030,8 @@ static PyObject* tensor__inplace_version(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method_element_size__doc__, R"DOC(element_size($self, /)
+PyDoc_STRVAR(tensor_method_element_size__doc__,  // NOLINT
+             R"DOC(element_size($self, /)
 --
 Returns the size in bytes of an element in the Tensor.
@@ -2066,7 +2070,7 @@ static PyObject* tensor_method_element_size(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method__bump_inplace_version__doc__,
+PyDoc_STRVAR(tensor_method__bump_inplace_version__doc__,  // NOLINT
              R"DOC(_bump_inplace_version($self, /)
 --
@@ -2368,7 +2372,7 @@ static PyObject* tensor_method__is_string_tensor_hold_allocation(
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyMethodDef variable_methods[] = {
+PyMethodDef variable_methods[] = {  // NOLINT
     {"numpy",
      (PyCFunction)(void (*)())tensor_method_numpy,
      METH_VARARGS | METH_KEYWORDS,
@@ -2630,7 +2634,7 @@ PyMethodDef variable_methods[] = {
     {nullptr, nullptr, 0, nullptr}};
 // variable_methods for core.eager.StringTensor
-PyMethodDef string_tensor_variable_methods[] = {
+PyMethodDef string_tensor_variable_methods[] = {  // NOLINT
     {"numpy",
      (PyCFunction)(void (*)())tensor_method_numpy_for_string_tensor,
      METH_VARARGS | METH_KEYWORDS,
......
@@ -74,7 +74,7 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_is_leaf__doc__,
+PyDoc_STRVAR(tensor_is_leaf__doc__,  // NOLINT
              R"DOC(is_leaf
 Whether a Tensor is leaf Tensor.
......
@@ -40,7 +40,7 @@ static PyObject *divide(PyObject *self, PyObject *args, PyObject *kwargs) {
   return static_api_divide(self, args, kwargs);
 }
-static PyMethodDef OpsAPI[] = {{"add_n",
+static PyMethodDef OpsAPI[] = {{"add_n",  // NOLINT
                                 (PyCFunction)(void (*)(void))add_n,
                                 METH_VARARGS | METH_KEYWORDS,
                                 "C++ interface function for add_n."},
......
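The PyMethodDef tables here and in the earlier hunks keep their C arrays and take a NOLINT instead: the CPython API consumes them as a raw pointer to a sentinel-terminated array (for example through PyModuleDef::m_methods), so converting to std::array would gain nothing at this boundary. A minimal, self-contained sketch with hypothetical names:

```cpp
#include <Python.h>

// Illustrative method; the name and behavior are placeholders.
static PyObject* hello(PyObject* self, PyObject* args) { Py_RETURN_NONE; }

// CPython walks the table as a raw pointer until the zeroed sentinel entry,
// so the sentinel-terminated C array stays and the check is suppressed.
static PyMethodDef demo_methods[] = {  // NOLINT
    {"hello", hello, METH_VARARGS, "C++ interface function for hello."},
    {nullptr, nullptr, 0, nullptr}};

static PyModuleDef demo_module = {
    PyModuleDef_HEAD_INIT, "demo", nullptr, -1, demo_methods};

PyMODINIT_FUNC PyInit_demo() { return PyModule_Create(&demo_module); }
```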
@@ -152,12 +152,12 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
 #if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_ARM) && \
     !defined(PADDLE_WITH_SW) && !defined(PADDLE_WITH_MIPS) && \
     !defined(PADDLE_WITH_LOONGARCH)
-  int reg[4];
-  cpuid(reg, 0);
+  std::array<int, 4> reg;
+  cpuid(reg.data(), 0);
   int nIds = reg[0];
   if (nIds >= 0x00000001) {
     // EAX = 1
-    cpuid(reg, 0x00000001);
+    cpuid(reg.data(), 0x00000001);
     // AVX: ECX Bit 28
     if (cpu_isa == avx) {
       int avx_mask = (1 << 28);
@@ -166,7 +166,7 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
   }
   if (nIds >= 0x00000007) {
     // EAX = 7
-    cpuid(reg, 0x00000007);
+    cpuid(reg.data(), 0x00000007);
     if (cpu_isa == avx2) {
       // AVX2: EBX Bit 5
       int avx2_mask = (1 << 5);
@@ -184,7 +184,7 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
         (reg[1] & avx512bw_mask) && (reg[1] & avx512vl_mask));
   }
   // EAX = 7, ECX = 1
-  cpuid(reg, 0x00010007);
+  cpuid(reg.data(), 0x00010007);
   if (cpu_isa == avx512_bf16) {
     // AVX512BF16: EAX Bit 5
     int avx512bf16_mask = (1 << 5);
......
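Where no pointer escapes to code that needs an actual array type, the commit converts outright, as with the cpuid buffer above: the element count moves into the type, indexing syntax is unchanged, and .data() passes the underlying int* to the legacy interface. A reduced sketch of the pattern (this cpuid signature is assumed for illustration):

```cpp
#include <array>

extern void cpuid(int* info, int eax);  // pre-existing C-style interface

bool HasAvx() {
  std::array<int, 4> reg{};          // replaces `int reg[4]`; {} zero-fills
  cpuid(reg.data(), 0x00000001);     // .data() yields the int* the API expects
  return (reg[2] & (1 << 28)) != 0;  // AVX: ECX bit 28, as in the hunk above
}
```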
@@ -13,6 +13,7 @@
 // limitations under the License.
 #include "paddle/phi/kernels/funcs/strided_reshape_utils.h"
+#include <array>
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/reshape_kernel.h"
@@ -24,10 +25,10 @@ bool ReshapeStride(const DDim& old_dims,
                    DDim& new_stride) {  // NOLINT
   int64_t numel = product(old_dims);
   if (numel < 0) {
-    int64_t tmp[2];
+    std::array<int64_t, 2> tmp;
     tmp[0] = 1;
     tmp[1] = new_dims.size();
-    new_stride = DDim(tmp, 2);
+    new_stride = DDim(tmp.data(), 2);
     return true;
   } else if (numel == 0) {
     if (old_dims == new_dims) {
......
@@ -53,8 +53,11 @@ TEST(API, case_convert) {
     cpu_strings_x_data[i] = strs[i];
   }
   // 2. get expected results
-  std::string expected_results[] = {
-      strs[0], strs[0], strs[1], strs[1]};  // NOLINT
+  std::string expected_results[] = {// NOLINT
+                                    strs[0],
+                                    strs[0],
+                                    strs[1],
+                                    strs[1]};
   std::transform(
       strs[0].begin(), strs[0].end(), expected_results[0].begin(), ::tolower);
   std::transform(
@@ -103,8 +106,8 @@ TEST(API, case_convert_utf8) {
   pstring* cpu_strings_x_data =
       dev_ctx->template Alloc<pstring>(cpu_strings_x.get());
-  std::string strs[] = {"óÓsscHloëË",
-                        "óÓsscHloëËóÓsscHloëËóÓsscHloëË"};  // NOLINT
+  std::string strs[] = {"óÓsscHloëË",  // NOLINT
+                        "óÓsscHloëËóÓsscHloëËóÓsscHloëË"};
   for (int i = 0; i < 2; ++i) {
     cpu_strings_x_data[i] = strs[i];
   }
......
@@ -56,8 +56,11 @@ TEST(DEV_API, strings_cast_convert) {
   dense_x_data[1] = long_str;
   // 2. get expected results
-  std::string expected_results[] = {
-      short_str, short_str, long_str, long_str};  // NOLINT
+  std::string expected_results[] = {// NOLINT
+                                    short_str,
+                                    short_str,
+                                    long_str,
+                                    long_str};
   std::transform(short_str.begin(),
                  short_str.end(),
                  expected_results[0].begin(),
......