Unverified commit 0f5148fb, authored by gouzil and committed by GitHub

[clang-tidy] Enable the cppcoreguidelines-avoid-c-arrays check (#56208)

Parent 2c307457
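Context for the diff below: `cppcoreguidelines-avoid-c-arrays` flags C-style array declarations and prefers `std::array` (or `std::vector`). This commit migrates call sites where the array is a plain fixed-size buffer and suppresses the check with `NOLINT` where a C array is required by an external interface. A minimal sketch of the migration pattern, assuming a hypothetical pointer-plus-length C API (`fill_values`) like the `cpuid()` and `DDim(int64_t*, int)` call sites further down:

```cpp
#include <array>
#include <cstdint>

// Hypothetical C-style interface taking pointer + length.
void fill_values(int64_t* buf, int n);

void before() {
  int64_t vals[4];  // flagged by cppcoreguidelines-avoid-c-arrays
  fill_values(vals, 4);
}

void after() {
  std::array<int64_t, 4> vals;  // fixed-size, zero-overhead replacement
  fill_values(vals.data(), 4);  // .data() yields the raw pointer the C API expects
}
```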
......@@ -151,7 +151,7 @@ bugprone-unused-raii,
 -clang-analyzer-valist.Uninitialized,
 -clang-analyzer-valist.Unterminated,
 -clang-analyzer-valist.ValistBase,
--cppcoreguidelines-avoid-c-arrays,
+cppcoreguidelines-avoid-c-arrays,
 -cppcoreguidelines-avoid-goto,
 -cppcoreguidelines-c-copy-assignment-signature,
 -cppcoreguidelines-explicit-virtual-functions,
......
......@@ -30,12 +30,12 @@ PHI_DECLARE_bool(use_mkldnn);
 namespace paddle {
 namespace operators {
-const char ConditionalOp::kInputs[] = "Input";    // NOLINT
-const char ConditionalOp::kOutputs[] = "Out";     // NOLINT
-const char ConditionalOp::kCondition[] = "Cond";  // NOLINT
-const char ConditionalOp::kScope[] = "Scope";     // NOLINT
-const char ConditionalOp::kSkipEagerDeletionVars[] =
-    "skip_eager_deletion_vars";  // NOLINT
+const char ConditionalOp::kInputs[] = "Input";    // NOLINT
+const char ConditionalOp::kOutputs[] = "Out";     // NOLINT
+const char ConditionalOp::kCondition[] = "Cond";  // NOLINT
+const char ConditionalOp::kScope[] = "Scope";     // NOLINT
+const char ConditionalOp::kSkipEagerDeletionVars[] =  // NOLINT
+    "skip_eager_deletion_vars";
 using Executor = framework::Executor;
 using ExecutorPrepareContext = framework::ExecutorPrepareContext;
......
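Note on the `NOLINT` moves in the hunk above and the one below: clang-tidy matches a `NOLINT` comment against the line on which the diagnostic is reported, and for a declaration wrapped across lines that is the line where the declaration starts, not the continuation line carrying the string literal. A minimal sketch with a hypothetical constant:

```cpp
// Hypothetical constant. The diagnostic for a wrapped declaration is reported
// on its first line, so the suppression comment must sit there; on the
// continuation line it has no effect.
const char kSkipVars[] =  // NOLINT(cppcoreguidelines-avoid-c-arrays)
    "skip_eager_deletion_vars";
```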
......@@ -41,14 +41,14 @@ const char RecurrentBase::kStates[] = "states"; // NOLINT
 const char RecurrentBase::kStepBlock[] = "sub_block";  // NOLINT
 const char RecurrentBase::kReverse[] = "reverse";      // NOLINT
 const char RecurrentBase::kIsTrain[] = "is_train";     // NOLINT
-const char RecurrentBase::kSkipEagerDeletionVars[] =
-    "skip_eager_deletion_vars";  // NOLINT
+const char RecurrentBase::kSkipEagerDeletionVars[] =  // NOLINT
+    "skip_eager_deletion_vars";
 #define GRAD_SUFFIX "@GRAD"
 const char RecurrentBase::kInputGrads[] = "inputs" GRAD_SUFFIX;      // NOLINT
 const char RecurrentBase::kOutputGrads[] = "outputs" GRAD_SUFFIX;    // NOLINT
 const char RecurrentBase::kParamGrads[] = "parameters" GRAD_SUFFIX;  // NOLINT
-const char RecurrentBase::kInitStateGrads[] =
-    "initial_states" GRAD_SUFFIX;  // NOLINT
+const char RecurrentBase::kInitStateGrads[] =  // NOLINT
+    "initial_states" GRAD_SUFFIX;
 static void ClearStepScopes(const platform::DeviceContext &dev_ctx,
                             framework::Scope *parent_scope,
......
......@@ -701,7 +701,7 @@ void AutoInitStringTensorByStringTensor(
   InitStringTensorWithStringTensor(py_tensor_ptr, src_tensor, place, act_name);
 }
-PyDoc_STRVAR(
+PyDoc_STRVAR(  // NOLINT
     TensorDoc,
     R"DOC(Tensor($self, /, value, place, persistable, zero_copy, name, stop_gradient, dims, dtype, type)
 --
......
......@@ -1273,7 +1273,7 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyMethodDef variable_functions[] = {
+PyMethodDef variable_functions[] = {  // NOLINT
     // TODO(jiabin): Remove scale when we have final state tests
     {"scale",
      (PyCFunction)(void (*)())eager_api_scale,
......
......@@ -1833,7 +1833,7 @@ static PyObject* tensor__eq__method(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyMethodDef math_op_patch_methods[] = {
+PyMethodDef math_op_patch_methods[] = {  // NOLINT
     {"__add__",
      (PyCFunction)(void (*)())tensor__add__method,
      METH_VARARGS | METH_KEYWORDS,
......
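The `PyDoc_STRVAR` and `PyMethodDef` sites in this and the following files are suppressed rather than migrated: CPython walks a plain, sentinel-terminated `PyMethodDef[]` table (and `PyDoc_STRVAR` expands to a `static const char name[]`), so converting them to `std::array` would change the type the interpreter consumes. A minimal self-contained sketch, with a hypothetical `noop` binding:

```cpp
#include <Python.h>

// Hypothetical no-op binding, used only to illustrate the table layout.
static PyObject* noop(PyObject* self, PyObject* args) { Py_RETURN_NONE; }

// CPython requires a C array terminated by a zeroed sentinel entry, so the
// C-array diagnostic is suppressed instead of "fixed".
static PyMethodDef example_methods[] = {  // NOLINT(cppcoreguidelines-avoid-c-arrays)
    {"noop", noop, METH_VARARGS, "Does nothing."},
    {nullptr, nullptr, 0, nullptr}};  // sentinel required by CPython
```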
......@@ -101,7 +101,8 @@ Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj) {
   }
 }
-PyDoc_STRVAR(tensor_method_numpy__doc__, R"DOC(numpy($self, /)
+PyDoc_STRVAR(tensor_method_numpy__doc__,  // NOLINT
+             R"DOC(numpy($self, /)
 --
 Returns a numpy array shows the value of current Tensor.
......@@ -128,8 +129,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   EAGER_TRY
   auto& api = pybind11::detail::npy_api::get();
   if (!self->tensor.impl()) {
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
     py_dims[0] = 0;
     py_strides[0] = 0;
......@@ -148,8 +149,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   auto tensor_dims = self->tensor.shape();
   auto numpy_dtype = TensorDtype2NumpyDtype(self->tensor.type());
   auto sizeof_dtype = phi::SizeOf(self->tensor.type());
-  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
   size_t py_rank = tensor_dims.size();
   size_t numel = 1;
   if (py_rank == 0) {
......@@ -419,8 +420,8 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
   if (!self->tensor.impl() || !self->tensor.impl()->initialized()) {
     VLOG(6) << "The StringTensor is uninitialized. Return the empty string "
                "numpy array.";
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
     py_dims[0] = 0;
     py_strides[0] = 0;
......@@ -597,7 +598,8 @@ static PyObject* tensor_method_copy_(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method_clone__doc__, R"DOC(clone($self, /)
+PyDoc_STRVAR(tensor_method_clone__doc__,  // NOLINT
+             R"DOC(clone($self, /)
 --
 Returns a new Tensor, which is clone of origin Tensor, and it remains in the current graph.
......@@ -672,7 +674,7 @@ static PyObject* tensor_retain_grads(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_clear_gradient__doc__,
+PyDoc_STRVAR(tensor_clear_gradient__doc__,  // NOLINT
              R"DOC(clear_gradient($self, set_to_zero=True, /)
 --
......@@ -895,7 +897,8 @@ static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method_detach__doc__, R"DOC(detach($self, /)
+PyDoc_STRVAR(tensor_method_detach__doc__,  // NOLINT
+             R"DOC(detach($self, /)
 --
 Returns a new Tensor, detached from the current graph.
......@@ -1291,8 +1294,8 @@ static PyObject* tensor__getitem_from_offset(TensorObject* self,
   if (tensor.dtype() == proto_type) {                                        \
     auto numpy_dtype = TensorDtype2NumpyDtype(proto_type);                   \
     T b = paddle::pybind::TensorGetElement<T>(tensor, offset);               \
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];                  \
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];               \
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];    /* NOLINT */  \
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank]; /* NOLINT */  \
     auto& api = pybind11::detail::npy_api::get();                            \
     PyObject* array = api.PyArray_NewFromDescr_(                             \
         api.PyArray_Type_,                                                   \
......@@ -1638,7 +1641,7 @@ static PyObject* tensor_inplace_assign(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method__register_reduce_hook__doc__,
+PyDoc_STRVAR(tensor_method__register_reduce_hook__doc__,  // NOLINT
              R"DOC(_register_backward_hook($self, hook, /)
 --
......@@ -2027,7 +2030,8 @@ static PyObject* tensor__inplace_version(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method_element_size__doc__, R"DOC(element_size($self, /)
+PyDoc_STRVAR(tensor_method_element_size__doc__,  // NOLINT
+             R"DOC(element_size($self, /)
 --
 Returns the size in bytes of an element in the Tensor.
......@@ -2066,7 +2070,7 @@ static PyObject* tensor_method_element_size(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_method__bump_inplace_version__doc__,
+PyDoc_STRVAR(tensor_method__bump_inplace_version__doc__,  // NOLINT
              R"DOC(_bump_inplace_version($self, /)
 --
......@@ -2368,7 +2372,7 @@ static PyObject* tensor_method__is_string_tensor_hold_allocation(
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyMethodDef variable_methods[] = {
+PyMethodDef variable_methods[] = {  // NOLINT
     {"numpy",
      (PyCFunction)(void (*)())tensor_method_numpy,
      METH_VARARGS | METH_KEYWORDS,
......@@ -2630,7 +2634,7 @@ PyMethodDef variable_methods[] = {
     {nullptr, nullptr, 0, nullptr}};
 // variable_methods for core.eager.StringTensor
-PyMethodDef string_tensor_variable_methods[] = {
+PyMethodDef string_tensor_variable_methods[] = {  // NOLINT
     {"numpy",
      (PyCFunction)(void (*)())tensor_method_numpy_for_string_tensor,
      METH_VARARGS | METH_KEYWORDS,
......
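One variant above is worth noting: inside the macro in `tensor__getitem_from_offset`, the suppression uses the block form `/* NOLINT */`. Every macro line ends in a `\` continuation, and because line splicing happens before comment handling, a trailing `// NOLINT` would extend the comment across the backslash onto the next line and break the macro. A minimal sketch with a hypothetical macro:

```cpp
#include <cstdint>

// Hypothetical macro, for illustration only: a "//" comment here would
// swallow the continuation backslash and the following line, so the
// block-comment form /* NOLINT */ is the only safe spelling.
#define DECLARE_SCRATCH_BUFFERS(rank)    \
  int64_t py_dims[rank];    /* NOLINT */ \
  int64_t py_strides[rank]; /* NOLINT */

void use() {
  DECLARE_SCRATCH_BUFFERS(4)
  py_dims[0] = 0;
  py_strides[0] = 0;
}
```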
......@@ -74,7 +74,7 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-PyDoc_STRVAR(tensor_is_leaf__doc__,
+PyDoc_STRVAR(tensor_is_leaf__doc__,  // NOLINT
              R"DOC(is_leaf
 Whether a Tensor is leaf Tensor.
......
......@@ -40,7 +40,7 @@ static PyObject *divide(PyObject *self, PyObject *args, PyObject *kwargs) {
   return static_api_divide(self, args, kwargs);
 }
-static PyMethodDef OpsAPI[] = {{"add_n",
+static PyMethodDef OpsAPI[] = {{"add_n",  // NOLINT
                                 (PyCFunction)(void (*)(void))add_n,
                                 METH_VARARGS | METH_KEYWORDS,
                                 "C++ interface function for add_n."},
......
......@@ -152,12 +152,12 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
 #if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_ARM) && \
     !defined(PADDLE_WITH_SW) && !defined(PADDLE_WITH_MIPS) && \
     !defined(PADDLE_WITH_LOONGARCH)
-  int reg[4];
-  cpuid(reg, 0);
+  std::array<int, 4> reg;
+  cpuid(reg.data(), 0);
   int nIds = reg[0];
   if (nIds >= 0x00000001) {
     // EAX = 1
-    cpuid(reg, 0x00000001);
+    cpuid(reg.data(), 0x00000001);
     // AVX: ECX Bit 28
     if (cpu_isa == avx) {
       int avx_mask = (1 << 28);
......@@ -166,7 +166,7 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
   }
   if (nIds >= 0x00000007) {
     // EAX = 7
-    cpuid(reg, 0x00000007);
+    cpuid(reg.data(), 0x00000007);
     if (cpu_isa == avx2) {
       // AVX2: EBX Bit 5
       int avx2_mask = (1 << 5);
......@@ -184,7 +184,7 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
             (reg[1] & avx512bw_mask) && (reg[1] & avx512vl_mask));
     }
     // EAX = 7, ECX = 1
-    cpuid(reg, 0x00010007);
+    cpuid(reg.data(), 0x00010007);
     if (cpu_isa == avx512_bf16) {
       // AVX512BF16: EAX Bit 5
       int avx512bf16_mask = (1 << 5);
......
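Where the array really is a fixed-size local buffer handed to a C-style interface, as in `cpu_info.cc` above, the commit migrates to `std::array` and passes `.data()` for the raw pointer. A minimal self-contained sketch of the pattern, with a hypothetical `cpuid` wrapper standing in for the platform one:

```cpp
#include <array>

// Hypothetical stand-in for the platform cpuid wrapper used in cpu_info.cc:
// fills regs[0..3] with EAX/EBX/ECX/EDX for the given leaf.
void cpuid(int* regs, int leaf);

bool has_avx2() {
  std::array<int, 4> reg;            // replaces: int reg[4];
  cpuid(reg.data(), 7);              // .data() gives the int* the C interface expects
  return (reg[1] & (1 << 5)) != 0;   // AVX2: EBX bit 5
}
```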
......@@ -13,6 +13,7 @@
 // limitations under the License.
 #include "paddle/phi/kernels/funcs/strided_reshape_utils.h"
+#include <array>
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/reshape_kernel.h"
......@@ -24,10 +25,10 @@ bool ReshapeStride(const DDim& old_dims,
                    DDim& new_stride) {  // NOLINT
   int64_t numel = product(old_dims);
   if (numel < 0) {
-    int64_t tmp[2];
+    std::array<int64_t, 2> tmp;
     tmp[0] = 1;
     tmp[1] = new_dims.size();
-    new_stride = DDim(tmp, 2);
+    new_stride = DDim(tmp.data(), 2);
     return true;
   } else if (numel == 0) {
     if (old_dims == new_dims) {
......
......@@ -53,8 +53,11 @@ TEST(API, case_convert) {
     cpu_strings_x_data[i] = strs[i];
   }
   // 2. get expected results
-  std::string expected_results[] = {
-      strs[0], strs[0], strs[1], strs[1]};  // NOLINT
+  std::string expected_results[] = {// NOLINT
+                                    strs[0],
+                                    strs[0],
+                                    strs[1],
+                                    strs[1]};
   std::transform(
       strs[0].begin(), strs[0].end(), expected_results[0].begin(), ::tolower);
   std::transform(
......@@ -103,8 +106,8 @@ TEST(API, case_convert_utf8) {
   pstring* cpu_strings_x_data =
       dev_ctx->template Alloc<pstring>(cpu_strings_x.get());
-  std::string strs[] = {"óÓsscHloëË",
-                        "óÓsscHloëËóÓsscHloëËóÓsscHloëË"};  // NOLINT
+  std::string strs[] = {"óÓsscHloëË",  // NOLINT
+                        "óÓsscHloëËóÓsscHloëËóÓsscHloëË"};
   for (int i = 0; i < 2; ++i) {
     cpu_strings_x_data[i] = strs[i];
   }
......
......@@ -56,8 +56,11 @@ TEST(DEV_API, strings_cast_convert) {
   dense_x_data[1] = long_str;
   // 2. get expected results
-  std::string expected_results[] = {
-      short_str, short_str, long_str, long_str};  // NOLINT
+  std::string expected_results[] = {// NOLINT
+                                    short_str,
+                                    short_str,
+                                    long_str,
+                                    long_str};
   std::transform(short_str.begin(),
                  short_str.end(),
                  expected_results[0].begin(),
......
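In these tests the C arrays of `std::string` are kept and suppressed, presumably to keep the diff small, since the elements are mutated in place via `std::transform`. A `std::array` would satisfy the check with the same usage; a minimal sketch of that alternative:

```cpp
#include <algorithm>
#include <array>
#include <cctype>
#include <string>

int main() {
  // Hypothetical fixture: the std::array form that would satisfy the check
  // instead of suppressing it on a C array of std::string.
  std::string s = "HeLLo";
  std::array<std::string, 2> expected = {s, s};
  std::transform(s.begin(), s.end(), expected[0].begin(), ::tolower);
  std::transform(s.begin(), s.end(), expected[1].begin(), ::toupper);
  return (expected[0] == "hello" && expected[1] == "HELLO") ? 0 : 1;
}
```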