Unverified commit 5ada98b8, authored by gouzil, committed by GitHub

[clang-tidy] NO.6 enable `modernize-avoid-c-arrays` step: 2 (#55954)

Parent ae88111f
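The `modernize-avoid-c-arrays` check flags declarations of C-style arrays and suggests `std::array` (or `std::vector` when the size is only known at runtime). A minimal sketch of the before/after pattern this commit applies throughout, with illustrative names:

    #include <array>

    void before_and_after() {
      // flagged by modernize-avoid-c-arrays:
      //   int counts[4] = {1, 2, 3, 4};
      // preferred replacement: same layout and cost, but it carries its size
      std::array<int, 4> counts = {1, 2, 3, 4};
      // C APIs that expect a raw pointer are reached through .data()
      int* raw = counts.data();
      (void)raw;
    }

Where a C array has to stay (OS interfaces, constexpr lookup tables, flag-definition macros), the commit suppresses the warning with `// NOLINT` markers instead, as the hunks below show.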
@@ -166,7 +166,7 @@ bugprone-unused-raii,
 -misc-unused-alias-decls,
 -misc-unused-using-decls,
 modernize-avoid-bind,
--modernize-avoid-c-arrays,
+modernize-avoid-c-arrays,
 -modernize-deprecated-headers,
 -modernize-deprecated-ios-base-aliases,
 modernize-loop-convert,
......
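In a `.clang-tidy` checks list a leading `-` disables a check, so deleting that single character is what turns the check on. The same check can be run in isolation with a plain clang-tidy invocation (an illustrative command; the file name and standard flag are placeholders):

    clang-tidy -checks='-*,modernize-avoid-c-arrays' some_file.cc -- -std=c++17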
@@ -150,6 +150,7 @@ proto::VarType::Type PromoteTypesIfComplexExists(
 // Here is a complete rules table, but some rules are not used.
 // It is still written this way because array accessing is still
 // more efficient than if-else
+// NOLINTBEGIN(*-avoid-c-arrays)
 static constexpr proto::VarType::Type promote_types_table[4][4] = {
     /* f4 f8 c4 c8*/
     /* f4 */ {f4, f8, c4, c8},
@@ -157,6 +158,7 @@ proto::VarType::Type PromoteTypesIfComplexExists(
     /* c4 */ {c4, c8, c4, c8},
     /* c8 */ {c8, c8, c8, c8},
 };
+// NOLINTEND(*-avoid-c-arrays)
  return promote_types_table[type_an][type_bn];
 }
......
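Rather than rewrite the promotion table, the commit suppresses the check across the whole declaration with a `NOLINTBEGIN`/`NOLINTEND` pair; the `*-avoid-c-arrays` glob covers both `modernize-avoid-c-arrays` and `cppcoreguidelines-avoid-c-arrays`. The same region-suppression pattern in miniature (illustrative table):

    // NOLINTBEGIN(*-avoid-c-arrays)
    // a 2-D constexpr aggregate reads best as a plain C array
    static constexpr int kXorTable[2][2] = {{0, 1}, {1, 0}};
    // NOLINTEND(*-avoid-c-arrays)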
@@ -21,7 +21,7 @@
 #include "paddle/phi/backends/device_memory_aligment.h"
 #include "paddle/phi/core/flags.h"
-DEFINE_bool(skip_fused_all_reduce_check, false, "");
+DEFINE_bool(skip_fused_all_reduce_check, false, "");  // NOLINT
 PHI_DECLARE_bool(allreduce_record_one_event);
 namespace paddle {
......
@@ -14,6 +14,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/device_worker.h"
+#include <array>
 #include <chrono>
 #include "paddle/fluid/framework/convert_utils.h"
 namespace phi {
@@ -90,7 +91,7 @@ void PrintLodTensorType<float>(phi::DenseTensor* tensor,
                                std::string& out_val,  // NOLINT
                                char separator,
                                bool need_leading_separator) {
-  char buf[MAX_FLOAT_BUFF_SIZE];
+  std::array<char, MAX_FLOAT_BUFF_SIZE> buf;
   auto count = tensor->numel();
   if (start < 0 || end > count) {
     VLOG(3) << "access violation";
@@ -104,8 +105,8 @@ void PrintLodTensorType<float>(phi::DenseTensor* tensor,
         tensor->data<float>()[i] < FLOAT_EPS) {
       out_val += "0";
     } else {
-      sprintf(buf, "%.9f", tensor->data<float>()[i]);  // NOLINT
-      out_val += buf;
+      sprintf(buf.data(), "%.9f", tensor->data<float>()[i]);  // NOLINT
+      out_val += buf.data();
     }
   }
 }
......
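Here the buffer becomes a `std::array` and the C formatting call is routed through `.data()`; the remaining `// NOLINT` silences the separate warning about `sprintf` itself. A bounded variant that would not need that suppression, as an illustrative alternative rather than what the commit does:

    #include <array>
    #include <cstdio>

    void format_value(float v) {
      std::array<char, 40> buf;
      // snprintf cannot write past buf.size(), unlike sprintf
      std::snprintf(buf.data(), buf.size(), "%.9f", v);
    }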
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <array>
 #include <ctime>
 #include "paddle/fluid/framework/barrier.h"
@@ -425,12 +426,14 @@ void HogwildWorker::PrintFetchVars() {
   if (thread_id_ == 0 && batch_num_ % batch_per_print == 0) {
     time_t curtime;
     time(&curtime);
-    char mbstr[80];
-    std::strftime(
-        mbstr, sizeof(mbstr), "%Y-%m-%d %H:%M:%S", std::localtime(&curtime));
+    std::array<char, 80> mbstr;
+    std::strftime(mbstr.data(),
+                  sizeof(mbstr),
+                  "%Y-%m-%d %H:%M:%S",
+                  std::localtime(&curtime));
     std::stringstream ss;
-    ss << "time: [" << mbstr << "], ";
+    ss << "time: [" << mbstr.data() << "], ";
     ss << "batch: [" << batch_num_ << "], ";
     for (int i = 0; i < fetch_var_num; ++i) {
......
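Note that `sizeof(mbstr)` still yields 80 after the change because `std::array<char, 80>` is an aggregate wrapping a `char[80]` with no extra members; in practice it carries no padding, though `mbstr.size()` states the element count more directly. A small compile-time check of that assumption (illustrative):

    #include <array>

    constexpr std::array<char, 80> mbstr{};
    static_assert(sizeof(mbstr) == 80, "no overhead over char[80] in practice");
    static_assert(mbstr.size() == 80, "size() is the element count");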
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#include <array>
 #define GLOG_NO_ABBREVIATED_SEVERITIES  // msvc conflict logging with windows.h
 #include "paddle/fluid/framework/io/shell.h"
@@ -150,14 +151,14 @@ static int shell_popen_fork_internal(const char* real_cmd,
 }
 static int read_from_pipe(FILE* fp, std::string* output) {
-  char buf[4096];
+  std::array<char, 4096> buf;
   while (1) {
-    int n = fread(buf, 1, 4096, fp);
+    int n = fread(buf.data(), 1, 4096, fp);
     if (n <= 0) {
       break;
     }
-    output->append(buf, n);
+    output->append(buf.data(), n);
   }
   if (!feof(fp)) {
@@ -249,8 +250,8 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
 }
 static int shell_p2open_fork_internal(const char* real_cmd,
-                                      int pipein_fds[2],
-                                      int pipeout_fds[2]) {
+                                      int pipein_fds[2],    // NOLINT
+                                      int pipeout_fds[2]) {  // NOLINT
 #if defined(_WIN32) || defined(__APPLE__) || defined(PADDLE_ARM)
   return 0;
 #else
......
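The `pipein_fds`/`pipeout_fds` parameters are left as `int[2]` with `// NOLINT` because that is the exact shape POSIX `pipe(2)` traffics in; the check is acknowledged rather than fought at an OS boundary. The constraint in miniature (an illustrative wrapper, not code from the diff):

    #include <unistd.h>

    int make_pipe(int fds[2]) {  // NOLINT: pipe(2) requires int[2]
      // on success fds[0] is the read end, fds[1] the write end
      return pipe(fds);
    }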
@@ -25,10 +25,10 @@ namespace paddle {
 namespace framework {
 namespace ir {
-const char kSumGradOpName[] = "sum";
+const char kSumGradOpName[] = "sum";  // NOLINT
 // TODO(minqiyang): only support sgd at current time, please add
 // other optimizers later.
-const char kOptimizerType[] = "sgd";
+const char kOptimizerType[] = "sgd";  // NOLINT
 void LockFreeOptimizePass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
......
@@ -23,7 +23,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
-static const char kNumRepeats[] = "num_repeats";
+static const char kNumRepeats[] = "num_repeats";  // NOLINT
 typedef std::unordered_map<std::string, std::vector<ir::Node*>> SSAVarList;
 ir::Node* SameNameVar(std::unordered_set<ir::Node*> all, ir::Node* target) {
......
@@ -50,7 +50,7 @@ namespace {
 // all operators. NOTE that even we use a vector here, the operators is
 // unordered.
 typedef std::vector<details::OpHandleBase *> GraphOps;
-const char kGraphOps[] = "ops";
+const char kGraphOps[] = "ops";  // NOLINT
 bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) {
   return PADDLE_GET_CONST(
......
@@ -23,7 +23,7 @@ namespace ir {
 #if !defined(_WIN32) && (__cplusplus < 201703L)
 constexpr char Node::kControlDepVarName[];
 #else
-const char Node::kControlDepVarName[] = "__control_var";
+const char Node::kControlDepVarName[] = "__control_var";  // NOLINT
 #endif
 std::unique_ptr<Node> CreateNodeForTest(const std::string &name,
......
@@ -36,7 +36,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
-static const char kParamScopeAttr[] = "__param_scope__";
+static const char kParamScopeAttr[] = "__param_scope__";  // NOLINT
 static const std::vector<std::string> support_subgraph_passes = {
     "simplify_with_basic_ops_pass",
......
@@ -22,7 +22,7 @@ namespace framework {
 namespace ir {
 void RuntimeContextCachePass::ApplyImpl(ir::Graph* graph) const {
-  static constexpr char kNotAllowInferShapeCahce[] =
+  static constexpr char kNotAllowInferShapeCahce[] =  // NOLINT
       "@NOT_ALLOW_INFERSHAPE_CACHE@";
   VLOG(3) << "Applies Runtime Context Cache strategy.";
   for (const Node* n : graph->Nodes()) {
......
@@ -1227,7 +1227,7 @@ bool OpSupportGPU(const std::string& op_type) {
 }
 struct OperatorWithKernel::CacheImpl {
-  static const char kNotAllowInferShapeCahce[];
+  static const char kNotAllowInferShapeCahce[];  // NOLINT
   explicit CacheImpl(phi::KernelContext* kernel_ctx,
                      RuntimeInferShapeContext* infer_shape_ctx,
                      const std::vector<phi::DenseTensor*>& tensors,
@@ -1273,8 +1273,9 @@ struct OperatorWithKernel::CacheImpl {
   bool not_allow_infer_shape_cache_;
   std::vector<phi::DDim> last_ddims_;
 };
-const char OperatorWithKernel::CacheImpl::kNotAllowInferShapeCahce[] =
-    "@NOT_ALLOW_INFERSHAPE_CACHE@";
+const char  // NOLINT
+    OperatorWithKernel::CacheImpl::kNotAllowInferShapeCahce[] =
+        "@NOT_ALLOW_INFERSHAPE_CACHE@";
 static void CheckTensorNANOrInf(const std::string& op_type,
                                 const std::string& name,
......
@@ -23,12 +23,12 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
-const char kFeedOpType[] = "feed";
-const char kFetchOpType[] = "fetch";
-const char kRecurrent[] = "recurrent";
-const char kStates[] = "states";
-const char kExStates[] = "ex_states";
+const char kFeedOpType[] = "feed";      // NOLINT
+const char kFetchOpType[] = "fetch";    // NOLINT
+const char kRecurrent[] = "recurrent";  // NOLINT
+const char kStates[] = "states";        // NOLINT
+const char kExStates[] = "ex_states";   // NOLINT
 bool HasDependentInputVar(
     const proto::OpDesc& op_desc,
......
@@ -531,8 +531,8 @@ void TensorToStream(std::ostream& os,
 #endif
   } else if (platform::is_custom_place(tensor.place())) {
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
-    constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
-    std::unique_ptr<char[]> buf(new char[kBufSize]);
+    constexpr size_t kBufSize = 1024 * 1024 * 64;     // 64MB
+    std::unique_ptr<char[]> buf(new char[kBufSize]);  // NOLINT
     auto& custom_device_context =
         static_cast<const platform::CustomDeviceContext&>(dev_ctx);
     platform::CPUPlace cpu;
@@ -598,7 +598,7 @@ void TensorFromStream(std::istream& is,
     // proto buffer
     int32_t size;
     is.read(reinterpret_cast<char*>(&size), sizeof(size));
-    std::unique_ptr<char[]> buf(new char[size]);
+    std::unique_ptr<char[]> buf(new char[size]);  // NOLINT
     is.read(reinterpret_cast<char*>(buf.get()), size);
     PADDLE_ENFORCE_EQ(
         desc.ParseFromArray(buf.get(), size),
@@ -671,7 +671,7 @@ void TensorFromStream(std::istream& is,
         0,
         platform::errors::InvalidArgument(
             "phi::DenseTensor desc size should >= 0"));
-    std::unique_ptr<char[]> buf(new char[size]);
+    std::unique_ptr<char[]> buf(new char[size]);  // NOLINT
     is.read(reinterpret_cast<char*>(buf.get()), size);
     PADDLE_ENFORCE_EQ(
         desc.ParseFromArray(buf.get(), size),
......
@@ -13,6 +13,7 @@
 // limitations under the License.
 #include <gtest/gtest.h>
+#include <array>
 #include <cmath>
 #include "paddle/fluid/framework/tensor_util.h"
@@ -28,8 +29,8 @@ TEST(TensorCopy, Tensor) {
   int* src_ptr = src_tensor.mutable_data<int>(phi::make_ddim({3, 3}),
                                               platform::CPUPlace());
-  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
-  memcpy(src_ptr, arr, 9 * sizeof(int));
+  std::array<int, 9> arr = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+  memcpy(src_ptr, arr.data(), 9 * sizeof(int));
   src_tensor.set_layout(DataLayout::kAnyLayout);
   auto cpu_place = new platform::CPUPlace();
@@ -467,7 +468,7 @@ TEST(TensorIsfinite, CPU) {
 TEST(Tensor, FromAndToStream) {
   phi::DenseTensor src_tensor;
-  int array[6] = {1, 2, 3, 4, 5, 6};
+  std::array<int, 6> array = {1, 2, 3, 4, 5, 6};
   src_tensor.Resize({2, 3});
   int* src_ptr = src_tensor.mutable_data<int>(platform::CPUPlace());
   for (int i = 0; i < 6; ++i) {
......
@@ -27,7 +27,7 @@
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/core/dense_tensor.h"
-DEFINE_bool(
+DEFINE_bool(  // NOLINT
     custom_model_save_cpu,
     false,
     "Keep old mode for developers, the model is saved on cpu not device.");
......
@@ -26,7 +26,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/profiler.h"
-DEFINE_bool(profile, false, "Turn on profiler for fluid");
+DEFINE_bool(profile, false, "Turn on profiler for fluid");  // NOLINT
 namespace paddle {
 namespace {
......
@@ -29,7 +29,9 @@ limitations under the License. */
 // phi
 #include "paddle/phi/kernels/declarations.h"
-DEFINE_string(devices, "", "The devices to be used which is joined by comma.");
+DEFINE_string(devices,  // NOLINT
+              "",
+              "The devices to be used which is joined by comma.");
 DEFINE_int32(math_num_threads,
              1,
              "Number of threads used to run math functions.");
......
@@ -20,8 +20,10 @@
 namespace paddle {
 namespace dialect {
-const char* PhiKernelOp::attributes_name[attributes_num] = {
-    "op_name", "kernel_name", "kernel_key"};
+const char* PhiKernelOp::attributes_name[attributes_num] = {  // NOLINT
+    "op_name",
+    "kernel_name",
+    "kernel_key"};
 void PhiKernelOp::Verify() {
   VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp.";
......
@@ -69,8 +69,8 @@ using InputHandlerFn = std::function<ir::OpResult(ir::IrContext*,
                                                   ir::Program*)>;
 using AttributeHandlerFn = std::function<ir::Attribute(
     ir::IrContext*, const OpDesc&, const OpAttributeInfo&)>;
-constexpr char kTargetDialectPrefix[] = "pd.";
-constexpr char kEmptyVarName[] = "@EMPTY@";
+constexpr char kTargetDialectPrefix[] = "pd.";  // NOLINT
+constexpr char kEmptyVarName[] = "@EMPTY@";     // NOLINT
 static const std::unordered_set<std::string> special_non_inplace_ops = {};
......
@@ -12,6 +12,7 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <array>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/controlflow/while_op_helper.h"
 #include "paddle/phi/kernels/funcs/tensor_formatter.h"
@@ -34,9 +35,10 @@ class OpBase;
 }  // namespace imperative
 }  // namespace paddle
-const char kCond[] = "Cond";
-const char kData[] = "Data";
-const char kSummarize[] = "summarize";
+// const char kCond[] = "Cond";
+std::array<const char, 5> kCond = {"Cond"};
+std::array<const char, 5> kData = {"Data"};
+std::array<const char, 10> kSummarize = {"summarize"};
 namespace paddle {
 namespace operators {
@@ -52,7 +54,8 @@ class AssertOp : public framework::OperatorBase {
  private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &dev_place) const override {
-    const framework::Variable *cond_var_ptr = scope.FindVar(Input(kCond));
+    const framework::Variable *cond_var_ptr =
+        scope.FindVar(Input(kCond.data()));
     PADDLE_ENFORCE_NOT_NULL(cond_var_ptr,
                             platform::errors::NotFound(
                                 "Input(Condition) of AssertOp is not found."));
@@ -71,9 +74,9 @@ class AssertOp : public framework::OperatorBase {
     }
     funcs::TensorFormatter formatter;
-    formatter.SetSummarize(Attr<int64_t>(kSummarize));
-    const std::vector<std::string> &x_names = Inputs(kData);
+    formatter.SetSummarize(Attr<int64_t>(kSummarize.data()));
+    const std::vector<std::string> &x_names = Inputs(kData.data());
     for (const std::string &name : x_names) {
       const framework::Variable *x_var_ptr = scope.FindVar(name);
       const phi::DenseTensor &x_tensor = x_var_ptr->Get<phi::DenseTensor>();
@@ -83,7 +86,7 @@ class AssertOp : public framework::OperatorBase {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "The condition variable '%s' of AssertOp must be "
         "true, but received false",
-        Input(kCond)));
+        Input(kCond.data())));
   }
 };
@@ -91,13 +94,13 @@ class AssertOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
  void Make() override {
    AddInput(
-        kCond,
+        kCond.data(),
        "The boolean scalar condition tensor which is asserted to be true.");
-    AddInput(kData,
+    AddInput(kData.data(),
             "The tensors to print when the assert condition is not true.")
        .AsDuplicable();
    AddAttr<int64_t>(
-        kSummarize,
+        kSummarize.data(),
        "The number of entries of each tensor to print when the "
        "assert condition is not true. -1 means print all entries. If "
        "the number of entries of a tensor is less then "
@@ -111,7 +114,8 @@ class AssertOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class AssertOpInferShape : public framework::InferShapeBase {
  public:
  void operator()(framework::InferShapeContext *context) const override {
-    OP_INOUT_CHECK(context->HasInputs(kCond), "Input", "Condition", "AssertOp");
+    OP_INOUT_CHECK(
+        context->HasInputs(kCond.data()), "Input", "Condition", "AssertOp");
  }
 };
......
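The `std::array<const char, N>` constants above are sized to hold the literal plus its NUL terminator ("Cond" is four characters plus '\0', hence 5; "summarize" is nine plus '\0', hence 10), and call sites now recover a `const char*` through `.data()`. The sizing rule in miniature (illustrative names):

    #include <array>

    // the array length must count the trailing '\0' of the literal
    constexpr std::array<const char, 5> kCond = {"Cond"};
    static_assert(sizeof("Cond") == 5, "4 characters + NUL");
    const char* as_cstr = kCond.data();  // usable wherever a C string is expected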
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/controlflow/conditional_block_op.h"
+#include <array>
 #include "paddle/fluid/framework/new_executor/standalone_executor.h"
 #include "paddle/fluid/operators/assign_op.h"
@@ -29,11 +30,12 @@ PHI_DECLARE_bool(use_mkldnn);
 namespace paddle {
 namespace operators {
-const char ConditionalOp::kInputs[] = "Input";
-const char ConditionalOp::kOutputs[] = "Out";
-const char ConditionalOp::kCondition[] = "Cond";
-const char ConditionalOp::kScope[] = "Scope";
-const char ConditionalOp::kSkipEagerDeletionVars[] = "skip_eager_deletion_vars";
+const char ConditionalOp::kInputs[] = "Input";    // NOLINT
+const char ConditionalOp::kOutputs[] = "Out";     // NOLINT
+const char ConditionalOp::kCondition[] = "Cond";  // NOLINT
+const char ConditionalOp::kScope[] = "Scope";     // NOLINT
+const char ConditionalOp::kSkipEagerDeletionVars[] =
+    "skip_eager_deletion_vars";  // NOLINT
 using Executor = framework::Executor;
 using ExecutorPrepareContext = framework::ExecutorPrepareContext;
......
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <algorithm>
+#include <array>
 #include <memory>
 #include <vector>
@@ -41,7 +42,7 @@ bool GT(T a, T b) {
 *check if (x, y) is in the boundary of roi
 */
 template <typename T>
-bool in_quad(T x, T y, T roi_x[], T roi_y[]) {
+bool in_quad(T x, T y, T roi_x[], T roi_y[]) {  // NOLINT
   for (int i = 0; i < 4; i++) {
     T xs = roi_x[i];
     T ys = roi_y[i];
@@ -107,9 +108,9 @@ bool in_quad(T x, T y, T roi_x[], T roi_y[]) {
 template <typename T>
 void get_transform_matrix(const int transformed_width,
                           const int transformed_height,
-                          T roi_x[],
-                          T roi_y[],
-                          T matrix[]) {
+                          T roi_x[],     // NOLINT
+                          T roi_y[],     // NOLINT
+                          T matrix[]) {  // NOLINT
   T x0 = roi_x[0];
   T x1 = roi_x[1];
   T x2 = roi_x[2];
@@ -170,7 +171,8 @@ void get_transform_matrix(const int transformed_width,
 *
 */
 template <typename T>
-void get_source_coords(T matrix[], int out_w, int out_h, T* in_w, T* in_h) {
+void get_source_coords(
+    T matrix[], int out_w, int out_h, T* in_w, T* in_h) {  // NOLINT
   T u = matrix[0] * out_w + matrix[1] * out_h + matrix[2];
   T v = matrix[3] * out_w + matrix[4] * out_h + matrix[5];
   T w = matrix[6] * out_w + matrix[7] * out_h + matrix[8];
@@ -283,17 +285,20 @@ class CPUROIPerspectiveTransformOpKernel : public framework::OpKernel<T> {
     for (int n = 0; n < rois_num; ++n) {
       const T* n_rois = rois_data + n * 8;
-      T roi_x[4];
-      T roi_y[4];
+      std::array<T, 4> roi_x;
+      std::array<T, 4> roi_y;
       for (int k = 0; k < 4; ++k) {
         roi_x[k] = n_rois[2 * k] * spatial_scale;
         roi_y[k] = n_rois[2 * k + 1] * spatial_scale;
       }
       int image_id = roi2image_data[n];
       // Get transform matrix
-      T matrix[9];
-      get_transform_matrix<T>(
-          transformed_width, transformed_height, roi_x, roi_y, matrix);
+      std::array<T, 9> matrix;
+      get_transform_matrix<T>(transformed_width,
+                              transformed_height,
+                              roi_x.data(),
+                              roi_y.data(),
+                              matrix.data());
       for (int i = 0; i < 9; i++) {
         transform_matrix[n * 9 + i] = matrix[i];
       }
@@ -305,8 +310,8 @@ class CPUROIPerspectiveTransformOpKernel : public framework::OpKernel<T> {
                       c * transformed_height * transformed_width +
                       out_h * transformed_width + out_w;
           T in_w, in_h;
-          get_source_coords<T>(matrix, out_w, out_h, &in_w, &in_h);
-          if (in_quad<T>(in_w, in_h, roi_x, roi_y)) {
+          get_source_coords<T>(matrix.data(), out_w, out_h, &in_w, &in_h);
+          if (in_quad<T>(in_w, in_h, roi_x.data(), roi_y.data())) {
            if (GT_E<T>(-0.5, in_w) ||
                GT_E<T>(in_w, static_cast<T>(in_width - 0.5)) ||
                GT_E<T>(-0.5, in_h) ||
@@ -431,17 +436,20 @@ class CPUROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
       T gradient = 0.0;
       for (size_t roi_idx = lod[n]; roi_idx < lod[n + 1]; ++roi_idx) {
         const T* rois = rois_data + roi_idx * 8;
-        T roi_x[4];
-        T roi_y[4];
+        std::array<T, 4> roi_x;
+        std::array<T, 4> roi_y;
         for (int k = 0; k < 4; ++k) {
           roi_x[k] = rois[2 * k] * spatial_scale;
           roi_y[k] = rois[2 * k + 1] * spatial_scale;
         }
         // Get transform matrix
-        T matrix[9];
-        get_transform_matrix<T>(
-            transformed_width, transformed_height, roi_x, roi_y, matrix);
+        std::array<T, 9> matrix;
+        get_transform_matrix<T>(transformed_width,
+                                transformed_height,
+                                roi_x.data(),
+                                roi_y.data(),
+                                matrix.data());
         const T* out_grad_ptr = out_grad_data + (roi_idx * channels + c) *
                                                     transformed_height *
                                                     transformed_width;
@@ -449,8 +457,9 @@ class CPUROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
         for (int out_w = 0; out_w < transformed_width; ++out_w) {
           T src_w;
           T src_h;
-          get_source_coords<T>(matrix, out_w, out_h, &src_w, &src_h);
-          if (in_quad<T>(src_w, src_h, roi_x, roi_y)) {
+          get_source_coords<T>(
+              matrix.data(), out_w, out_h, &src_w, &src_h);
+          if (in_quad<T>(src_w, src_h, roi_x.data(), roi_y.data())) {
            if (GT_E<T>(-0.5, src_w) ||
                GT_E<T>(src_w, static_cast<T>(in_width - 0.5)) ||
                GT_E<T>(-0.5, src_h) ||
......
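In this file the local buffers became `std::array` while the helper signatures keep their `T roi_x[]` parameters (with `// NOLINT`), so every call site now passes `.data()`. The array-to-pointer handoff in miniature (an illustrative sketch, not the kernel itself):

    #include <array>

    template <typename T>
    T sum4(T xs[]) {  // NOLINT: decays to T*, length fixed by convention
      return xs[0] + xs[1] + xs[2] + xs[3];
    }

    template <typename T>
    T demo() {
      std::array<T, 4> roi_x = {1, 2, 3, 4};
      return sum4<T>(roi_x.data());  // .data() bridges to the pointer parameter
    }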
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <algorithm>
+#include <array>
 #include <memory>
 #include <string>
 #include <vector>
@@ -406,8 +407,8 @@ template <typename T, typename DeviceContext>
 class Pad2dCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    int pads[4];
-    GetPaddings(pads, context);
+    std::array<int, 4> pads;
+    GetPaddings(pads.data(), context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     T value = static_cast<T>(context.Attr<float>("pad_value"));
@@ -524,8 +525,8 @@ template <typename T, typename DeviceContext>
 class Pad2dGradCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    int pads[4];
-    GetPaddings(pads, context);
+    std::array<int, 4> pads;
+    GetPaddings(pads.data(), context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     auto* d_out =
......
@@ -12,6 +12,7 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <array>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/phi/kernels/funcs/tensor_formatter.h"
@@ -37,9 +38,9 @@ using framework::GradVarName;
 #define CLOG std::cout
-const char kForward[] = "FORWARD";
-const char kBackward[] = "BACKWARD";
-const char kBoth[] = "BOTH";
+std::array<const char, 8> kForward = {"FORWARD"};
+std::array<const char, 9> kBackward = {"BACKWARD"};
+std::array<const char, 5> kBoth = {"BOTH"};
 // TODO(ChunweiYan) there should be some other printers for TensorArray
 class PrintOp : public framework::OperatorBase {
@@ -79,8 +80,8 @@ class PrintOp : public framework::OperatorBase {
     std::string print_phase = Attr<std::string>("print_phase");
     bool is_forward = Attr<bool>("is_forward");
-    if ((is_forward && print_phase == kBackward) ||
-        (!is_forward && print_phase == kForward)) {
+    if ((is_forward && print_phase == kBackward.data()) ||
+        (!is_forward && print_phase == kForward.data())) {
       return;
     }
@@ -125,10 +126,10 @@ class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker {
                  "(string, default 'FORWARD') Which phase to display "
                  "including 'FORWARD' "
                  "'BACKWARD' and 'BOTH'.")
-        .SetDefault(std::string(kBoth))
-        .InEnum({std::string(kForward),
-                 std::string(kBackward),
-                 std::string(kBoth)});
+        .SetDefault(std::string(kBoth.data()))
+        .InEnum({std::string(kForward.data()),
+                 std::string(kBackward.data()),
+                 std::string(kBoth.data())});
     AddAttr<bool>("is_forward", "Whether is forward or not").SetDefault(true);
     AddComment(R"DOC(
Creates a print op that will print when a tensor is accessed.
......
@@ -14,6 +14,7 @@
 #include "paddle/fluid/operators/py_func_op.h"
+#include <array>
 #include <memory>
 #include <set>
 #include <string>
@@ -30,9 +31,9 @@ namespace py = ::pybind11;
 static std::vector<py::object> g_py_callables;
-const char kForwardPythonCallableId[] = "forward_callable_id";
-const char kBackwardPythonCallableId[] = "backward_callable_id";
-const char kPyFuncBackwardSkipVars[] = "backward_skip_vars";
+std::array<const char, 20> kForwardPythonCallableId = {"forward_callable_id"};
+std::array<const char, 21> kBackwardPythonCallableId = {"backward_callable_id"};
+std::array<const char, 19> kPyFuncBackwardSkipVars = {"backward_skip_vars"};
 size_t AppendPythonCallableObjectAndReturnId(const py::object &py_obj) {
   g_py_callables.emplace_back(py_obj);
@@ -144,11 +145,12 @@ class PyFuncOpVarTypeInference : public framework::StaticGraphVarTypeInference {
                           has_out));
     PADDLE_ENFORCE_GE(
-        PADDLE_GET_CONST(int, ctx->GetAttr(kForwardPythonCallableId)),
+        PADDLE_GET_CONST(int, ctx->GetAttr(kForwardPythonCallableId.data())),
         0,
         platform::errors::InvalidArgument(
             "Function id cannot be less than 0, but received value is %d.",
-            PADDLE_GET_CONST(int, ctx->GetAttr(kForwardPythonCallableId))));
+            PADDLE_GET_CONST(int,
+                             ctx->GetAttr(kForwardPythonCallableId.data()))));
     if (!has_out) return;
@@ -200,13 +202,13 @@ class PyFuncOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Inputs of py_func op.").AsDuplicable();
     AddOutput("Out", "Outputs of py_func op").AsDuplicable();
-    AddAttr<int>(kForwardPythonCallableId,
+    AddAttr<int>(kForwardPythonCallableId.data(),
                  "Index of registered forward Python function.")
        .SetDefault(0);
-    AddAttr<int>(kBackwardPythonCallableId,
+    AddAttr<int>(kBackwardPythonCallableId.data(),
                  "Index of registered backward Python function.")
        .SetDefault(-1);
-    AddAttr<std::vector<std::string>>(kPyFuncBackwardSkipVars,
+    AddAttr<std::vector<std::string>>(kPyFuncBackwardSkipVars.data(),
                                       "Unused forward in/out in backward op")
        .SetDefault(std::vector<std::string>());
     AddComment(R"DOC("PyFunc Op")DOC");
@@ -241,7 +243,8 @@ class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase {
   std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
     auto &fwd_attrs = Attrs();
     // no backward op when backward_id is less than 0
-    if (PADDLE_GET_CONST(int, fwd_attrs.at(kBackwardPythonCallableId)) < 0) {
+    if (PADDLE_GET_CONST(int, fwd_attrs.at(kBackwardPythonCallableId.data())) <
+        0) {
       return {};
     }
@@ -249,9 +252,9 @@ class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase {
     grad_op->SetType("py_func");
     framework::AttributeMap bwd_attrs;
-    bwd_attrs[kForwardPythonCallableId] =
-        fwd_attrs.at(kBackwardPythonCallableId);
-    bwd_attrs[kBackwardPythonCallableId] = -1;
+    bwd_attrs[kForwardPythonCallableId.data()] =
+        fwd_attrs.at(kBackwardPythonCallableId.data());
+    bwd_attrs[kBackwardPythonCallableId.data()] = -1;
     grad_op->SetAttrMap(bwd_attrs);
     // All forward inputs
@@ -262,7 +265,7 @@ class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase {
     // For memory reused, some inputs/output in forward part may be not needed
     // in backward part. Skipping these vars helps to save memory
     auto &backward_skip_var_list = PADDLE_GET_CONST(
-        std::vector<std::string>, fwd_attrs.at(kPyFuncBackwardSkipVars));
+        std::vector<std::string>, fwd_attrs.at(kPyFuncBackwardSkipVars.data()));
     std::unordered_set<std::string> backward_skip_var_set(
         backward_skip_var_list.begin(), backward_skip_var_list.end());
     std::vector<std::string> bwd_ins;
@@ -336,7 +339,8 @@ class PyFuncOp : public framework::OperatorBase {
       outputs[i] = out_var ? out_var->GetMutable<phi::DenseTensor>() : nullptr;
     }
-    auto callable_id = static_cast<size_t>(Attr<int>(kForwardPythonCallableId));
+    auto callable_id =
+        static_cast<size_t>(Attr<int>(kForwardPythonCallableId.data()));
     auto *py_callable = GetPythonCallableObject(callable_id);
     VLOG(10) << "Call Python function with id " << callable_id << ": "
              << PythonFuncDebugString(*py_callable);
......
@@ -30,23 +30,25 @@ namespace operators {
 using StepScopeVar = std::vector<framework::Scope *>;
-const char RecurrentBase::kInputs[] = "inputs";
-const char RecurrentBase::kInitialStates[] = "initial_states";
-const char RecurrentBase::kParameters[] = "parameters";
-const char RecurrentBase::kOutputs[] = "outputs";
-const char RecurrentBase::kStepScopes[] = "step_scopes";
-const char RecurrentBase::kHasStates[] = "has_states";
-const char RecurrentBase::kExStates[] = "ex_states";
-const char RecurrentBase::kStates[] = "states";
-const char RecurrentBase::kStepBlock[] = "sub_block";
-const char RecurrentBase::kReverse[] = "reverse";
-const char RecurrentBase::kIsTrain[] = "is_train";
-const char RecurrentBase::kSkipEagerDeletionVars[] = "skip_eager_deletion_vars";
+const char RecurrentBase::kInputs[] = "inputs";                 // NOLINT
+const char RecurrentBase::kInitialStates[] = "initial_states";  // NOLINT
+const char RecurrentBase::kParameters[] = "parameters";         // NOLINT
+const char RecurrentBase::kOutputs[] = "outputs";               // NOLINT
+const char RecurrentBase::kStepScopes[] = "step_scopes";        // NOLINT
+const char RecurrentBase::kHasStates[] = "has_states";          // NOLINT
+const char RecurrentBase::kExStates[] = "ex_states";            // NOLINT
+const char RecurrentBase::kStates[] = "states";                 // NOLINT
+const char RecurrentBase::kStepBlock[] = "sub_block";           // NOLINT
+const char RecurrentBase::kReverse[] = "reverse";               // NOLINT
+const char RecurrentBase::kIsTrain[] = "is_train";              // NOLINT
+const char RecurrentBase::kSkipEagerDeletionVars[] =
+    "skip_eager_deletion_vars";  // NOLINT
 #define GRAD_SUFFIX "@GRAD"
-const char RecurrentBase::kInputGrads[] = "inputs" GRAD_SUFFIX;
-const char RecurrentBase::kOutputGrads[] = "outputs" GRAD_SUFFIX;
-const char RecurrentBase::kParamGrads[] = "parameters" GRAD_SUFFIX;
-const char RecurrentBase::kInitStateGrads[] = "initial_states" GRAD_SUFFIX;
+const char RecurrentBase::kInputGrads[] = "inputs" GRAD_SUFFIX;      // NOLINT
+const char RecurrentBase::kOutputGrads[] = "outputs" GRAD_SUFFIX;    // NOLINT
+const char RecurrentBase::kParamGrads[] = "parameters" GRAD_SUFFIX;  // NOLINT
+const char RecurrentBase::kInitStateGrads[] =
+    "initial_states" GRAD_SUFFIX;  // NOLINT
 static void ClearStepScopes(const platform::DeviceContext &dev_ctx,
                             framework::Scope *parent_scope,
......
@@ -15,6 +15,7 @@
 #include "paddle/fluid/platform/collective_helper.h"
 #include <utility>
+#include <vector>
 #include "paddle/fluid/memory/allocation/allocator_facade.h"
 #include "paddle/fluid/platform/device/device_wrapper.h"
@@ -537,7 +538,8 @@ void XCCLCommContext::CreateXCCLCommMultiTrainer(
   VLOG(1) << "Begin CreateXCCLCommMultiTrainer. device number: " << kDevices
           << ", ntrainers: " << ntrainers << ", train_id: " << train_id
           << ", rind_id: " << ring_id;
-  phi::ccl::CCLComm comms[kDevices];
+  std::vector<phi::ccl::CCLComm> comms;
+  comms.resize(kDevices);
   {
     for (int i = 0; i < kDevices; i++) {
       phi::DeviceManager::SetDevice(device_type_, i);
@@ -545,7 +547,7 @@ void XCCLCommContext::CreateXCCLCommMultiTrainer(
                                    kDevices * ntrainers,
                                    xccl_id,
                                    train_id * kDevices + i,
-                                   comms + i);
+                                   comms.data() + i);
       VLOG(1) << "CCLCommInitRank: " << i;
     }
   }
......
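`comms[kDevices]` had a runtime bound, making it a variable-length array, which is a GNU extension rather than standard C++; that is why the fix uses `std::vector` here instead of `std::array`. The same replacement in miniature (illustrative element type):

    #include <vector>

    void init_comms(int kDevices) {
      // was: Comm comms[kDevices];  -- a VLA, not legal standard C++
      std::vector<int> comms(kDevices);  // heap-backed, runtime-sized
      // std::vector storage is contiguous, so comms.data() + i
      // is a valid stand-in for the old comms + i pointer arithmetic
      int* raw = comms.data();
      (void)raw;
    }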
@@ -14,6 +14,7 @@
 #include <gtest/gtest.h>
+#include <array>
 #include <string>
 #include "paddle/fluid/framework/tensor_util.h"
@@ -120,8 +121,8 @@ void TestTensorUtils(const paddle::platform::Place& place) {
   int* src_ptr = src_tensor.mutable_data<int>(phi::make_ddim({3, 3}),
                                               paddle::platform::CPUPlace());
-  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
-  memcpy(src_ptr, arr, 9 * sizeof(int));
+  std::array<int, 9> arr = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+  memcpy(src_ptr, arr.data(), 9 * sizeof(int));
   // CPU Tensor to GPU Tensor
   paddle::platform::CustomDeviceContext gpu_ctx(place);
......
@@ -20,13 +20,15 @@
 namespace paddle {
 namespace platform {
-EventCreateFunction DeviceEvent::event_creator_[MaxDeviceTypes];
-EventRecordFunction DeviceEvent::event_recorder_[MaxDeviceTypes];
-EventQueryFunction DeviceEvent::event_querier_[MaxDeviceTypes];
-EventFinishFunction DeviceEvent::event_finisher_[MaxDeviceTypes];
-EventSetFinishedFunction DeviceEvent::event_finished_setter_[MaxDeviceTypes];
-EventWaitFunction DeviceEvent::event_waiter_[MaxDeviceTypes][MaxDeviceTypes];
-EventResetFunction DeviceEvent::event_resetter_[MaxDeviceTypes];
+EventCreateFunction DeviceEvent::event_creator_[MaxDeviceTypes];   // NOLINT
+EventRecordFunction DeviceEvent::event_recorder_[MaxDeviceTypes];  // NOLINT
+EventQueryFunction DeviceEvent::event_querier_[MaxDeviceTypes];    // NOLINT
+EventFinishFunction DeviceEvent::event_finisher_[MaxDeviceTypes];  // NOLINT
+EventSetFinishedFunction  // NOLINT
+    DeviceEvent::event_finished_setter_[MaxDeviceTypes];
+EventWaitFunction DeviceEvent::event_waiter_[MaxDeviceTypes]  // NOLINT
+                                            [MaxDeviceTypes];
+EventResetFunction DeviceEvent::event_resetter_[MaxDeviceTypes];  // NOLINT
 /*
  * Generate flag used to create event on all sorts of equipment.
......
@@ -11,6 +11,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/enforce.h"
+#include <array>
 #include <list>
 #include "gtest/gtest.h"
@@ -286,7 +287,7 @@ TEST(ENFORCE_NOT_NULL, FAIL) {
 }
 struct Dims {
-  size_t dims_[4];
+  std::array<size_t, 4> dims_;
   bool operator==(const Dims& o) const {
     for (size_t i = 0; i < 4; ++i) {
......
@@ -23,7 +23,7 @@
 #define __PADDLE_DEFINE_EXPORTED_FLAG(                                       \
     __name, __is_writable, __cpp_type, __gflag_type, __default_value, __doc) \
-  DEFINE_##__gflag_type(__name, __default_value, __doc);                     \
+  DEFINE_##__gflag_type(__name, __default_value, __doc); /* NOLINT */        \
   struct __PaddleRegisterFlag_##__name {                                     \
     __PaddleRegisterFlag_##__name() {                                        \
       using FlagDeclaredType =                                               \
......
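Inside a multi-line macro a `// NOLINT` line comment would swallow the trailing backslash and break the continuation, so the suppression has to be the block-comment form, which clang-tidy also recognizes. The constraint in miniature (an illustrative macro, not the Paddle one):

    // a line comment before '\' would absorb the continuation itself,
    // so the block-comment form is required inside macros
    #define DEFINE_FLAG(name, value)            \
      bool FLAGS_##name = (value); /* NOLINT */ \
      bool* name##_ptr = &FLAGS_##name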
...@@ -23,6 +23,7 @@ limitations under the License. */ ...@@ -23,6 +23,7 @@ limitations under the License. */
#include <sys/socket.h> #include <sys/socket.h>
#include <algorithm> #include <algorithm>
#include <array>
#include <string> #include <string>
#include <thread> // NOLINT #include <thread> // NOLINT
...@@ -220,7 +221,7 @@ static int SocketAccept(int server_fd, const CommHead head) { ...@@ -220,7 +221,7 @@ static int SocketAccept(int server_fd, const CommHead head) {
struct sockaddr_in client_addr; struct sockaddr_in client_addr;
socklen_t addr_length = sizeof(client_addr); socklen_t addr_length = sizeof(client_addr);
char buffer[1024] = {0}; std::array<char, 1024> buffer{0};
int conn = -1; int conn = -1;
const char* phead = reinterpret_cast<const char*>(&head); const char* phead = reinterpret_cast<const char*>(&head);
...@@ -231,8 +232,8 @@ static int SocketAccept(int server_fd, const CommHead head) { ...@@ -231,8 +232,8 @@ static int SocketAccept(int server_fd, const CommHead head) {
"accept", "accept",
conn); conn);
int ret_val = SocketRecv(conn, buffer, sizeof(head)); int ret_val = SocketRecv(conn, buffer.data(), sizeof(head));
if (ret_val > 0 && memcmp(buffer, phead, sizeof(head)) == 0) { if (ret_val > 0 && memcmp(buffer.data(), phead, sizeof(head)) == 0) {
// send a message to the sender, indicating that the link is correct // send a message to the sender, indicating that the link is correct
CHECK_SYS_CALL(SocketSend(conn, phead, sizeof(head)), "send"); CHECK_SYS_CALL(SocketSend(conn, phead, sizeof(head)), "send");
break; // accept client break; // accept client
...@@ -289,7 +290,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) { ...@@ -289,7 +290,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
static_assert(sizeof(CommHead) <= 1024, static_assert(sizeof(CommHead) <= 1024,
"sizeof(CommHead) must <= buffer size"); "sizeof(CommHead) must <= buffer size");
char buffer[1024] = {0}; std::array<char, 1024> buffer{0};
const char* phead = reinterpret_cast<const char*>(&head); const char* phead = reinterpret_cast<const char*>(&head);
// TODO(wangxi) Set from env, default 900s=15min // TODO(wangxi) Set from env, default 900s=15min
...@@ -312,8 +313,8 @@ static int ConnectAddr(const std::string& ep, const CommHead head) { ...@@ -312,8 +313,8 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
} }
CHECK_SYS_CALL(SocketSend(sock, phead, sizeof(head)), "send"); CHECK_SYS_CALL(SocketSend(sock, phead, sizeof(head)), "send");
ret_val = SocketRecv(sock, buffer, sizeof(head)); ret_val = SocketRecv(sock, buffer.data(), sizeof(head));
if (ret_val > 0 && memcmp(buffer, phead, sizeof(head)) == 0) { if (ret_val > 0 && memcmp(buffer.data(), phead, sizeof(head)) == 0) {
// recv same message from receiver, indicating that the link is correct // recv same message from receiver, indicating that the link is correct
break; // accept client break; // accept client
} else { } else {
...@@ -333,71 +334,77 @@ static int ConnectAddr(const std::string& ep, const CommHead head) { ...@@ -333,71 +334,77 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
template <typename CommUniqueId> template <typename CommUniqueId>
static void RecvCommID(int conn, CommUniqueId* nccl_id) { static void RecvCommID(int conn, CommUniqueId* nccl_id) {
char buffer[MAX_COMMUNIQUEID_LEN] = {0}; std::array<char, MAX_COMMUNIQUEID_LEN> buffer{0};
static_assert(sizeof(CommUniqueId) <= MAX_COMMUNIQUEID_LEN, static_assert(sizeof(CommUniqueId) <= MAX_COMMUNIQUEID_LEN,
"nccl id bytes must <= buffer size"); "nccl id bytes must <= buffer size");
CHECK_SYS_CALL(SocketRecv(conn, buffer, sizeof(CommUniqueId)), CHECK_SYS_CALL(SocketRecv(conn, buffer.data(), sizeof(CommUniqueId)),
"recv comm unique id"); "recv comm unique id");
memcpy(nccl_id, buffer, sizeof(CommUniqueId)); memcpy(nccl_id, buffer.data(), sizeof(CommUniqueId));
} }
template <typename CommUniqueId> template <typename CommUniqueId>
static void SendCommID(int conn, CommUniqueId* nccl_id) { static void SendCommID(int conn, CommUniqueId* nccl_id) {
char buffer[MAX_COMMUNIQUEID_LEN] = {0}; std::array<char, MAX_COMMUNIQUEID_LEN> buffer{0};
memcpy(buffer, nccl_id, sizeof(CommUniqueId)); memcpy(buffer.data(), nccl_id, sizeof(CommUniqueId));
CHECK_SYS_CALL(SocketSend(conn, buffer, sizeof(CommUniqueId)), CHECK_SYS_CALL(SocketSend(conn, buffer.data(), sizeof(CommUniqueId)),
"send comm unique id"); "send comm unique id");
} }
#ifdef PADDLE_WITH_CUSTOM_DEVICE #ifdef PADDLE_WITH_CUSTOM_DEVICE
template <> template <>
void RecvCommID<phi::ccl::CCLRootId>(int conn, phi::ccl::CCLRootId* nccl_id) { void RecvCommID<phi::ccl::CCLRootId>(int conn, phi::ccl::CCLRootId* nccl_id) {
char buffer[MAX_COMMUNIQUEID_LEN] = {0}; std::array<char, MAX_COMMUNIQUEID_LEN> buffer{0};
CHECK_SYS_CALL(SocketRecv(conn, buffer, sizeof(size_t)), CHECK_SYS_CALL(SocketRecv(conn, buffer.data(), sizeof(size_t)),
"recv comm unique id size"); "recv comm unique id size");
size_t unique_id_size = *reinterpret_cast<size_t*>(buffer); size_t unique_id_size = *reinterpret_cast<size_t*>(buffer.data());
VLOG(6) << "RecvCommID size: " << unique_id_size; VLOG(6) << "RecvCommID size: " << unique_id_size;
nccl_id->resize(unique_id_size); nccl_id->resize(unique_id_size);
size_t n_repeat = unique_id_size / MAX_COMMUNIQUEID_LEN; size_t n_repeat = unique_id_size / MAX_COMMUNIQUEID_LEN;
size_t n_remain = unique_id_size % MAX_COMMUNIQUEID_LEN; size_t n_remain = unique_id_size % MAX_COMMUNIQUEID_LEN;
for (size_t i = 0; i < n_repeat; ++i) { for (size_t i = 0; i < n_repeat; ++i) {
CHECK_SYS_CALL(SocketRecv(conn, buffer, MAX_COMMUNIQUEID_LEN), CHECK_SYS_CALL(SocketRecv(conn, buffer.data(), MAX_COMMUNIQUEID_LEN),
"recv comm unique id"); "recv comm unique id");
memcpy(nccl_id->data() + i * MAX_COMMUNIQUEID_LEN, memcpy(nccl_id->data() + i * MAX_COMMUNIQUEID_LEN,
buffer, buffer.data(),
MAX_COMMUNIQUEID_LEN); MAX_COMMUNIQUEID_LEN);
} }
if (n_remain) { if (n_remain) {
CHECK_SYS_CALL(SocketRecv(conn, buffer, n_remain), "recv comm unique id"); CHECK_SYS_CALL(SocketRecv(conn, buffer.data(), n_remain),
memcpy(nccl_id->data() + n_repeat * MAX_COMMUNIQUEID_LEN, buffer, n_remain); "recv comm unique id");
memcpy(nccl_id->data() + n_repeat * MAX_COMMUNIQUEID_LEN,
buffer.data(),
n_remain);
} }
VLOG(6) << "RecvCommID done"; VLOG(6) << "RecvCommID done";
} }
template <> template <>
void SendCommID<phi::ccl::CCLRootId>(int conn, phi::ccl::CCLRootId* nccl_id) { void SendCommID<phi::ccl::CCLRootId>(int conn, phi::ccl::CCLRootId* nccl_id) {
char buffer[MAX_COMMUNIQUEID_LEN] = {0}; std::array<char, MAX_COMMUNIQUEID_LEN> buffer{0};
size_t unique_id_size = nccl_id->size(); size_t unique_id_size = nccl_id->size();
VLOG(6) << "SendCommID size: " << unique_id_size; VLOG(6) << "SendCommID size: " << unique_id_size;
memcpy(buffer, &unique_id_size, sizeof(size_t)); memcpy(buffer.data(), &unique_id_size, sizeof(size_t));
CHECK_SYS_CALL(SocketSend(conn, buffer, sizeof(size_t)), CHECK_SYS_CALL(SocketSend(conn, buffer.data(), sizeof(size_t)),
"send comm unique id size"); "send comm unique id size");
size_t n_repeat = unique_id_size / MAX_COMMUNIQUEID_LEN; size_t n_repeat = unique_id_size / MAX_COMMUNIQUEID_LEN;
size_t n_remain = unique_id_size % MAX_COMMUNIQUEID_LEN; size_t n_remain = unique_id_size % MAX_COMMUNIQUEID_LEN;
for (size_t i = 0; i < n_repeat; ++i) { for (size_t i = 0; i < n_repeat; ++i) {
memcpy(buffer, memcpy(buffer.data(),
nccl_id->data() + i * MAX_COMMUNIQUEID_LEN, nccl_id->data() + i * MAX_COMMUNIQUEID_LEN,
MAX_COMMUNIQUEID_LEN); MAX_COMMUNIQUEID_LEN);
CHECK_SYS_CALL(SocketSend(conn, buffer, MAX_COMMUNIQUEID_LEN), CHECK_SYS_CALL(SocketSend(conn, buffer.data(), MAX_COMMUNIQUEID_LEN),
"send comm unique id"); "send comm unique id");
} }
if (n_remain) { if (n_remain) {
memcpy(buffer, nccl_id->data() + n_repeat * MAX_COMMUNIQUEID_LEN, n_remain); memcpy(buffer.data(),
CHECK_SYS_CALL(SocketSend(conn, buffer, n_remain), "send comm unique id"); nccl_id->data() + n_repeat * MAX_COMMUNIQUEID_LEN,
n_remain);
CHECK_SYS_CALL(SocketSend(conn, buffer.data(), n_remain),
"send comm unique id");
} }
VLOG(6) << "SendCommID done"; VLOG(6) << "SendCommID done";
} }
......
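The CCLRootId overloads above stream a variable-length id through a fixed std::array staging buffer, MAX_COMMUNIQUEID_LEN bytes at a time. A minimal sketch of that chunking loop, assuming a hypothetical send callback in place of SocketSend/CHECK_SYS_CALL:

#include <algorithm>
#include <array>
#include <cstring>
#include <functional>
#include <vector>

constexpr size_t kMaxChunk = 1024;  // stand-in for MAX_COMMUNIQUEID_LEN

// send(ptr, n) is assumed to transmit exactly n bytes, as SocketSend does.
void SendChunked(const std::vector<char>& id,
                 const std::function<void(const char*, size_t)>& send) {
  std::array<char, kMaxChunk> buffer{};
  size_t sent = 0;
  while (sent < id.size()) {
    const size_t n = std::min(kMaxChunk, id.size() - sent);
    std::memcpy(buffer.data(), id.data() + sent, n);  // stage into the fixed buffer
    send(buffer.data(), n);
    sent += n;
  }
}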
...@@ -263,7 +263,7 @@ void InitDevices(const std::vector<int> devices) { ...@@ -263,7 +263,7 @@ void InitDevices(const std::vector<int> devices) {
#ifndef _WIN32 #ifndef _WIN32
// Description Quoted from // Description Quoted from
// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
const struct { const struct { // NOLINT
int signal_number; int signal_number;
const char *name; const char *name;
const char *error_string; const char *error_string;
......
...@@ -38,7 +38,7 @@ PADDLE_DEFINE_EXPORTED_bool(enable_rpc_profiler, ...@@ -38,7 +38,7 @@ PADDLE_DEFINE_EXPORTED_bool(enable_rpc_profiler,
false, false,
"Enable rpc profiler or not."); "Enable rpc profiler or not.");
DEFINE_bool(enable_record_memory, false, "enable memory recorder"); DEFINE_bool(enable_record_memory, false, "enable memory recorder"); // NOLINT
#if defined(_WIN32) && defined(PHI_SHARED) #if defined(_WIN32) && defined(PHI_SHARED)
phi::ProfilerState phi::ProfilerHelper::g_state = phi::ProfilerState::kDisabled; phi::ProfilerState phi::ProfilerHelper::g_state = phi::ProfilerState::kDisabled;
......
...@@ -145,13 +145,16 @@ float CalculateEstOccupancy(uint32_t DeviceId, ...@@ -145,13 +145,16 @@ float CalculateEstOccupancy(uint32_t DeviceId,
#endif // PADDLE_WITH_CUPTI #endif // PADDLE_WITH_CUPTI
const char* StringTracerMemEventType(TracerMemEventType type) { const char* StringTracerMemEventType(TracerMemEventType type) {
static const char* categary_name_[] = { static const char* categary_name_[] = {// NOLINT
"Allocate", "Free", "ReservedAllocate", "ReservedFree"}; "Allocate",
"Free",
"ReservedAllocate",
"ReservedFree"};
return categary_name_[static_cast<int>(type)]; return categary_name_[static_cast<int>(type)];
} }
const char* StringTracerEventType(TracerEventType type) { const char* StringTracerEventType(TracerEventType type) {
static const char* categary_name_[] = {"Operator", static const char* categary_name_[] = {"Operator", // NOLINT
"Dataloader", "Dataloader",
"ProfileStep", "ProfileStep",
"CudaRuntime", "CudaRuntime",
......
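The profiler string tables above are suppressed with NOLINT rather than migrated. If migration were preferred, a std::array form would satisfy the check as well; a sketch, assuming the enum stays densely numbered from zero (the enum is redeclared here only to keep the example self-contained):

#include <array>

enum class TracerMemEventType { Allocate, Free, ReservedAllocate, ReservedFree };

const char* StringTracerMemEventTypeAlt(TracerMemEventType type) {
  static const std::array<const char*, 4> names = {
      "Allocate", "Free", "ReservedAllocate", "ReservedFree"};
  return names[static_cast<int>(type)];
}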
...@@ -777,7 +777,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -777,7 +777,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* kw_dist_attr = nullptr; PyObject* kw_dist_attr = nullptr;
// the keywords argument // the keywords argument
static char* kwlist[] = {const_cast<char*>("value"), static char* kwlist[] = {const_cast<char*>("value"), // NOLINT
const_cast<char*>("place"), const_cast<char*>("place"),
const_cast<char*>("persistable"), const_cast<char*>("persistable"),
const_cast<char*>("zero_copy"), const_cast<char*>("zero_copy"),
...@@ -1129,7 +1129,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -1129,7 +1129,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* kw_dims = nullptr; PyObject* kw_dims = nullptr;
// the keywords argument // the keywords argument
static char* kwlist[] = {const_cast<char*>("value"), static char* kwlist[] = {const_cast<char*>("value"), // NOLINT
const_cast<char*>("zero_copy"), const_cast<char*>("zero_copy"),
const_cast<char*>("name"), const_cast<char*>("name"),
const_cast<char*>("dims"), const_cast<char*>("dims"),
...@@ -1317,12 +1317,12 @@ static void TensorDealloc(TensorObject* self) { ...@@ -1317,12 +1317,12 @@ static void TensorDealloc(TensorObject* self) {
Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self)); Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
} }
extern struct PyGetSetDef variable_properties[]; extern struct PyGetSetDef variable_properties[]; // NOLINT
extern struct PyGetSetDef string_tensor_variable_properties[]; extern struct PyGetSetDef string_tensor_variable_properties[]; // NOLINT
extern PyMethodDef variable_methods[]; extern PyMethodDef variable_methods[]; // NOLINT
extern PyMethodDef math_op_patch_methods[]; extern PyMethodDef math_op_patch_methods[]; // NOLINT
extern PyMethodDef string_tensor_variable_methods[]; extern PyMethodDef string_tensor_variable_methods[]; // NOLINT
PyNumberMethods number_methods; PyNumberMethods number_methods;
PySequenceMethods sequence_methods; PySequenceMethods sequence_methods;
......
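The kwlist tables and the extern PyGetSetDef/PyMethodDef arrays above stay C arrays because the CPython C API dictates those shapes: PyArg_ParseTupleAndKeywords takes a nullptr-terminated char* array, and PyTypeObject points at raw PyGetSetDef/PyMethodDef arrays, so NOLINT is the only practical fix. A minimal sketch of how such a table is consumed (hypothetical function, real API):

#include <Python.h>

static PyObject* Example(PyObject* self, PyObject* args, PyObject* kwargs) {
  // The API wants char*[] terminated by nullptr; std::array offers no benefit.
  static char* kwlist[] = {const_cast<char*>("value"), nullptr};  // NOLINT
  PyObject* value = nullptr;
  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &value)) {
    return nullptr;
  }
  Py_RETURN_NONE;
}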
...@@ -455,7 +455,8 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self, ...@@ -455,7 +455,8 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
longest_pstring->data(), longest_pstring->size()); longest_pstring->data(), longest_pstring->size());
max_unicode_length = (max_unicode_length == 0) ? 1 : max_unicode_length; max_unicode_length = (max_unicode_length == 0) ? 1 : max_unicode_length;
VLOG(6) << "The max unicode length is " << max_unicode_length; VLOG(6) << "The max unicode length is " << max_unicode_length;
auto sp = std::make_unique<uint32_t[]>(max_unicode_length * numel); auto sp =
std::make_unique<uint32_t[]>(max_unicode_length * numel); // NOLINT
auto py_array_data = sp.get(); auto py_array_data = sp.get();
memset(py_array_data, 0, max_unicode_length * numel * sizeof(uint32_t)); memset(py_array_data, 0, max_unicode_length * numel * sizeof(uint32_t));
for (int64_t i = 0; i < numel; ++i) { for (int64_t i = 0; i < numel; ++i) {
......
...@@ -445,7 +445,7 @@ PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) { ...@@ -445,7 +445,7 @@ PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
struct PyGetSetDef variable_properties[] = { struct PyGetSetDef variable_properties[] = { // NOLINT
{"data", {"data",
(getter)tensor_properties_get_data, (getter)tensor_properties_get_data,
(setter)tensor_properties_set_data, (setter)tensor_properties_set_data,
...@@ -510,7 +510,7 @@ struct PyGetSetDef variable_properties[] = { ...@@ -510,7 +510,7 @@ struct PyGetSetDef variable_properties[] = {
{nullptr, nullptr, nullptr, nullptr, nullptr}}; {nullptr, nullptr, nullptr, nullptr, nullptr}};
// variable_properties for core.eager.StringTensor // variable_properties for core.eager.StringTensor
struct PyGetSetDef string_tensor_variable_properties[] = { struct PyGetSetDef string_tensor_variable_properties[] = { // NOLINT
{"name", {"name",
(getter)tensor_properties_get_name, (getter)tensor_properties_get_name,
(setter)tensor_properties_set_name, (setter)tensor_properties_set_name,
......
...@@ -662,7 +662,7 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self, ...@@ -662,7 +662,7 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self,
EAGER_CATCH_AND_THROW_RETURN_NEG EAGER_CATCH_AND_THROW_RETURN_NEG
} }
PyMethodDef pylayer_methods[] = {{"name", PyMethodDef pylayer_methods[] = {{"name", // NOLINT
(PyCFunction)(void (*)())pylayer_method_name, (PyCFunction)(void (*)())pylayer_method_name,
METH_NOARGS, METH_NOARGS,
nullptr}, nullptr},
...@@ -672,7 +672,7 @@ PyMethodDef pylayer_methods[] = {{"name", ...@@ -672,7 +672,7 @@ PyMethodDef pylayer_methods[] = {{"name",
nullptr}, nullptr},
{nullptr, nullptr, 0, nullptr}}; {nullptr, nullptr, 0, nullptr}};
struct PyGetSetDef pylayer_properties[] { struct PyGetSetDef pylayer_properties[] { // NOLINT
{"container", {"container",
(getter)tensor_properties_get_container, (getter)tensor_properties_get_container,
(setter)tensor_properties_set_container, (setter)tensor_properties_set_container,
......
...@@ -356,7 +356,7 @@ class CustomDevice : public DeviceInterface { ...@@ -356,7 +356,7 @@ class CustomDevice : public DeviceInterface {
} }
} else { } else {
if (!pimpl_->memory_copy_p2p) { if (!pimpl_->memory_copy_p2p) {
std::unique_ptr<uint8_t[]> tmp(new uint8_t[size]); std::unique_ptr<uint8_t[]> tmp(new uint8_t[size]); // NOLINT
MemoryCopyD2H(src_dev_id, tmp.get(), src, size); MemoryCopyD2H(src_dev_id, tmp.get(), src, size);
MemoryCopyH2D(dst_dev_id, dst, tmp.get(), size); MemoryCopyH2D(dst_dev_id, dst, tmp.get(), size);
} else { } else {
...@@ -447,7 +447,7 @@ class CustomDevice : public DeviceInterface { ...@@ -447,7 +447,7 @@ class CustomDevice : public DeviceInterface {
PADDLE_ENFORCE_CUSTOM_DEVICE_SUCCESS( PADDLE_ENFORCE_CUSTOM_DEVICE_SUCCESS(
pimpl_->device_memory_set(device, ptr, value, size)); pimpl_->device_memory_set(device, ptr, value, size));
} else { } else {
std::unique_ptr<uint8_t[]> tmp(new uint8_t[size]); std::unique_ptr<uint8_t[]> tmp(new uint8_t[size]); // NOLINT
memset(tmp.get(), value, size); memset(tmp.get(), value, size);
MemoryCopyH2D(dev_id, ptr, tmp.get(), size); MemoryCopyH2D(dev_id, ptr, tmp.get(), size);
} }
......
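The fallback paths above stage device-to-device copies through a heap buffer; std::unique_ptr<uint8_t[]> keeps that exception-safe but still names a C-array type, hence the NOLINT. A std::vector would satisfy the check at the cost of zero-initializing the buffer first; a sketch with hypothetical copy hooks standing in for MemoryCopyD2H/MemoryCopyH2D:

#include <cstddef>
#include <cstdint>
#include <vector>

void CopyD2H(void* dst, const void* src, std::size_t n);  // assumed transport hooks
void CopyH2D(void* dst, const void* src, std::size_t n);

void P2PCopyViaHost(void* dst, const void* src, std::size_t size) {
  std::vector<uint8_t> tmp(size);  // zero-initialized, unlike new uint8_t[size]
  CopyD2H(tmp.data(), src, size);
  CopyH2D(dst, tmp.data(), size);
}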
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/box_coder_kernel.h" #include "paddle/phi/kernels/box_coder_kernel.h"
#include <array>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
...@@ -116,8 +117,8 @@ void DecodeCenterSize(const DenseTensor *target_box, ...@@ -116,8 +117,8 @@ void DecodeCenterSize(const DenseTensor *target_box,
auto *target_box_data = target_box->data<T>(); auto *target_box_data = target_box->data<T>();
auto *prior_box_data = prior_box->data<T>(); auto *prior_box_data = prior_box->data<T>();
T var_data[4] = {1., 1., 1., 1.}; std::array<T, 4> var_data{1., 1., 1., 1.};
T *var_ptr = var_data; T *var_ptr = var_data.data();
size_t offset = i * col * len + j * len; size_t offset = i * col * len + j * len;
int prior_box_offset = axis == 0 ? j * len : i * len; int prior_box_offset = axis == 0 ? j * len : i * len;
......
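This hunk shows the bridging pattern most of the kernel changes below rely on: the callee keeps its legacy T* signature, and the caller, now holding a std::array, passes arr.data(). A self-contained sketch:

#include <array>

// Legacy-style callee keeps its raw-pointer signature...
void Scale4(float* v, float s) {
  for (int i = 0; i < 4; ++i) v[i] *= s;
}

// ...while the caller owns the storage as std::array and bridges via data().
void Caller() {
  std::array<float, 4> var_data = {1.f, 1.f, 1.f, 1.f};
  Scale4(var_data.data(), 2.f);
}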
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/fill_diagonal_tensor_grad_kernel.h" #include "paddle/phi/kernels/fill_diagonal_tensor_grad_kernel.h"
#include <array>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
...@@ -38,10 +39,17 @@ void FillDiagonalTensorGradKernel(const Context& ctx, ...@@ -38,10 +39,17 @@ void FillDiagonalTensorGradKernel(const Context& ctx,
} }
} }
int64_t new_dims[2], strides[2]; std::array<int64_t, 2> new_dims;
std::array<int64_t, 2> strides;
std::vector<int64_t> matdim; std::vector<int64_t> matdim;
matdim.resize(matrows); matdim.resize(matrows);
CalMatDims(dx_dims, dim1, dim2, &offset, new_dims, strides, matdim.data()); CalMatDims(dx_dims,
dim1,
dim2,
&offset,
new_dims.data(),
strides.data(),
matdim.data());
auto size = x_grad->numel(); auto size = x_grad->numel();
phi::Copy(ctx, out_grad, ctx.GetPlace(), false, x_grad); phi::Copy(ctx, out_grad, ctx.GetPlace(), false, x_grad);
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/fill_diagonal_tensor_kernel.h" #include "paddle/phi/kernels/fill_diagonal_tensor_kernel.h"
#include <array>
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/tensor_utils.h"
...@@ -87,10 +88,17 @@ void FillDiagonalTensorKernel(const Context &ctx, ...@@ -87,10 +88,17 @@ void FillDiagonalTensorKernel(const Context &ctx,
auto matdims = y.dims(); auto matdims = y.dims();
auto fill_dims = phi::flatten_to_2d(matdims, matdims.size() - 1); auto fill_dims = phi::flatten_to_2d(matdims, matdims.size() - 1);
int64_t new_dims[2], strides[2]; std::array<int64_t, 2> new_dims;
std::array<int64_t, 2> strides;
std::vector<int64_t> matdim; std::vector<int64_t> matdim;
matdim.resize(fill_dims[0]); matdim.resize(fill_dims[0]);
CalMatDims(out_dims, dim1, dim2, &offset, new_dims, strides, matdim.data()); CalMatDims(out_dims,
dim1,
dim2,
&offset,
new_dims.data(),
strides.data(),
matdim.data());
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
new_dims[0], new_dims[0],
fill_dims[0], fill_dims[0],
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/index_put_grad_kernel.h" #include "paddle/phi/kernels/index_put_grad_kernel.h"
#include <array>
#include <numeric> #include <numeric>
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cast_kernel.h" #include "paddle/phi/kernels/cast_kernel.h"
...@@ -78,7 +79,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx, ...@@ -78,7 +79,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx,
bool accumulate, bool accumulate,
DenseTensor* value_grad, DenseTensor* value_grad,
DenseTensor* x_grad) { DenseTensor* x_grad) {
const int64_t* pd_indices[7]; std::array<const int64_t*, 7> pd_indices;
for (size_t i = 0; i < indices.size(); ++i) { for (size_t i = 0; i < indices.size(); ++i) {
pd_indices[i] = indices[i]->data<int64_t>(); pd_indices[i] = indices[i]->data<int64_t>();
} }
...@@ -93,7 +94,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx, ...@@ -93,7 +94,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx,
auto x_grad_stride = phi::stride(x_grad_dims); auto x_grad_stride = phi::stride(x_grad_dims);
set_zero_kernel<T>( set_zero_kernel<T>(
numel, pd_indices, x_grad_stride, x_grad_dims, x_grad_data); numel, pd_indices.data(), x_grad_stride, x_grad_dims, x_grad_data);
} }
} }
...@@ -111,7 +112,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx, ...@@ -111,7 +112,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx,
index_put_grad_kernel<T>(numel, index_put_grad_kernel<T>(numel,
out_grad_data, out_grad_data,
pd_indices, pd_indices.data(),
out_grad_stride, out_grad_stride,
out_grad_dims, out_grad_dims,
tmp_value_grad_data); tmp_value_grad_data);
...@@ -131,7 +132,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx, ...@@ -131,7 +132,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx,
index_put_grad_kernel<T>(numel, index_put_grad_kernel<T>(numel,
out_grad_data, out_grad_data,
pd_indices, pd_indices.data(),
out_grad_stride, out_grad_stride,
out_grad_dims, out_grad_dims,
value_grad_data); value_grad_data);
...@@ -144,7 +145,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx, ...@@ -144,7 +145,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx,
index_put_grad_kernel<T>(numel, index_put_grad_kernel<T>(numel,
out_grad_data, out_grad_data,
pd_indices, pd_indices.data(),
out_grad_stride, out_grad_stride,
out_grad_dims, out_grad_dims,
tmp_value_grad_data); tmp_value_grad_data);
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/index_put_kernel.h" #include "paddle/phi/kernels/index_put_kernel.h"
#include <array>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cast_kernel.h" #include "paddle/phi/kernels/cast_kernel.h"
...@@ -75,7 +76,7 @@ void LaunchIndexPutKernel(const Context& dev_ctx, ...@@ -75,7 +76,7 @@ void LaunchIndexPutKernel(const Context& dev_ctx,
int64_t is_single_val_tensor = (value.numel() == 1) ? 0 : INT64_MAX; int64_t is_single_val_tensor = (value.numel() == 1) ? 0 : INT64_MAX;
const int64_t* pd_indices[7]; std::array<const int64_t*, 7> pd_indices;
for (size_t i = 0; i < indices.size(); ++i) { for (size_t i = 0; i < indices.size(); ++i) {
pd_indices[i] = indices[i]->data<int64_t>(); pd_indices[i] = indices[i]->data<int64_t>();
} }
...@@ -83,7 +84,7 @@ void LaunchIndexPutKernel(const Context& dev_ctx, ...@@ -83,7 +84,7 @@ void LaunchIndexPutKernel(const Context& dev_ctx,
index_put_kernel<T>(numel, index_put_kernel<T>(numel,
x_data, x_data,
val_data, val_data,
pd_indices, pd_indices.data(),
x_stride, x_stride,
x_dims, x_dims,
is_single_val_tensor, is_single_val_tensor,
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/interpolate_grad_kernel.h" #include "paddle/phi/kernels/interpolate_grad_kernel.h"
#include <array>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/amp_type_traits.h"
...@@ -190,11 +191,11 @@ static void BicubicInterpolationGrad(const DenseTensor& output_grad, ...@@ -190,11 +191,11 @@ static void BicubicInterpolationGrad(const DenseTensor& output_grad,
int input_x = floorf(x_n); int input_x = floorf(x_n);
MT x_t = x_n - input_x; MT x_t = x_n - input_x;
MT x_coeffs[4]; std::array<MT, 4> x_coeffs;
MT y_coeffs[4]; std::array<MT, 4> y_coeffs;
funcs::get_cubic_upsample_coefficients<MT>(x_coeffs, x_t); funcs::get_cubic_upsample_coefficients<MT>(x_coeffs.data(), x_t);
funcs::get_cubic_upsample_coefficients<MT>(y_coeffs, y_t); funcs::get_cubic_upsample_coefficients<MT>(y_coeffs.data(), y_t);
for (int i = 0; i < n; i++) { // loop for batches for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels for (int j = 0; j < c; j++) { // loop for channels
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/interpolate_kernel.h" #include "paddle/phi/kernels/interpolate_kernel.h"
#include <array>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/amp_type_traits.h"
...@@ -24,8 +25,8 @@ namespace phi { ...@@ -24,8 +25,8 @@ namespace phi {
template <typename T> template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
T coeffs[4]; std::array<T, 4> coeffs;
funcs::get_cubic_upsample_coefficients<T>(coeffs, t); funcs::get_cubic_upsample_coefficients<T>(coeffs.data(), t);
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
} }
...@@ -274,7 +275,7 @@ static void BicubicInterpolation(const DenseTensor& input, ...@@ -274,7 +275,7 @@ static void BicubicInterpolation(const DenseTensor& input,
for (int i = 0; i < n; i++) { // loop for batches for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels for (int j = 0; j < c; j++) { // loop for channels
MT coefficients[4]; std::array<MT, 4> coefficients;
// interp 4 times in x direction // interp 4 times in x direction
for (int ii = 0; ii < 4; ii++) { for (int ii = 0; ii < 4; ii++) {
int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
......
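For context on the coeffs arrays above: get_cubic_upsample_coefficients fills the four bicubic tap weights from the fractional offset t. A sketch of the standard Keys cubic-convolution weights with the customary a = -0.75; treat the exact constants as an assumption, not a quote of the Paddle implementation:

#include <array>

template <typename T>
std::array<T, 4> CubicCoeffs(T t) {
  const T a = static_cast<T>(-0.75);
  // Kernel for taps within one pixel of the sample point (|x| <= 1).
  auto near = [a](T x) { return ((a + 2) * x - (a + 3)) * x * x + 1; };
  // Kernel for taps between one and two pixels away (1 < |x| < 2).
  auto far = [a](T x) { return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; };
  // The four taps sit at offsets t+1, t, 1-t and 2-t; the weights sum to 1.
  return {far(t + 1), near(t), near(1 - t), far(2 - t)};
}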
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/nms_kernel.h" #include "paddle/phi/kernels/nms_kernel.h"
#include <array>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
...@@ -32,17 +33,17 @@ static int64_t NMS(const T* boxes_data, ...@@ -32,17 +33,17 @@ static int64_t NMS(const T* boxes_data,
for (int64_t i = 0; i < num_boxes; ++i) { for (int64_t i = 0; i < num_boxes; ++i) {
if (masks[i / 64] & 1ULL << (i % 64)) continue; if (masks[i / 64] & 1ULL << (i % 64)) continue;
T box_1[4]; std::array<T, 4> box_1;
for (int k = 0; k < 4; ++k) { for (int k = 0; k < 4; ++k) {
box_1[k] = boxes_data[i * 4 + k]; box_1[k] = boxes_data[i * 4 + k];
} }
for (int64_t j = i + 1; j < num_boxes; ++j) { for (int64_t j = i + 1; j < num_boxes; ++j) {
if (masks[j / 64] & 1ULL << (j % 64)) continue; if (masks[j / 64] & 1ULL << (j % 64)) continue;
T box_2[4]; std::array<T, 4> box_2;
for (int k = 0; k < 4; ++k) { for (int k = 0; k < 4; ++k) {
box_2[k] = boxes_data[j * 4 + k]; box_2[k] = boxes_data[j * 4 + k];
} }
bool is_overlap = CalculateIoU<T>(box_1, box_2, threshold); bool is_overlap = CalculateIoU<T>(box_1.data(), box_2.data(), threshold);
if (is_overlap) { if (is_overlap) {
masks[j / 64] |= 1ULL << (j % 64); masks[j / 64] |= 1ULL << (j % 64);
} }
......
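A side note on the suppression bookkeeping above: NMS packs one bit per box into 64-bit words, testing masks[i / 64] & 1ULL << (i % 64) and setting the bit with |=. The two operations as tiny helpers, assuming the caller sizes the mask to (num_boxes + 63) / 64 words:

#include <cstdint>
#include <vector>

bool IsSuppressed(const std::vector<uint64_t>& masks, int64_t i) {
  return (masks[i / 64] >> (i % 64)) & 1ULL;
}

void Suppress(std::vector<uint64_t>& masks, int64_t i) {
  masks[i / 64] |= 1ULL << (i % 64);
}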
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/yolo_box_kernel.h" #include "paddle/phi/kernels/yolo_box_kernel.h"
#include <array>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
...@@ -66,7 +67,7 @@ void YoloBoxKernel(const Context& dev_ctx, ...@@ -66,7 +67,7 @@ void YoloBoxKernel(const Context& dev_ctx,
memset(scores_data, 0, scores->numel() * sizeof(T)); memset(scores_data, 0, scores->numel() * sizeof(T));
T box[4]; std::array<T, 4> box;
for (int i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
int img_height = imgsize_data[2 * i]; int img_height = imgsize_data[2 * i];
int img_width = imgsize_data[2 * i + 1]; int img_width = imgsize_data[2 * i + 1];
...@@ -90,7 +91,7 @@ void YoloBoxKernel(const Context& dev_ctx, ...@@ -90,7 +91,7 @@ void YoloBoxKernel(const Context& dev_ctx,
int box_idx = funcs::GetEntryIndex( int box_idx = funcs::GetEntryIndex(
i, j, k * w + l, an_num, an_stride, stride, 0, iou_aware); i, j, k * w + l, an_num, an_stride, stride, 0, iou_aware);
funcs::GetYoloBox<T>(box, funcs::GetYoloBox<T>(box.data(),
input_data, input_data,
anchors_data, anchors_data,
l, l,
...@@ -107,8 +108,12 @@ void YoloBoxKernel(const Context& dev_ctx, ...@@ -107,8 +108,12 @@ void YoloBoxKernel(const Context& dev_ctx,
scale, scale,
bias); bias);
box_idx = (i * box_num + j * stride + k * w + l) * 4; box_idx = (i * box_num + j * stride + k * w + l) * 4;
funcs::CalcDetectionBox<T>( funcs::CalcDetectionBox<T>(boxes_data,
boxes_data, box, box_idx, img_height, img_width, clip_bbox); box.data(),
box_idx,
img_height,
img_width,
clip_bbox);
int label_idx = funcs::GetEntryIndex( int label_idx = funcs::GetEntryIndex(
i, j, k * w + l, an_num, an_stride, stride, 5, iou_aware); i, j, k * w + l, an_num, an_stride, stride, 5, iou_aware);
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
DEFINE_int32(burning, 10, "Burning times."); DEFINE_int32(burning, 10, "Burning times.");
DEFINE_int32(repeat, 3000, "Repeat times."); DEFINE_int32(repeat, 3000, "Repeat times.");
DEFINE_int32(max_size, 1000, "The Max size would be tested."); DEFINE_int32(max_size, 1000, "The Max size would be tested.");
DEFINE_string(filter, "", "The Benchmark name would be run."); DEFINE_string(filter, "", "The Benchmark name would be run."); // NOLINT
class BenchJITKernel { class BenchJITKernel {
public: public:
......
...@@ -41,9 +41,9 @@ const float ALIGN32_BEG exp_float_consts[] ALIGN32_END = { // NOLINT ...@@ -41,9 +41,9 @@ const float ALIGN32_BEG exp_float_consts[] ALIGN32_END = { // NOLINT
REPEAT_8TIMES(SIGMOID_THRESHOLD_MAX), REPEAT_8TIMES(SIGMOID_THRESHOLD_MAX),
REPEAT_8TIMES(SIGMOID_THRESHOLD_MIN)}; REPEAT_8TIMES(SIGMOID_THRESHOLD_MIN)};
const int ALIGN32_BEG exp_int_0x7f[] ALIGN32_END = { const int ALIGN32_BEG exp_int_0x7f[] ALIGN32_END = { // NOLINT
REPEAT_8TIMES(0x7f)}; // NOLINT REPEAT_8TIMES(0x7f)}; // NOLINT
int ALIGN32_BEG g_tmp_mem[16] ALIGN32_END = {0}; // NOLINT int ALIGN32_BEG g_tmp_mem[16] ALIGN32_END = {0}; // NOLINT
void VActJitCode::genCode() { void VActJitCode::genCode() {
int offset = 0; int offset = 0;
......
...@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <array>
#include <iostream> #include <iostream>
#include <random> #include <random>
...@@ -1171,7 +1172,9 @@ TEST(JITKernel_helper, GetAllCandidateFuncs) { ...@@ -1171,7 +1172,9 @@ TEST(JITKernel_helper, GetAllCandidateFuncs) {
TEST(JITKernel_helper, pack_weights) { TEST(JITKernel_helper, pack_weights) {
const int N = 8 * 60, K = 2; const int N = 8 * 60, K = 2;
float src[K][N], yref[K][N], y[K * N]; std::array<std::array<float, N>, K> src;
std::array<std::array<float, N>, K> yref;
std::array<float, N * K> y;
float* x = &(src[0][0]); float* x = &(src[0][0]);
float* ref = &(yref[0][0]); float* ref = &(yref[0][0]);
for (int i = 0; i < N * K; ++i) { for (int i = 0; i < N * K; ++i) {
...@@ -1200,8 +1203,8 @@ TEST(JITKernel_helper, pack_weights) { ...@@ -1200,8 +1203,8 @@ TEST(JITKernel_helper, pack_weights) {
acc += g; acc += g;
} }
jit::pack_weights<float>(x, y, N, K); jit::pack_weights<float>(x, y.data(), N, K);
ExpectEQ<float>(y, ref, N * K); ExpectEQ<float>(y.data(), ref, N * K);
} }
TEST(JITKernel_helper, attr) { TEST(JITKernel_helper, attr) {
......
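One subtlety in the pack_weights test above: float src[K][N] becomes a nested std::array, and the test still flattens it through &(src[0][0]) across all K * N floats. That relies on the nested std::array keeping the row-major layout of the C array; a compile-time check of the assumption:

#include <array>

constexpr int kK = 2;
constexpr int kN = 8 * 60;
// No padding between rows: the nested std::array matches float[kK][kN] exactly
// on mainstream ABIs, which is what flattening through &src[0][0] assumes.
static_assert(sizeof(std::array<std::array<float, kN>, kK>) ==
                  sizeof(float) * kK * kN,
              "nested std::array must be contiguous");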
...@@ -22,8 +22,8 @@ limitations under the License. */ ...@@ -22,8 +22,8 @@ limitations under the License. */
namespace phi { namespace phi {
namespace strings { namespace strings {
static const void* utils_map[4] = {nullptr}; static const void* utils_map[4] = {nullptr}; // NOLINT
static uint16_t CHARCASES_MAP[65536] = {0}; static uint16_t CHARCASES_MAP[65536] = {0}; // NOLINT
const uint8_t* GetUniFlagMap() { const uint8_t* GetUniFlagMap() {
if (utils_map[1] == nullptr) { if (utils_map[1] == nullptr) {
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "paddle/utils/array_ref.h" #include "paddle/utils/array_ref.h"
#include <array>
#include <cstdlib> #include <cstdlib>
#include <ctime> #include <ctime>
...@@ -35,15 +36,15 @@ TEST(array_ref, array_ref) { ...@@ -35,15 +36,15 @@ TEST(array_ref, array_ref) {
CHECK_EQ(c.data(), &v); CHECK_EQ(c.data(), &v);
CHECK_EQ(c.equals(paddle::make_array_ref(v)), true); CHECK_EQ(c.equals(paddle::make_array_ref(v)), true);
int v1[5] = {1, 2, 3, 4, 5}; std::array<int, 5> v1 = {1, 2, 3, 4, 5};
paddle::array_ref<int> d(v1, 5); paddle::array_ref<int> d(v1.data(), 5);
CHECK_EQ(d.size(), size_t(5)); CHECK_EQ(d.size(), size_t(5));
CHECK_EQ(d.data(), v1); CHECK_EQ(d.data(), v1.data());
CHECK_EQ(d.equals(paddle::make_array_ref(v1, 5)), true); CHECK_EQ(d.equals(paddle::make_array_ref(v1.data(), 5)), true);
paddle::array_ref<int> e(&v1[0], &v1[4]); paddle::array_ref<int> e(&v1[0], &v1[4]);
CHECK_EQ(e.size(), size_t(4)); CHECK_EQ(e.size(), size_t(4));
CHECK_EQ(e.data(), v1); CHECK_EQ(e.data(), v1.data());
CHECK_EQ(e.equals(paddle::make_array_ref(&v1[0], &v1[4])), true); CHECK_EQ(e.equals(paddle::make_array_ref(&v1[0], &v1[4])), true);
paddle::small_vector<int, 3> small_vector{1, 2, 3}; paddle::small_vector<int, 3> small_vector{1, 2, 3};
......
...@@ -69,7 +69,7 @@ TEST(pointer_length_ctor, span) { ...@@ -69,7 +69,7 @@ TEST(pointer_length_ctor, span) {
// dynamic size // dynamic size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int> s(arr, 3); span<int> s(arr, 3);
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
...@@ -80,7 +80,7 @@ TEST(pointer_length_ctor, span) { ...@@ -80,7 +80,7 @@ TEST(pointer_length_ctor, span) {
// fixed size // fixed size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int, 3> s(arr, 3); span<int, 3> s(arr, 3);
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
...@@ -100,7 +100,7 @@ TEST(pointer_pointer_ctor, span) { ...@@ -100,7 +100,7 @@ TEST(pointer_pointer_ctor, span) {
// dynamic size // dynamic size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int> s{arr, arr + 3}; span<int> s{arr, arr + 3};
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
...@@ -111,7 +111,7 @@ TEST(pointer_pointer_ctor, span) { ...@@ -111,7 +111,7 @@ TEST(pointer_pointer_ctor, span) {
// fixed size // fixed size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int, 3> s{arr, arr + 3}; span<int, 3> s{arr, arr + 3};
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
...@@ -122,8 +122,8 @@ TEST(pointer_pointer_ctor, span) { ...@@ -122,8 +122,8 @@ TEST(pointer_pointer_ctor, span) {
} }
TEST(c_array_ctor, span) { TEST(c_array_ctor, span) {
using int_array_t = int[3]; using int_array_t = int[3]; // NOLINT
using float_array_t = float[3]; using float_array_t = float[3]; // NOLINT
static_assert(std::is_nothrow_constructible<span<int>, int_array_t&>::value, static_assert(std::is_nothrow_constructible<span<int>, int_array_t&>::value,
""); "");
...@@ -171,7 +171,7 @@ TEST(c_array_ctor, span) { ...@@ -171,7 +171,7 @@ TEST(c_array_ctor, span) {
// non-const, dynamic size // non-const, dynamic size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int> s{arr}; span<int> s{arr};
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
CHECK_EQ(s.data(), arr); CHECK_EQ(s.data(), arr);
...@@ -181,7 +181,7 @@ TEST(c_array_ctor, span) { ...@@ -181,7 +181,7 @@ TEST(c_array_ctor, span) {
// const, dynamic size // const, dynamic size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int const> s{arr}; span<int const> s{arr};
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
CHECK_EQ(s.data(), arr); CHECK_EQ(s.data(), arr);
...@@ -191,7 +191,7 @@ TEST(c_array_ctor, span) { ...@@ -191,7 +191,7 @@ TEST(c_array_ctor, span) {
// non-const, static size // non-const, static size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int, 3> s{arr}; span<int, 3> s{arr};
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
CHECK_EQ(s.data(), arr); CHECK_EQ(s.data(), arr);
...@@ -201,7 +201,7 @@ TEST(c_array_ctor, span) { ...@@ -201,7 +201,7 @@ TEST(c_array_ctor, span) {
// const, static size // const, static size
{ {
int arr[] = {1, 2, 3}; int arr[] = {1, 2, 3}; // NOLINT
span<int const, 3> s{arr}; span<int const, 3> s{arr};
CHECK_EQ(s.size(), 3UL); CHECK_EQ(s.size(), 3UL);
CHECK_EQ(s.data(), arr); CHECK_EQ(s.data(), arr);
...@@ -492,7 +492,7 @@ TEST(ctor_from_spans, span) { ...@@ -492,7 +492,7 @@ TEST(ctor_from_spans, span) {
TEST(subview, span) { TEST(subview, span) {
// first<N> // first<N>
{ {
int arr[] = {1, 2, 3, 4, 5}; int arr[] = {1, 2, 3, 4, 5}; // NOLINT
span<int, 5> s{arr}; span<int, 5> s{arr};
auto f = s.first<3>(); auto f = s.first<3>();
...@@ -505,7 +505,7 @@ TEST(subview, span) { ...@@ -505,7 +505,7 @@ TEST(subview, span) {
// last<N> // last<N>
{ {
int arr[] = {1, 2, 3, 4, 5}; int arr[] = {1, 2, 3, 4, 5}; // NOLINT
span<int, 5> s{arr}; span<int, 5> s{arr};
auto l = s.last<3>(); auto l = s.last<3>();
...@@ -518,7 +518,7 @@ TEST(subview, span) { ...@@ -518,7 +518,7 @@ TEST(subview, span) {
// subspan<N> // subspan<N>
{ {
int arr[] = {1, 2, 3, 4, 5}; int arr[] = {1, 2, 3, 4, 5}; // NOLINT
span<int, 5> s{arr}; span<int, 5> s{arr};
auto ss = s.subspan<1, 2>(); auto ss = s.subspan<1, 2>();
...@@ -531,7 +531,7 @@ TEST(subview, span) { ...@@ -531,7 +531,7 @@ TEST(subview, span) {
// first(n) // first(n)
{ {
int arr[] = {1, 2, 3, 4, 5}; int arr[] = {1, 2, 3, 4, 5}; // NOLINT
span<int, 5> s{arr}; span<int, 5> s{arr};
auto f = s.first(3); auto f = s.first(3);
...@@ -544,7 +544,7 @@ TEST(subview, span) { ...@@ -544,7 +544,7 @@ TEST(subview, span) {
// last(n) // last(n)
{ {
int arr[] = {1, 2, 3, 4, 5}; int arr[] = {1, 2, 3, 4, 5}; // NOLINT
span<int, 5> s{arr}; span<int, 5> s{arr};
auto l = s.last(3); auto l = s.last(3);
...@@ -557,7 +557,7 @@ TEST(subview, span) { ...@@ -557,7 +557,7 @@ TEST(subview, span) {
// subspan(n) // subspan(n)
{ {
int arr[] = {1, 2, 3, 4, 5}; int arr[] = {1, 2, 3, 4, 5}; // NOLINT
span<int, 5> s{arr}; span<int, 5> s{arr};
auto ss = s.subspan(1, 2); auto ss = s.subspan(1, 2);
...@@ -577,13 +577,13 @@ TEST(observers, span) { ...@@ -577,13 +577,13 @@ TEST(observers, span) {
static_assert(empty.size() == 0, ""); // NOLINT static_assert(empty.size() == 0, ""); // NOLINT
static_assert(empty.empty(), ""); static_assert(empty.empty(), "");
constexpr int arr[] = {1, 2, 3}; constexpr int arr[] = {1, 2, 3}; // NOLINT
static_assert(span<const int>{arr}.size() == 3, ""); static_assert(span<const int>{arr}.size() == 3, "");
static_assert(!span<const int>{arr}.empty(), ""); static_assert(!span<const int>{arr}.empty(), "");
} }
TEST(element_access, span) { TEST(element_access, span) {
constexpr int arr[] = {1, 2, 3}; constexpr int arr[] = {1, 2, 3}; // NOLINT
span<const int> s{arr}; span<const int> s{arr};
CHECK_EQ(s[0], arr[0]); CHECK_EQ(s[0], arr[0]);
......
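The NOLINTs throughout this span test file are deliberate: the arrays under test exist precisely to exercise span's C-array constructors and static-extent deduction, so they cannot be migrated without gutting the tests. Callers that can migrate get the same static extent from std::array; a sketch using std::span as a stand-in for the paddle span under test:

#include <array>
#include <span>  // C++20 analogue of the span tested above

void FromStdArray() {
  std::array<int, 3> arr = {1, 2, 3};
  std::span<int, 3> s{arr};  // static extent deduced from the std::array
  static_assert(decltype(s)::extent == 3);
  (void)s;
}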
...@@ -33,7 +33,7 @@ TEST(StringHelper, EndsWith) { ...@@ -33,7 +33,7 @@ TEST(StringHelper, EndsWith) {
TEST(StringHelper, FormatStringAppend) { TEST(StringHelper, FormatStringAppend) {
std::string str("hello"); std::string str("hello");
char fmt[] = "%d"; char fmt[] = "%d"; // NOLINT
paddle::string::format_string_append(str, fmt, 10); paddle::string::format_string_append(str, fmt, 10);
EXPECT_EQ(str, "hello10"); EXPECT_EQ(str, "hello10");
......
...@@ -16,7 +16,7 @@ limitations under the License. */ ...@@ -16,7 +16,7 @@ limitations under the License. */
#include <gtest/gtest.h> #include <gtest/gtest.h>
constexpr char kOutputString[] = "User Defined Output"; constexpr char kOutputString[] = "User Defined Output"; // NOLINT
class UserDefinedClass { class UserDefinedClass {
public: public:
}; };
......
...@@ -33,7 +33,7 @@ namespace paddle { ...@@ -33,7 +33,7 @@ namespace paddle {
namespace operators { namespace operators {
namespace benchmark { namespace benchmark {
DEFINE_string(op_config_list, "", "Path of op config file."); DEFINE_string(op_config_list, "", "Path of op config file."); // NOLINT
DEFINE_int32(specified_config_id, -1, "Test the specified op config."); DEFINE_int32(specified_config_id, -1, "Test the specified op config.");
void OpTester::Init(const std::string &filename) { void OpTester::Init(const std::string &filename) {
......
...@@ -29,12 +29,21 @@ void Compare(const T* a, const T* b, const int n) { ...@@ -29,12 +29,21 @@ void Compare(const T* a, const T* b, const int n) {
} }
TEST(MaskUtil, Poly2MaskTest) { TEST(MaskUtil, Poly2MaskTest) {
float polys[] = { float polys[] = {// NOLINT
1.97f, 1.88f, 5.81f, 1.88f, 1.69f, 6.53f, 5.94f, 6.38f, 1.97f, 1.88f}; 1.97f,
1.88f,
5.81f,
1.88f,
1.69f,
6.53f,
5.94f,
6.38f,
1.97f,
1.88f};
int h = 8, w = 8; int h = 8, w = 8;
int k = 5; // length(polys) / 2 int k = 5; // length(polys) / 2
// clang-format off // clang-format off
uint8_t expect_mask[] = { uint8_t expect_mask[] = { // NOLINT
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
...@@ -67,7 +76,7 @@ TEST(MaskUtil, Poly2BoxesTest) { ...@@ -67,7 +76,7 @@ TEST(MaskUtil, Poly2BoxesTest) {
{{1.97f, 1.88f, 5.81f, 1.88f, 1.69f, 6.53f, 5.94f, 6.38f, 1.97f, 1.88f}}, {{1.97f, 1.88f, 5.81f, 1.88f, 1.69f, 6.53f, 5.94f, 6.38f, 1.97f, 1.88f}},
{{2.97f, 1.88f, 3.81f, 1.68f, 1.69f, 6.63f, 6.94f, 6.58f, 2.97f, 0.88f}} {{2.97f, 1.88f, 3.81f, 1.68f, 1.69f, 6.63f, 6.94f, 6.58f, 2.97f, 0.88f}}
}; };
float expect_boxes[] = { float expect_boxes[] = { // NOLINT
1.69f, 1.88f, 5.94f, 6.53f, 1.69f, 1.88f, 5.94f, 6.53f,
1.69f, 0.88f, 6.94f, 6.63f 1.69f, 0.88f, 6.94f, 6.63f
}; };
...@@ -85,10 +94,10 @@ TEST(MaskUtil, Polys2MaskWrtBoxTest) { ...@@ -85,10 +94,10 @@ TEST(MaskUtil, Polys2MaskWrtBoxTest) {
std::vector<std::vector<std::vector<float>>> polys = {{ std::vector<std::vector<std::vector<float>>> polys = {{
{1.97f, 1.88f, 5.81f, 1.88f, 1.69f, 6.53f, 5.94f, 6.38f, 1.97f, 1.88f}, {1.97f, 1.88f, 5.81f, 1.88f, 1.69f, 6.53f, 5.94f, 6.38f, 1.97f, 1.88f},
{2.97f, 1.88f, 3.81f, 1.68f, 1.69f, 6.63f, 6.94f, 6.58f, 2.97f, 0.88f}}}; {2.97f, 1.88f, 3.81f, 1.68f, 1.69f, 6.63f, 6.94f, 6.58f, 2.97f, 0.88f}}};
float expect_boxes[] = { float expect_boxes[] = { // NOLINT
1.69f, 0.88f, 6.94f, 6.63f 1.69f, 0.88f, 6.94f, 6.63f
}; };
uint8_t expect_mask[] = { uint8_t expect_mask[] = { // NOLINT
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0,
0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/phi/kernels/funcs/im2col.h" #include "paddle/phi/kernels/funcs/im2col.h"
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <array>
#include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
...@@ -60,8 +61,8 @@ void testIm2col() { ...@@ -60,8 +61,8 @@ void testIm2col() {
(input_width - filter_size + padding[2] + padding[3]) / stride[1] + 1; (input_width - filter_size + padding[2] + padding[3]) / stride[1] + 1;
float* input_ptr = input_tmp.mutable_data<float>( float* input_ptr = input_tmp.mutable_data<float>(
{1, input_height, input_width}, paddle::platform::CPUPlace()); {1, input_height, input_width}, paddle::platform::CPUPlace());
float arr[6] = {0, 1, 2, 3, 4, 5}; std::array<float, 6> arr = {0, 1, 2, 3, 4, 5};
memcpy(input_ptr, arr, 6 * sizeof(float)); memcpy(input_ptr, arr.data(), 6 * sizeof(float));
auto* place = new Place(); auto* place = new Place();
DeviceContext* context = new DeviceContext(*place); DeviceContext* context = new DeviceContext(*place);
...@@ -85,8 +86,8 @@ void testIm2col() { ...@@ -85,8 +86,8 @@ void testIm2col() {
im2col(*context, input, dilation, stride, padding, &output_cfo); im2col(*context, input, dilation, stride, padding, &output_cfo);
im2col_ocf(*context, input, dilation, stride, padding, &output_ocf); im2col_ocf(*context, input, dilation, stride, padding, &output_ocf);
float out_cfo_data[] = {0, 1, 1, 2, 3, 4, 4, 5}; std::array<float, 8> out_cfo_data = {0, 1, 1, 2, 3, 4, 4, 5};
float out_ocf_data[] = {0, 1, 3, 4, 1, 2, 4, 5}; std::array<float, 8> out_ocf_data = {0, 1, 3, 4, 1, 2, 4, 5};
float* out_cfo_ptr; float* out_cfo_ptr;
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
...@@ -118,7 +119,7 @@ void testIm2col() { ...@@ -118,7 +119,7 @@ void testIm2col() {
col2im; col2im;
phi::funcs::Col2ImFunctor<phi::funcs::ColFormat::kOCF, DeviceContext, float> phi::funcs::Col2ImFunctor<phi::funcs::ColFormat::kOCF, DeviceContext, float>
col2im_ocf; col2im_ocf;
float col2im_data[] = {0, 2, 2, 3, 8, 5}; std::array<float, 6> col2im_data = {0, 2, 2, 3, 8, 5};
memset(input_ptr, 0, 6 * sizeof(float)); memset(input_ptr, 0, 6 * sizeof(float));
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/phi/kernels/funcs/vol2col.h" #include "paddle/phi/kernels/funcs/vol2col.h"
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <array>
#include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
...@@ -68,8 +69,8 @@ void testVol2col() { ...@@ -68,8 +69,8 @@ void testVol2col() {
float* input_ptr = float* input_ptr =
input_tmp.mutable_data<float>({1, input_depth, input_height, input_width}, input_tmp.mutable_data<float>({1, input_depth, input_height, input_width},
paddle::platform::CPUPlace()); paddle::platform::CPUPlace());
float arr[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; std::array<float, 12> arr = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
memcpy(input_ptr, arr, 12 * sizeof(float)); memcpy(input_ptr, arr.data(), 12 * sizeof(float));
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
...@@ -88,7 +89,8 @@ void testVol2col() { ...@@ -88,7 +89,8 @@ void testVol2col() {
phi::funcs::Vol2ColFunctor<DeviceContext, float> vol2col; phi::funcs::Vol2ColFunctor<DeviceContext, float> vol2col;
vol2col(*context, input, dilations, strides, paddings, &output); vol2col(*context, input, dilations, strides, paddings, &output);
float vol_2_col[] = {0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}; std::array<float, 16> vol_2_col = {
0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11};
float* out_cfo_ptr; float* out_cfo_ptr;
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
out_cfo_ptr = output.data<float>(); out_cfo_ptr = output.data<float>();
...@@ -103,7 +105,7 @@ void testVol2col() { ...@@ -103,7 +105,7 @@ void testVol2col() {
} }
// Col2Vol test // Col2Vol test
float col_2_vol[] = {0, 2, 2, 3, 8, 5, 6, 14, 8, 9, 20, 11}; std::array<float, 12> col_2_vol = {0, 2, 2, 3, 8, 5, 6, 14, 8, 9, 20, 11};
memset(input_ptr, 0, 12 * sizeof(float)); memset(input_ptr, 0, 12 * sizeof(float));
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <google/protobuf/text_format.h> #include <google/protobuf/text_format.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <array>
#include "paddle/fluid/inference/analysis/analyzer.h" #include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/analysis/ut_helper.h" #include "paddle/fluid/inference/analysis/ut_helper.h"
...@@ -66,10 +67,10 @@ void TestWord2vecPrediction(const std::string& model_path) { ...@@ -66,10 +67,10 @@ void TestWord2vecPrediction(const std::string& model_path) {
// One single batch // One single batch
int64_t data[4] = {1, 2, 3, 4}; std::array<int64_t, 4> data = {1, 2, 3, 4};
PaddleTensor tensor; PaddleTensor tensor;
tensor.shape = std::vector<int>({4, 1}); tensor.shape = std::vector<int>({4, 1});
tensor.data = PaddleBuf(data, sizeof(data)); tensor.data = PaddleBuf(data.data(), sizeof(data));
tensor.dtype = PaddleDType::INT64; tensor.dtype = PaddleDType::INT64;
// For simplicity, we set all the slots with the same data. // For simplicity, we set all the slots with the same data.
...@@ -87,7 +88,7 @@ void TestWord2vecPrediction(const std::string& model_path) { ...@@ -87,7 +88,7 @@ void TestWord2vecPrediction(const std::string& model_path) {
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"Output's data length should be 33168 but got %d", "Output's data length should be 33168 but got %d",
outputs.front().data.length())); outputs.front().data.length()));
float result[5] = { std::array<float, 5> result = {
0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706}; 0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
const size_t num_elements = outputs.front().data.length() / sizeof(float); const size_t num_elements = outputs.front().data.length() / sizeof(float);
// The outputs' buffers are in CPU memory. // The outputs' buffers are in CPU memory.
......
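A detail worth flagging in the word2vec hunk: tensor.data = PaddleBuf(data.data(), sizeof(data)) keeps working after the migration because sizeof(std::array<int64_t, 4>) equals sizeof(int64_t[4]) on mainstream ABIs. The standard does not spell that guarantee out, so a compile-time check makes the assumption explicit:

#include <array>
#include <cstdint>

static_assert(sizeof(std::array<std::int64_t, 4>) == 4 * sizeof(std::int64_t),
              "PaddleBuf size argument assumes std::array adds no padding");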
...@@ -44,7 +44,7 @@ class OperationTest ...@@ -44,7 +44,7 @@ class OperationTest
using Op::Op; using Op::Op;
static const char *name() { return "test.operation2"; } static const char *name() { return "test.operation2"; }
static constexpr uint32_t attributes_num = 2; static constexpr uint32_t attributes_num = 2;
static const char *attributes_name[attributes_num]; static const char *attributes_name[attributes_num]; // NOLINT
static void Verify() {} static void Verify() {}
static void InferMeta(phi::InferMetaContext *infer_meta) { static void InferMeta(phi::InferMetaContext *infer_meta) {
auto fn = PD_INFER_META(phi::CreateInferMeta); auto fn = PD_INFER_META(phi::CreateInferMeta);
...@@ -54,8 +54,9 @@ class OperationTest ...@@ -54,8 +54,9 @@ class OperationTest
IR_DECLARE_EXPLICIT_TYPE_ID(OperationTest) IR_DECLARE_EXPLICIT_TYPE_ID(OperationTest)
IR_DEFINE_EXPLICIT_TYPE_ID(OperationTest) IR_DEFINE_EXPLICIT_TYPE_ID(OperationTest)
const char *OperationTest::attributes_name[attributes_num] = {"op2_attr1", const char *OperationTest::attributes_name[attributes_num] = { // NOLINT
"op2_attr2"}; "op2_attr1",
"op2_attr2"};
// Define a dialect, op1 and op2 will be registered by this dialect. // Define a dialect, op1 and op2 will be registered by this dialect.
class TestDialect : public ir::Dialect { class TestDialect : public ir::Dialect {
......
...@@ -89,7 +89,7 @@ class Operation1 : public ir::Op<Operation1> { ...@@ -89,7 +89,7 @@ class Operation1 : public ir::Op<Operation1> {
using Op::Op; using Op::Op;
static const char *name() { return "test.operation1"; } static const char *name() { return "test.operation1"; }
static constexpr uint32_t attributes_num = 2; static constexpr uint32_t attributes_num = 2;
static const char *attributes_name[attributes_num]; static const char *attributes_name[attributes_num]; // NOLINT
void Verify() { void Verify() {
auto &attributes = this->attributes(); auto &attributes = this->attributes();
if (attributes.count("op1_attr1") == 0 || if (attributes.count("op1_attr1") == 0 ||
...@@ -114,8 +114,9 @@ class Operation1 : public ir::Op<Operation1> { ...@@ -114,8 +114,9 @@ class Operation1 : public ir::Op<Operation1> {
argument.AddAttributes(attributes.begin(), attributes.end()); argument.AddAttributes(attributes.begin(), attributes.end());
} }
}; };
const char *Operation1::attributes_name[attributes_num] = {"op1_attr1", const char *Operation1::attributes_name[attributes_num] = { // NOLINT
"op1_attr2"}; "op1_attr1",
"op1_attr2"};
IR_DECLARE_EXPLICIT_TYPE_ID(Operation1) IR_DECLARE_EXPLICIT_TYPE_ID(Operation1)
IR_DEFINE_EXPLICIT_TYPE_ID(Operation1) IR_DEFINE_EXPLICIT_TYPE_ID(Operation1)
...@@ -127,7 +128,7 @@ class Operation2 ...@@ -127,7 +128,7 @@ class Operation2
using Op::Op; using Op::Op;
static const char *name() { return "test.operation2"; } static const char *name() { return "test.operation2"; }
static constexpr uint32_t attributes_num = 2; static constexpr uint32_t attributes_num = 2;
static const char *attributes_name[attributes_num]; static const char *attributes_name[attributes_num]; // NOLINT
void Verify() { void Verify() {
auto &attributes = this->attributes(); auto &attributes = this->attributes();
if (attributes.count("op2_attr1") == 0 || if (attributes.count("op2_attr1") == 0 ||
...@@ -141,8 +142,9 @@ class Operation2 ...@@ -141,8 +142,9 @@ class Operation2
} }
static void InferShape() { VLOG(2) << "This is op2's InferShape interface."; } static void InferShape() { VLOG(2) << "This is op2's InferShape interface."; }
}; };
const char *Operation2::attributes_name[attributes_num] = {"op2_attr1", const char *Operation2::attributes_name[attributes_num] = { // NOLINT
"op2_attr2"}; "op2_attr1",
"op2_attr2"};
IR_DECLARE_EXPLICIT_TYPE_ID(Operation2) IR_DECLARE_EXPLICIT_TYPE_ID(Operation2)
IR_DEFINE_EXPLICIT_TYPE_ID(Operation2) IR_DEFINE_EXPLICIT_TYPE_ID(Operation2)
......
...@@ -77,7 +77,7 @@ class Operation1 : public ir::Op<Operation1> { ...@@ -77,7 +77,7 @@ class Operation1 : public ir::Op<Operation1> {
using Op::Op; using Op::Op;
static const char *name() { return "test.Operation1"; } static const char *name() { return "test.Operation1"; }
static constexpr uint32_t attributes_num = 2; static constexpr uint32_t attributes_num = 2;
static const char *attributes_name[attributes_num]; static const char *attributes_name[attributes_num]; // NOLINT
void Verify(); void Verify();
static void InferShape() { VLOG(2) << "This is Operation1's InferShape interface."; } static void InferShape() { VLOG(2) << "This is Operation1's InferShape interface."; }
}; };
...@@ -93,8 +93,9 @@ void Operation1::Verify() { ...@@ -93,8 +93,9 @@ void Operation1::Verify() {
throw("Type of attribute: parameter_name is not right."); throw("Type of attribute: parameter_name is not right.");
} }
} }
const char *Operation1::attributes_name[attributes_num] = {"op2_attr1", const char *Operation1::attributes_name[attributes_num] = { // NOLINT
"op2_attr2"}; "op2_attr1",
"op2_attr2"};
IR_DECLARE_EXPLICIT_TYPE_ID(Operation1) IR_DECLARE_EXPLICIT_TYPE_ID(Operation1)
IR_DEFINE_EXPLICIT_TYPE_ID(Operation1) IR_DEFINE_EXPLICIT_TYPE_ID(Operation1)
...@@ -358,7 +359,7 @@ class Conv2dFusionOpTest : public ir::Op<Conv2dFusionOpTest, ...@@ -358,7 +359,7 @@ class Conv2dFusionOpTest : public ir::Op<Conv2dFusionOpTest,
public: public:
using Op::Op; using Op::Op;
static const char *name() { return "pd.conv2d_fusion_test"; } static const char *name() { return "pd.conv2d_fusion_test"; }
static const char *attributes_name[10]; static const char *attributes_name[10]; // NOLINT
static constexpr uint32_t attributes_num = 10; static constexpr uint32_t attributes_num = 10;
static OpInfoTuple GetOpInfo(); static OpInfoTuple GetOpInfo();
static void Build(ir::Builder &builder, // NOLINT static void Build(ir::Builder &builder, // NOLINT
...@@ -413,16 +414,17 @@ class Conv2dFusionOpTest : public ir::Op<Conv2dFusionOpTest, ...@@ -413,16 +414,17 @@ class Conv2dFusionOpTest : public ir::Op<Conv2dFusionOpTest,
static void InferMeta(phi::InferMetaContext *infer_meta); static void InferMeta(phi::InferMetaContext *infer_meta);
}; };
const char *Conv2dFusionOpTest::attributes_name[10] = {"strides", const char *Conv2dFusionOpTest::attributes_name[10] = { // NOLINT
"paddings_t", "strides",
"padding_algorithm", "paddings_t",
"dilations_t", "padding_algorithm",
"groups", "dilations_t",
"data_format", "groups",
"activation", "data_format",
"exhaustive_search", "activation",
"channels", "exhaustive_search",
"user_workspace_size"}; "channels",
"user_workspace_size"};
OpInfoTuple Conv2dFusionOpTest::GetOpInfo() { OpInfoTuple Conv2dFusionOpTest::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = { std::vector<paddle::dialect::OpInputInfo> inputs = {
......
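The attributes_name members keep their C-array type with NOLINT because the declarations live in class bodies shared across translation units, and the IR op machinery indexes them as const char*[attributes_num]. If the declaration sites could change too, a constexpr std::array member would be a drop-in; a sketch (hypothetical type, not part of this patch):

#include <array>

struct Conv2dFusionOpTestAlt {
  static constexpr std::array<const char*, 10> attributes_name = {
      "strides",          "paddings_t",        "padding_algorithm",
      "dilations_t",      "groups",            "data_format",
      "activation",       "exhaustive_search", "channels",
      "user_workspace_size"};
};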
...@@ -40,7 +40,7 @@ phi::Place GetPlaceFromPtr(void* data); ...@@ -40,7 +40,7 @@ phi::Place GetPlaceFromPtr(void* data);
TEST(from_blob, CPU) { TEST(from_blob, CPU) {
// 1. create data // 1. create data
int64_t data[] = {4, 3, 2, 1}; int64_t data[] = {4, 3, 2, 1}; // NOLINT
ASSERT_EQ(paddle::GetPlaceFromPtr(data), phi::CPUPlace()); ASSERT_EQ(paddle::GetPlaceFromPtr(data), phi::CPUPlace());
......
...@@ -47,13 +47,14 @@ TEST(API, case_convert) { ...@@ -47,13 +47,14 @@ TEST(API, case_convert) {
pstring* cpu_strings_x_data = pstring* cpu_strings_x_data =
dev_ctx->template Alloc<pstring>(cpu_strings_x.get()); dev_ctx->template Alloc<pstring>(cpu_strings_x.get());
std::string strs[] = {"A Short Pstring.", std::string strs[] = {"A Short Pstring.", // NOLINT
"A Large Pstring Whose Length Is Longer Than 22."}; "A Large Pstring Whose Length Is Longer Than 22."};
for (int i = 0; i < 2; ++i) { for (int i = 0; i < 2; ++i) {
cpu_strings_x_data[i] = strs[i]; cpu_strings_x_data[i] = strs[i];
} }
// 2. get expected results // 2. get expected results
std::string expected_results[] = {strs[0], strs[0], strs[1], strs[1]}; std::string expected_results[] = {
strs[0], strs[0], strs[1], strs[1]}; // NOLINT
std::transform( std::transform(
strs[0].begin(), strs[0].end(), expected_results[0].begin(), ::tolower); strs[0].begin(), strs[0].end(), expected_results[0].begin(), ::tolower);
std::transform( std::transform(
...@@ -77,7 +78,8 @@ TEST(API, case_convert) { ...@@ -77,7 +78,8 @@ TEST(API, case_convert) {
auto lower_tensor_ptr = lower_tensor->data(); auto lower_tensor_ptr = lower_tensor->data();
auto upper_tensor_ptr = upper_tensor->data(); auto upper_tensor_ptr = upper_tensor->data();
const std::string cpu_results[] = {lower_tensor_ptr[0].data(), const std::string cpu_results[] = {// NOLINT
lower_tensor_ptr[0].data(),
upper_tensor_ptr[0].data(), upper_tensor_ptr[0].data(),
lower_tensor_ptr[1].data(), lower_tensor_ptr[1].data(),
upper_tensor_ptr[1].data()}; upper_tensor_ptr[1].data()};
...@@ -101,12 +103,14 @@ TEST(API, case_convert_utf8) { ...@@ -101,12 +103,14 @@ TEST(API, case_convert_utf8) {
pstring* cpu_strings_x_data = pstring* cpu_strings_x_data =
dev_ctx->template Alloc<pstring>(cpu_strings_x.get()); dev_ctx->template Alloc<pstring>(cpu_strings_x.get());
std::string strs[] = {"óÓsscHloëË", "óÓsscHloëËóÓsscHloëËóÓsscHloëË"}; std::string strs[] = {"óÓsscHloëË",
"óÓsscHloëËóÓsscHloëËóÓsscHloëË"}; // NOLINT
for (int i = 0; i < 2; ++i) { for (int i = 0; i < 2; ++i) {
cpu_strings_x_data[i] = strs[i]; cpu_strings_x_data[i] = strs[i];
} }
// 2. get expected results // 2. get expected results
std::string expected_results[] = {"óósschloëë", std::string expected_results[] = {// NOLINT
"óósschloëë",
"ÓÓSSCHLOËË", "ÓÓSSCHLOËË",
"óósschloëëóósschloëëóósschloëë", "óósschloëëóósschloëëóósschloëë",
"ÓÓSSCHLOËËÓÓSSCHLOËËÓÓSSCHLOËË"}; "ÓÓSSCHLOËËÓÓSSCHLOËËÓÓSSCHLOËË"};
...@@ -125,7 +129,8 @@ TEST(API, case_convert_utf8) { ...@@ -125,7 +129,8 @@ TEST(API, case_convert_utf8) {
auto lower_tensor_ptr = lower_tensor->data(); auto lower_tensor_ptr = lower_tensor->data();
auto upper_tensor_ptr = upper_tensor->data(); auto upper_tensor_ptr = upper_tensor->data();
const char* cpu_results[] = {lower_tensor_ptr[0].data(), const char* cpu_results[] = {// NOLINT
lower_tensor_ptr[0].data(),
upper_tensor_ptr[0].data(), upper_tensor_ptr[0].data(),
lower_tensor_ptr[1].data(), lower_tensor_ptr[1].data(),
upper_tensor_ptr[1].data()}; upper_tensor_ptr[1].data()};
......
...@@ -45,8 +45,8 @@ TEST(unroll_ops, fill_constant) { ...@@ -45,8 +45,8 @@ TEST(unroll_ops, fill_constant) {
} }
TEST(unroll_ops, assign) { TEST(unroll_ops, assign) {
const int a[] = {1, 2, 3, 4, 5}; const int a[] = {1, 2, 3, 4, 5}; // NOLINT
int b[] = {0, 0, 0, 0, 0}; int b[] = {0, 0, 0, 0, 0}; // NOLINT
UnrollAssign<3>::Run(a, b); UnrollAssign<3>::Run(a, b);
EXPECT_EQ(b[0], 1); EXPECT_EQ(b[0], 1);
EXPECT_EQ(b[1], 2); EXPECT_EQ(b[1], 2);
...@@ -56,7 +56,7 @@ TEST(unroll_ops, assign) { ...@@ -56,7 +56,7 @@ TEST(unroll_ops, assign) {
} }
TEST(unroll_ops, var_args_assign) { TEST(unroll_ops, var_args_assign) {
int a[] = {0, 0, 0}; int a[] = {0, 0, 0}; // NOLINT
UnrollVarArgsAssign<int>::Run(a, 1, 2); UnrollVarArgsAssign<int>::Run(a, 1, 2);
EXPECT_EQ(a[0], 1); EXPECT_EQ(a[0], 1);
EXPECT_EQ(a[1], 2); EXPECT_EQ(a[1], 2);
...@@ -64,8 +64,8 @@ TEST(unroll_ops, var_args_assign) { ...@@ -64,8 +64,8 @@ TEST(unroll_ops, var_args_assign) {
} }
TEST(unroll_ops, compare) { TEST(unroll_ops, compare) {
int a[] = {1, 2, 3}; int a[] = {1, 2, 3}; // NOLINT
int b[] = {1, 2, 4}; int b[] = {1, 2, 4}; // NOLINT
EXPECT_TRUE(UnrollCompare<2>::Run(a, b)); EXPECT_TRUE(UnrollCompare<2>::Run(a, b));
EXPECT_FALSE(UnrollCompare<3>::Run(a, b)); EXPECT_FALSE(UnrollCompare<3>::Run(a, b));
...@@ -75,7 +75,7 @@ TEST(unroll_ops, compare) { ...@@ -75,7 +75,7 @@ TEST(unroll_ops, compare) {
} }
TEST(unroll_ops, product) { TEST(unroll_ops, product) {
int a[] = {2, 3, 4}; int a[] = {2, 3, 4}; // NOLINT
EXPECT_EQ(UnrollProduct<3>::Run(a), a[0] * a[1] * a[2]); EXPECT_EQ(UnrollProduct<3>::Run(a), a[0] * a[1] * a[2]);
} }
......
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/funcs/strided_memcpy.h"
+#include <array>
#include "gtest/gtest.h"
#include "paddle/phi/backends/context_pool.h"
@@ -22,7 +23,7 @@ namespace tests {
TEST(StridedMemcpy, CPUCrop) {
  // clang-format off
-  int src[] = {
+  int src[] = {// NOLINT
    0, 1, 2, 0, 0,
    0, 3, 4, 0, 0,
    0, 0, 0, 0, 0,
@@ -31,13 +32,13 @@ TEST(StridedMemcpy, CPUCrop) {
  phi::DDim src_stride({5, 1});
-  int dst[4];
+  std::array<int, 4> dst;
  phi::DDim dst_dim({2, 2});
  phi::DDim dst_stride({2, 1});
  phi::CPUContext ctx;
  phi::funcs::StridedMemcpy<int>(
-      ctx, src + 1, src_stride, dst_dim, dst_stride, dst);
+      ctx, src + 1, src_stride, dst_dim, dst_stride, dst.data());
  ASSERT_EQ(1, dst[0]);
  ASSERT_EQ(2, dst[1]);
@@ -47,26 +48,25 @@ TEST(StridedMemcpy, CPUCrop) {
TEST(StridedMemcpy, CPUConcat) {
  // clang-format off
-  int src[] = {
+  int src[] = { // NOLINT
    1, 2,
    3, 4
  };
  // clang-format on
-  int dst[8];
+  std::array<int, 8> dst;
  phi::DDim src_stride({2, 1});
  phi::DDim dst_dim({2, 2});
  phi::DDim dst_stride({4, 1});
  phi::CPUContext ctx;
  phi::funcs::StridedMemcpy<int>(
-      ctx, src, src_stride, dst_dim, dst_stride, dst);
+      ctx, src, src_stride, dst_dim, dst_stride, dst.data());
  phi::funcs::StridedMemcpy<int>(
-      ctx, src, src_stride, dst_dim, dst_stride, dst + 2);
+      ctx, src, src_stride, dst_dim, dst_stride, dst.data() + 2);
  // clang-format off
-  int expect_dst[] = {
+  int expect_dst[] = { // NOLINT
    1, 2, 1, 2,
    3, 4, 3, 4
  };
......
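The StridedMemcpy hunks show the other common fix: a fixed-size destination buffer becomes `std::array`, and call sites that expect a raw pointer receive `.data()` (or `.data() + offset` in place of pointer arithmetic on the array name). Here is a self-contained sketch of the same crop, with a simplified `StridedCopy` standing in for `phi::funcs::StridedMemcpy`; the helper is hypothetical and does not match the real signature.

```cpp
#include <array>
#include <cstddef>

// Hypothetical stand-in for phi::funcs::StridedMemcpy: copies a
// rows x cols block out of a row-major source with the given strides.
void StridedCopy(const int* src, std::size_t src_stride,
                 std::size_t rows, std::size_t cols,
                 int* dst, std::size_t dst_stride) {
  for (std::size_t r = 0; r < rows; ++r)
    for (std::size_t c = 0; c < cols; ++c)
      dst[r * dst_stride + c] = src[r * src_stride + c];
}

int main() {
  // The source stays a C array with a suppression, as in the patch.
  const int src[] = {0, 1, 2, 0, 0,
                     0, 3, 4, 0, 0,
                     0, 0, 0, 0, 0};  // NOLINT(*-avoid-c-arrays)
  std::array<int, 4> dst;  // was: int dst[4];
  // src + 1 skips the leading zero; dst.data() replaces the implicit
  // array-to-pointer decay the old call relied on.
  StridedCopy(src + 1, 5, 2, 2, dst.data(), 2);
  return (dst[0] == 1 && dst[1] == 2 && dst[2] == 3 && dst[3] == 4) ? 0 : 1;
}
```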
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <array>
#include <set>
#include "gtest/gtest.h"
@@ -41,16 +42,16 @@ TEST(math_function, gemm_notrans_cblas) {
  input1.Resize({2, 3});
  float* input1_ptr = dev_ctx->template Alloc<float>(&input1);
-  float arr1[6] = {0, 1, 2, 3, 4, 5};
-  memcpy(input1_ptr, arr1, 6 * sizeof(float));
+  std::array<float, 6> arr1 = {0, 1, 2, 3, 4, 5};
+  memcpy(input1_ptr, arr1.data(), 6 * sizeof(float));
  input2.Resize({3, 4});
  float* input2_ptr = dev_ctx->template Alloc<float>(&input2);
-  float arr2[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-  memcpy(input2_ptr, arr2, 12 * sizeof(float));
+  std::array<float, 12> arr2 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+  memcpy(input2_ptr, arr2.data(), 12 * sizeof(float));
  input3.Resize({2, 4});
  float* input3_ptr = dev_ctx->template Alloc<float>(&input3);
-  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
-  memcpy(input3_ptr, arr3, 8 * sizeof(float));
+  std::array<float, 8> arr3 = {0, 1, 2, 3, 4, 5, 6, 7};
+  memcpy(input3_ptr, arr3.data(), 8 * sizeof(float));
  GetBlas<float>(*dev_ctx).GEMM(false,
                                false,
@@ -172,16 +173,16 @@ TEST(math_function, gemm_trans_cblas) {
  input1.Resize({2, 3});
  float* input1_ptr = dev_ctx->template Alloc<float>(&input1);
-  float arr1[6] = {0, 1, 2, 3, 4, 5};
-  memcpy(input1_ptr, arr1, 6 * sizeof(float));
+  std::array<float, 6> arr1 = {0, 1, 2, 3, 4, 5};
+  memcpy(input1_ptr, arr1.data(), 6 * sizeof(float));
  input2.Resize({4, 3});
  float* input2_ptr = dev_ctx->template Alloc<float>(&input2);
-  float arr2[12] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
-  memcpy(input2_ptr, arr2, 12 * sizeof(float));
+  std::array<float, 12> arr2 = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
+  memcpy(input2_ptr, arr2.data(), 12 * sizeof(float));
  input3.Resize({2, 4});
  float* input3_ptr = dev_ctx->template Alloc<float>(&input3);
-  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
-  memcpy(input3_ptr, arr3, 8 * sizeof(float));
+  std::array<float, 8> arr3 = {0, 1, 2, 3, 4, 5, 6, 7};
+  memcpy(input3_ptr, arr3.data(), 8 * sizeof(float));
  GetBlas<float>(*dev_ctx).GEMM(false,
                                true,
......
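The GEMM tests are the easiest case to migrate: the arrays only feed `memcpy`, and `std::array` is guaranteed contiguous, so `arr.data()` substitutes directly for the implicit array-to-pointer decay. A minimal sketch of that pattern follows; the plain `buffer` stands in for the device-allocated destination and is not the real API.

```cpp
#include <array>
#include <cstring>

int main() {
  // Stand-in for the Alloc<float>() destination buffer in the tests.
  float buffer[6];  // NOLINT(*-avoid-c-arrays)
  std::array<float, 6> arr1 = {0, 1, 2, 3, 4, 5};  // was: float arr1[6]
  // .data() replaces the old decay; the patch keeps the literal
  // 6 * sizeof(float), though arr1.size() * sizeof(float) would be
  // the more self-maintaining size expression.
  std::memcpy(buffer, arr1.data(), 6 * sizeof(float));
  return buffer[5] == 5.0f ? 0 : 1;
}
```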
@@ -46,7 +46,7 @@ TEST(DEV_API, strings_copy) {
  StringTensor string_dst(alloc, meta);
  // 2. Assign input text
-  const char* input[] = {"A Short Pstring.",
+  const char* input[] = {"A Short Pstring.",  // NOLINT
                         "A Large Pstring Whose Length Is Longer Than 22.",
                         "abc",
                         "defg",
......
@@ -56,7 +56,8 @@ TEST(DEV_API, strings_cast_convert) {
  dense_x_data[1] = long_str;
  // 2. get expected results
-  std::string expected_results[] = {short_str, short_str, long_str, long_str};
+  std::string expected_results[] = {
+      short_str, short_str, long_str, long_str};  // NOLINT
  std::transform(short_str.begin(),
                 short_str.end(),
                 expected_results[0].begin(),
@@ -107,7 +108,8 @@ TEST(DEV_API, strings_cast_convert_utf8) {
  dense_x_data[0] = utf8_str;
  // 2. get expected results
-  std::string expected_results[] = {"óósschloëëóósschloëëóósschloëë",
+  std::string expected_results[] = {// NOLINT
+                                    "óósschloëëóósschloëëóósschloëë",
                                    "ÓÓSSCHLOËËÓÓSSCHLOËËÓÓSSCHLOËË"};
  // 3. test API, ascii encoding
......
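The string-table hunks also take the suppression route rather than a conversion: a braced C array deduces its own length, while `std::array` requires spelling out both the element type and the count, which adds noise to short test tables. A sketch contrasting the two forms, with hypothetical sample values:

```cpp
#include <array>
#include <cstdio>
#include <string>

int main() {
  // As in the patch: the C array deduces its length from the braces.
  const std::string expected[] = {"abc", "ABC"};  // NOLINT(*-avoid-c-arrays)
  // The std::array form is equivalent at runtime but must name both
  // the element type and the count explicitly.
  std::array<std::string, 2> expected2 = {"abc", "ABC"};
  std::printf("%s %s\n", expected[0].c_str(), expected2[1].c_str());
  return 0;
}
```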