Unverified · commit 2ee5b296 authored by huangjiyi, committed by GitHub

[CodeStyle][CINN] fix cinn cpplint codestyle (#55006)

Parent commit: 7e383885
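
The commit applies two recurring cpplint fixes across the CINN sources: calls to rand() in test and tuning code are kept but annotated with `// NOLINT` (cpplint's runtime/threadsafe_fn check flags rand()), and plain `long` / `long long int` / `unsigned long long` are replaced with the fixed-width `int64_t` / `uint64_t` from `<cstdint>` (cpplint's runtime/int check). A minimal sketch of both patterns, using hypothetical helper names rather than code taken from the patch:

```cpp
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <numeric>
#include <vector>

// Sampling helpers in the tuning/test code keep rand() for simplicity and
// silence cpplint's runtime/threadsafe_fn warning with a trailing NOLINT.
int SampleIndex(int num_applicable) {
  return rand() % num_applicable;  // NOLINT (test-only randomness)
}

// Element counts use a fixed-width type instead of plain `long`, which
// cpplint's runtime/int check rejects.
int64_t ProdSize(const std::vector<int>& shape) {
  return std::accumulate(
      shape.begin(), shape.end(), 1, std::multiplies<int>());
}
```
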
@@ -38,7 +38,7 @@ TEST(CostModel, Basic) {
                             std::vector<float>(feature_size));
   for (int i = 0; i < batch_size; ++i) {
     for (int j = 0; j < feature_size; ++j) {
-      samples[i][j] = rand() % 10;
+      samples[i][j] = rand() % 10;  // NOLINT
     }
   }
...
@@ -37,7 +37,7 @@ void AutoGenRule::ApplyRandomly() {
   CHECK_GT(num_applicable_, 0)
       << "Call " << GetRuleName()
       << "::ApplyRandomly() with NumberApplicable() == 0";
-  int index = rand() % num_applicable_;
+  int index = rand() % num_applicable_;  // NOLINT
   return Apply(index);
 }
...
@@ -90,9 +90,9 @@ class MultiLevelTiling : public AutoGenRule {
     if (candidates.size() == 0) {
       return {1, T(extent)};
     }
-    int index = rand() % candidates.size();
+    int index = rand() % candidates.size();  // NOLINT
     std::vector<T> pick = candidates[index];
-    if (rand() % 2 != 0) {
+    if (rand() % 2 != 0) {  // NOLINT
       T tmp = pick[0];
       pick[0] = pick[1];
       pick[1] = tmp;
...
@@ -52,7 +52,8 @@ TEST(MultiLevelTile, SampleSplitTwo) {
       target, MultiLevelTiling::kConfigs.at(target.arch));
   for (int i = 0; i < 100; ++i) {
-    size_t number_to_split = rand() % 65535 + 2;  // random number in [2, 2^16]
+    size_t number_to_split =
+        rand() % 65535 + 2;  // NOLINT, random number in [2, 2^16]
     std::vector<size_t> split =
         multi_level_tiling.SampleSplitTwo<size_t>(number_to_split);
     EXPECT_EQ(split.size(), 2UL);
@@ -73,8 +74,9 @@ TEST(MultiLevelTile, SampleTileSplit) {
       target, MultiLevelTiling::kConfigs.at(target.arch));
   for (int i = 0; i < 100; ++i) {
-    int number_to_split = rand() % 65535 + 2;  // random number in [2, 2^16]
-    int split_size = rand() % 5 + 1;  // random in [1, 5]
+    int number_to_split =
+        rand() % 65535 + 2;  // NOLINT, random number in [2, 2^16]
+    int split_size = rand() % 5 + 1;  // NOLINT, random in [1, 5]
     std::vector<int> split =
         multi_level_tiling.SampleTileSplit<int>(number_to_split, split_size);
     EXPECT_EQ(split.size(), static_cast<size_t>(split_size));
...
@@ -190,7 +190,7 @@ void CheckResult(raw_func_type test_func,
     input_data_ptrs[i] =
        reinterpret_cast<float*>(malloc(input_data_numel * sizeof(float)));
     for (int j = 0; j < input_data_numel; ++j) {
-      input_data_ptrs[i][j] = (rand() * 1.f) / RAND_MAX;
+      input_data_ptrs[i][j] = (rand() * 1.f) / RAND_MAX;  // NOLINT
     }
   }
   std::vector<float*> test_output_data_ptrs(output_names.size());
...
@@ -29,7 +29,7 @@
 namespace cinn {
 namespace backends {
-const std::string CodeGenCUDA_Dev::source_header_ =
+const std::string CodeGenCUDA_Dev::source_header_ =  // NOLINT
     R"(#include <cstdint>
 #define CINN_WITH_CUDA
...
@@ -88,8 +88,8 @@ TEST(cinn_computation, basic_cpu) {
   std::vector<float> hostD(M * N);
   std::vector<float> hostD_expected(M * N);
   for (int i = 0; i < M * N; i++) {
-    hostA[i] = static_cast<float>(rand()) / INT_MAX;
-    hostB[i] = static_cast<float>(rand()) / INT_MAX;
+    hostA[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
+    hostB[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
     hostD_expected[i] = hostA[i] * 2 + hostB[i];
   }
@@ -126,8 +126,8 @@ TEST(cinn_computation, basic_gpu) {
   std::vector<float> hostD(M * N);
   std::vector<float> hostD_expected(M * N);
   for (int i = 0; i < M * N; i++) {
-    hostA[i] = static_cast<float>(rand()) / INT_MAX;
-    hostB[i] = static_cast<float>(rand()) / INT_MAX;
+    hostA[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
+    hostB[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
     hostD_expected[i] = hostA[i] * 2 + hostB[i];
   }
@@ -165,7 +165,7 @@ TEST(cinn_computation, net_builder_cpu) {
   auto load_input = [=](hlir::framework::Tensor t) {
     float *ptr = t->mutable_data<float>(target);
     for (int i = 0; i < t->shape().numel(); i++) {
-      ptr[i] = static_cast<float>(rand()) / INT_MAX;
+      ptr[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
     }
   };
...
@@ -232,7 +232,8 @@ TEST(cinn_computation, fc_execute_cpu) {
   auto A = inputs[0];
   ASSERT_EQ(A->shape().numel(), 1 * 30);
   float *ptrA = A->mutable_data<float>(target);
-  for (int i = 0; i < 30; i++) ptrA[i] = static_cast<float>(rand()) / INT_MAX;
+  for (int i = 0; i < 30; i++)
+    ptrA[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
   for (int i = 0; i < 30; i++) ptrA[i] = static_cast<float>(0);
   compute->Execute();
 }
@@ -253,7 +254,7 @@ TEST(cinn_computation, fc_execute_gpu) {
   auto out = outputs[0];
   std::vector<float> hostA(30);
-  for (float &v : hostA) v = static_cast<float>(rand()) / INT_MAX;
+  for (float &v : hostA) v = static_cast<float>(rand()) / INT_MAX;  // NOLINT
   compute->SetTensorData(
       A, reinterpret_cast<void *>(hostA.data()), hostA.size() * sizeof(float));
...
@@ -163,7 +163,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmax(
   ir_sch.SetBuffer(blocks[0], "local");
   ir_sch.SetBuffer(blocks[1], "local");
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
...
@@ -160,7 +160,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmin(
   // exceed the limit.
   ir_sch.SetBuffer(blocks[0], "local");
   ir_sch.SetBuffer(blocks[1], "local");
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
...
@@ -146,7 +146,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForGatherNd(
   ir::ModuleExpr mod_expr(vec_ast);
   ir::IRSchedule ir_sch(mod_expr);
   ir_sch.MergeExprs();
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
...
@@ -201,7 +201,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForRepeat(
   ir::ModuleExpr mod_expr(vec_ast);
   ir::IRSchedule ir_sch(mod_expr);
   ir_sch.MergeExprs();
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
...
@@ -240,7 +240,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForResize(
   ir::ModuleExpr mod_expr(vec_ast);
   ir::IRSchedule ir_sch(mod_expr);
   ir_sch.MergeExprs();
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
...
@@ -218,7 +218,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForSort(
   ir_sch.SetBuffer(blocks[0], "local");
   ir_sch.SetBuffer(blocks[1], "local");
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
@@ -311,7 +311,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgSort(
   // the size will exceed the limit.
   // TODO: There is a bug, setting buffer to "local" here will cause the var
   // declared twice at CodeGen. ir_sch.SetBuffer(blocks[0], "local");
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
...
@@ -124,14 +124,14 @@ inline cublasStatus_t cublasGemmStridedBatched(cudaDataType_t dtype,
                                                float alpha,
                                                const void *A,
                                                int lda,
-                                               long long int strideA,
+                                               int64_t strideA,
                                                const void *B,
                                                int ldb,
-                                               long long int strideB,
+                                               int64_t strideB,
                                                float beta,
                                                void *C,
                                                int ldc,
-                                               long long int strideC,
+                                               int64_t strideC,
                                                int batchCount) {
   if (dtype == CUDA_R_32F) {
     return cublasSgemmStridedBatched(handle,
...
@@ -1979,13 +1979,13 @@ class CurandGenerator {
   curandGenerator_t &GetGenerator() { return generator_; }
-  CurandGenerator &SetOffset(unsigned long long offset = 0ULL) {
+  CurandGenerator &SetOffset(uint64_t offset = 0ULL) {
     CURAND_CALL(curandSetGeneratorOffset(generator_, offset));
     VLOG(4) << "Set curand generator offset to: " << offset;
     return *this;
   }
-  CurandGenerator &SetSeed(unsigned long long seed = 0ULL) {
+  CurandGenerator &SetSeed(uint64_t seed = 0ULL) {
     // set global seed if seed is zero
     auto rand_seed = (seed == 0ULL) ? RandomSeed::GetOrSet() : seed;
     if (rand_seed != 0ULL && rand_seed != seed_) {
@@ -2009,7 +2009,7 @@ class CurandGenerator {
  private:
   curandGenerator_t generator_;
-  unsigned long long seed_ = 0ULL;
+  uint64_t seed_ = 0ULL;
   cudaStream_t stream_ = nullptr;
 };
...
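
The uint64_t substitution in the curand wrapper relies on the seed/offset values still matching the `unsigned long long` parameters that curandSetGeneratorOffset and curandSetPseudoRandomGeneratorSeed expect; on the 64-bit targets CINN builds for, the two types have the same width. A small compile-time check, not part of the patch, would make that assumption explicit:

```cpp
#include <cstdint>

// Assumption (not in the patch): uint64_t and unsigned long long have the
// same width on the supported platforms, so the renamed seed_/offset values
// convert to the CUDA API parameter type without narrowing.
static_assert(sizeof(uint64_t) == sizeof(unsigned long long),
              "curand seed/offset types must match unsigned long long");
```
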
@@ -196,16 +196,16 @@ bool GetCinnCudnnDeterministic() {
 #endif
 }

-unsigned long long RandomSeed::seed_ = 0ULL;
-unsigned long long RandomSeed::GetOrSet(unsigned long long seed) {
+uint64_t RandomSeed::seed_ = 0ULL;
+uint64_t RandomSeed::GetOrSet(uint64_t seed) {
   if (seed != 0ULL) {
     seed_ = seed;
   }
   return seed_;
 }

-unsigned long long RandomSeed::Clear() {
+uint64_t RandomSeed::Clear() {
   auto old_seed = seed_;
   seed_ = 0ULL;
   return old_seed;
...
@@ -31,15 +31,15 @@ bool CanUseNvccCompiler();
 class RandomSeed {
  public:
-  static unsigned long long GetOrSet(unsigned long long seed = 0);
-  static unsigned long long Clear();
+  static uint64_t GetOrSet(uint64_t seed = 0);
+  static uint64_t Clear();

  private:
   RandomSeed() = default;
   RandomSeed(const RandomSeed &) = delete;
   RandomSeed &operator=(const RandomSeed &) = delete;

-  static unsigned long long seed_;
+  static uint64_t seed_;
 };

 bool IsCompiledWithCUDA();
...
@@ -261,16 +261,16 @@ TEST_DEFAULT1(depthwise_conv2d,
 // layout_transform
 std::vector<std::vector<int>> shapes_layout_transform = {{512, 512, 3, 3}};
-std::string src_layout = "OIHW";
-std::string dst_layout = "OIHW16i16o";
+std::string src_layout = "OIHW";        // NOLINT
+std::string dst_layout = "OIHW16i16o";  // NOLINT
 absl::flat_hash_map<std::string, AttrType> attr_store_layout_transform = {
     {"src_layout", src_layout}, {"dst_layout", dst_layout}};
 TEST_DEFAULT1(
     layout_transform, layout_transform, type, type, attr_store_layout_transform)

 std::vector<std::vector<int>> shapes_layout_transform1 = {{64, 3, 7, 7}};
-std::string src_layout1 = "OIHW";
-std::string dst_layout1 = "OIHW3i32o";
+std::string src_layout1 = "OIHW";       // NOLINT
+std::string dst_layout1 = "OIHW3i32o";  // NOLINT
 absl::flat_hash_map<std::string, AttrType> attr_store_layout_transform1 = {
     {"src_layout", src_layout1}, {"dst_layout", dst_layout1}};
 TEST_DEFAULT1(layout_transform,
@@ -284,7 +284,7 @@ hlir::framework::NodeAttr attrs;
 std::vector<int> kernel_size = {3, 3};
 std::vector<int> stride_size = {2, 2};
 std::vector<int> padding_size = {1, 1, 1, 1};
-std::string pool_type = "max";
+std::string pool_type = "max";  // NOLINT
 absl::flat_hash_map<std::string, AttrType> attr_store_pool2d = {
     {"kernel_size", kernel_size},
     {"stride_size", stride_size},
...