Unverified commit 51d31a07, authored by Wang Xin, committed by GitHub

[CodeStyle][CINN] fix cpplint codestyle `[build/namespaces]` (#55051)

Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent 5cbd946f
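For context, cpplint's `[build/namespaces]` check flags `using namespace` directives because they pull every name in a namespace into the current scope. This commit applies two remedies, visible throughout the diff below: replace the directive with explicit using-declarations for just the names a file actually uses, or, where a blanket import is deliberate (mostly in tests), append `// NOLINT` to suppress the check on that line. A minimal, self-contained sketch of both patterns, using a hypothetical namespace `demo` rather than real CINN code:

// demo_namespaces.cc -- illustrative only, not part of this commit.
namespace demo {
int Add(int a, int b) { return a + b; }
int Sub(int a, int b) { return a - b; }
}  // namespace demo

// Flagged by cpplint [build/namespaces]:
//   using namespace demo;

// Remedy 1: using-declarations that import only the required names.
using demo::Add;
using demo::Sub;

// Remedy 2: keep the directive but silence the check for this line.
using namespace demo;  // NOLINT

int main() { return Sub(Add(1, 1), 2); }  // exits 0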
@@ -41,7 +41,7 @@
 namespace cinn {
 namespace auto_schedule {
-using namespace ::cinn::ir;
+using namespace ::cinn::ir; // NOLINT
 FeatureExtractor::FeatureExtractor() {}
@@ -24,7 +24,7 @@ namespace cinn {
 namespace auto_schedule {
 TEST(AutoUnroll, Init) {
-  using namespace ir;
+  using namespace ir; // NOLINT
   Expr M(100);
   Expr N(4);
@@ -50,7 +50,7 @@ TEST(AutoUnroll, Init) {
 }
 TEST(AutoUnroll, UnrollableApply) {
-  using namespace ir;
+  using namespace ir; // NOLINT
   Expr M(100);
   Expr N(4);
@@ -267,7 +267,15 @@ void FloorDivideOpMapper(const paddle::cpp::OpDesc& op_desc,
 } // namespace cinn
 CINN_REGISTER_HELPER(paddle_elementwise) {
-  using namespace cinn::frontend::paddle_mappers;
+  using cinn::frontend::paddle_mappers::AddOpMapper;
+  using cinn::frontend::paddle_mappers::CastOpMapper;
+  using cinn::frontend::paddle_mappers::ElementwiseAddGradOpMapper;
+  using cinn::frontend::paddle_mappers::ElementwiseOpMapper;
+  using cinn::frontend::paddle_mappers::EltwiseType;
+  using cinn::frontend::paddle_mappers::FloorDivideOpMapper;
+  using cinn::frontend::paddle_mappers::PowOpMapper;
+  using cinn::frontend::paddle_mappers::SumOpMapper;
   CINN_REGISTER_OP_MAPPER(add, AddOpMapper)
   CINN_REGISTER_OP_MAPPER(elementwise_add,
                           ElementwiseOpMapper<EltwiseType::kAdd>)
@@ -36,7 +36,6 @@ using framework::shape_t;
 using framework::StrategyFunction;
 using common::Type;
-using namespace lang;
 using cinn::hlir::op::ExternalApiRegistry;
@@ -29,7 +29,8 @@ namespace cinn {
 namespace hlir {
 namespace framework {
-using namespace frontend;
+using frontend::NetBuilder;
+using frontend::RunDecomposer;
 void CodeGen(ir::LoweredFunc& func) {
 #ifdef CINN_WITH_CUDA
@@ -25,8 +25,6 @@ namespace cinn {
 namespace hlir {
 namespace framework {
-using namespace frontend;
 TEST(ParallelCompilerTest, Add_TEST_0) {
   frontend::NetBuilder builder("Add_TEST_0");
   auto A = builder.CreateInput(Float(32), {128, 128}, "A");
@@ -52,7 +50,7 @@ TEST(ParallelCompilerTest, Conv2d_Test_0) {
   auto target = common::DefaultNVGPUTarget();
   auto program = builder.Build();
-  auto graph = Optimize(&program, {}, target);
+  auto graph = frontend::Optimize(&program, {}, target);
   auto scope = BuildScope(target, graph);
   ParallelCompiler::CompileOptions option;
@@ -70,7 +68,7 @@ TEST(ParallelCompilerTest, Matmul_Test_0) {
   auto target = common::DefaultNVGPUTarget();
   auto program = builder.Build();
-  auto graph = Optimize(&program, {}, target);
+  auto graph = frontend::Optimize(&program, {}, target);
   auto scope = BuildScope(target, graph);
   ParallelCompiler::CompileOptions option;
@@ -28,7 +28,12 @@ namespace cinn {
 namespace hlir {
 namespace pass {
-using namespace framework;
+using framework::Graph;
+using framework::Node;
+using framework::NodeData;
+using framework::Operator;
+using framework::OpPatternKind;
+using framework::shape_t;
 class FusionHelperBase {
  public:
@@ -315,7 +315,7 @@ CONDITION_FUNC(reduce_fuse_broadcast) {
   };
   for (auto node : consumer->nodes_set) {
-    if (helper->GetOpKind(node) != kBroadcast) {
+    if (helper->GetOpKind(node) != framework::kBroadcast) {
       continue;
     }
@@ -25,7 +25,7 @@
 namespace cinn {
 namespace optim {
-using namespace poly;
+using namespace poly; // NOLINT
 } // namespace optim
 } // namespace cinn
@@ -26,7 +26,7 @@ namespace cinn {
 namespace optim {
 TEST(UnrollLoops, unrolled_tag) {
-  using namespace ir;
+  using namespace ir; // NOLINT
   Expr M(100);
   Expr N(4);
@@ -64,7 +64,7 @@ TEST(UnrollLoops, unrolled_tag) {
 }
 TEST(UnrollLoops, auto_unroll) {
-  using namespace ir;
+  using namespace ir; // NOLINT
   Expr M(100);
   Expr N(4);
@@ -32,8 +32,7 @@
 namespace cinn {
 namespace poly {
-using namespace cinn::ir;
-using namespace cinn::lang;
+using namespace cinn::ir; // NOLINT
 TEST(TransIdentityExtentToContextId, basic) {
   isl_ctx* ctx = isl_ctx_alloc();
@@ -72,7 +71,7 @@ TEST(AstGen_Build, not_delete_length1_loop) {
   }
   LOG(INFO) << "index_length1 hint = " << index_length1[0]
             << index_length1[1] << index_length1[2] << index_length1[3];
-  Placeholder<float> A("A", len1_shape);
+  lang::Placeholder<float> A("A", len1_shape);
   Tensor B = lang::Compute(
       len1_shape,
       [&](const std::vector<Expr>& indice) {
@@ -20,7 +20,10 @@ namespace py = pybind11;
 namespace cinn {
 namespace pybind {
-using namespace cinn::utils;
+using cinn::utils::EventType;
+using cinn::utils::HostEvent;
+using cinn::utils::HostEventRecorder;
+using cinn::utils::ProfilerHelper;
 void BindUtils(py::module *m) {
   py::enum_<EventType>(*m, "EventType")
@@ -52,7 +52,7 @@ void saxpy(float a, float *x, float *y, float *out, size_t n)
 TEST(CUDAModule, float16) {
   using common::float16;
-  using namespace runtime::cuda::util;
+  using runtime::cuda::util::Vector;
   auto generate_ptx = [] {
     backends::nvrtc::Compiler compiler;
@@ -121,7 +121,7 @@ TEST(CUDAModule, float16) {
 TEST(CUDAModule, bfloat16) {
   using common::bfloat16;
-  using namespace runtime::cuda::util;
+  using runtime::cuda::util::Vector;
   auto generate_ptx = [] {
     backends::nvrtc::Compiler compiler;
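To reproduce the warning locally, cpplint can be run with only this category enabled. This is a usage sketch; the file path is illustrative:

cpplint --filter=-,+build/namespaces path/to/file.cc

Filters apply left to right, so the leading `-` disables every check and `+build/namespaces` re-enables just this one.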