Unverified commit 2e6de4ce, authored by 张春乔, committed by GitHub

[CodeStyle][CINN] fix end of file and trailing whitespace (#54955)

* cinn 5 6

* roll back something

* roll back something

* fix codestyle of cinn

* revert symlink changes

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent 865b6548
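The two hygiene rules this commit enforces are normally automated in Paddle's pre-commit setup (the standard `trailing-whitespace` and `end-of-file-fixer` hooks); the long-line rewraps in the hunks below come from clang-format. As a rough, self-contained illustration of the two rules (the file name and program structure are hypothetical, not part of this patch):

```cpp
// lint_ws.cc -- hypothetical checker for the two rules this commit fixes:
// 1) no trailing whitespace on any line, 2) the file ends with a newline.
#include <fstream>
#include <iostream>
#include <string>

int main(int argc, char* argv[]) {
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " <file>\n";
    return 2;
  }
  std::ifstream in(argv[1], std::ios::binary);
  std::string line;
  int lineno = 0;
  int violations = 0;
  while (std::getline(in, line)) {
    ++lineno;
    if (!line.empty() && (line.back() == ' ' || line.back() == '\t')) {
      std::cout << argv[1] << ":" << lineno << ": trailing whitespace\n";
      ++violations;
    }
  }
  // Check the last byte of the file for the end-of-file newline.
  std::ifstream raw(argv[1], std::ios::binary | std::ios::ate);
  if (raw.tellg() > 0) {
    raw.seekg(-1, std::ios::end);
    char last = '\0';
    raw.get(last);
    if (last != '\n') {
      std::cout << argv[1] << ": no newline at end of file\n";
      ++violations;
    }
  }
  return violations == 0 ? 0 : 1;
}
```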
@@ -21,36 +21,48 @@
 namespace cinn {
 namespace auto_schedule {

-std::unique_ptr<BlockSampler> BlockSampler::Make(const std::vector<ir::Expr>& all_blocks,
-                                                 bool default_remove_policy,
-                                                 const std::string& strategy,
-                                                 utils::LinearRandomEngine::StateType rand_seed,
-                                                 const std::vector<int>& weights) {
+std::unique_ptr<BlockSampler> BlockSampler::Make(
+    const std::vector<ir::Expr>& all_blocks,
+    bool default_remove_policy,
+    const std::string& strategy,
+    utils::LinearRandomEngine::StateType rand_seed,
+    const std::vector<int>& weights) {
   CHECK_GT(all_blocks.size(), 0) << "Empty block list";
   if (strategy == "traversal") {
-    VLOG(6) << "Init TraversalBlockSampler with block num = " << all_blocks.size();
-    return std::make_unique<TraversalBlockSampler>(all_blocks, default_remove_policy);
+    VLOG(6) << "Init TraversalBlockSampler with block num = "
+            << all_blocks.size();
+    return std::make_unique<TraversalBlockSampler>(all_blocks,
+                                                   default_remove_policy);
   } else if (strategy == "probabilistic") {
-    VLOG(6) << "Init ProbabilisticBlockSampler with block num = " << all_blocks.size();
-    return std::make_unique<ProbabilisticBlockSampler>(all_blocks, default_remove_policy, rand_seed, weights);
+    VLOG(6) << "Init ProbabilisticBlockSampler with block num = "
+            << all_blocks.size();
+    return std::make_unique<ProbabilisticBlockSampler>(
+        all_blocks, default_remove_policy, rand_seed, weights);
   }

   LOG(FATAL) << "Unimplemented strategy:" << strategy;
   return nullptr;
 }

-BlockSampler::BlockSampler(const std::vector<ir::Expr>& all_blocks, bool default_remove_policy) {
+BlockSampler::BlockSampler(const std::vector<ir::Expr>& all_blocks,
+                           bool default_remove_policy) {
   default_remove_policy_ = default_remove_policy;
-  std::transform(all_blocks.begin(), all_blocks.end(), std::back_inserter(all_blocks_), [](const ir::Expr& block_expr) {
-    const ir::ScheduleBlockRealize* block_realize = block_expr.As<ir::ScheduleBlockRealize>();
-    const ir::ScheduleBlock* block = block_realize->schedule_block.As<ir::ScheduleBlock>();
-    return block->name;
-  });
+  std::transform(all_blocks.begin(),
+                 all_blocks.end(),
+                 std::back_inserter(all_blocks_),
+                 [](const ir::Expr& block_expr) {
+                   const ir::ScheduleBlockRealize* block_realize =
+                       block_expr.As<ir::ScheduleBlockRealize>();
+                   const ir::ScheduleBlock* block =
+                       block_realize->schedule_block.As<ir::ScheduleBlock>();
+                   return block->name;
+                 });
 }

 std::string TraversalBlockSampler::NextBlock(bool remove) {
   if (cur_idx_ < all_blocks_.size()) {
-    VLOG(6) << "[TraversalBlockSampler] next block: " << all_blocks_.at(cur_idx_);
+    VLOG(6) << "[TraversalBlockSampler] next block: "
+            << all_blocks_.at(cur_idx_);
     std::string block_name = all_blocks_.at(cur_idx_);
     if (remove) {
       ++cur_idx_;
@@ -62,11 +74,14 @@ std::string TraversalBlockSampler::NextBlock(bool remove) {
   return "";
 }

-ProbabilisticBlockSampler::ProbabilisticBlockSampler(const std::vector<ir::Expr>& all_blocks,
-                                                     bool default_remove_policy,
-                                                     utils::LinearRandomEngine::StateType rand_seed,
-                                                     const std::vector<int>& weights)
-    : BlockSampler(all_blocks, default_remove_policy), weights_(weights), rand_seed_(rand_seed) {
+ProbabilisticBlockSampler::ProbabilisticBlockSampler(
+    const std::vector<ir::Expr>& all_blocks,
+    bool default_remove_policy,
+    utils::LinearRandomEngine::StateType rand_seed,
+    const std::vector<int>& weights)
+    : BlockSampler(all_blocks, default_remove_policy),
+      weights_(weights),
+      rand_seed_(rand_seed) {
   if (weights.empty()) {
     weights_.resize(all_blocks.size(), 1);
   } else {
@@ -79,14 +94,16 @@ std::string ProbabilisticBlockSampler::NextBlock(bool remove) {
   if (remains_ == 0) {
     return "";
   }
-  int block_idx = utils::SampleDiscreteFromDistribution<int>(weights_, &rand_seed_);
+  int block_idx =
+      utils::SampleDiscreteFromDistribution<int>(weights_, &rand_seed_);
   if (remove) {
     weights_[block_idx] = 0;
     --remains_;
   }
-  VLOG(6) << "[ProbabilisticBlockSampler] next block: " << all_blocks_.at(block_idx);
+  VLOG(6) << "[ProbabilisticBlockSampler] next block: "
+          << all_blocks_.at(block_idx);
   return all_blocks_.at(block_idx);
 }

 } // namespace auto_schedule
-} // namespace cinn
\ No newline at end of file
+} // namespace cinn
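For readers of the hunks above: `BlockSampler::Make` is a factory keyed on the `strategy` string, and the traversal sampler signals exhaustion by returning an empty string from `NextBlock`. A hypothetical caller (sketch only; the include path and surrounding code are assumptions, not part of this patch) would drain it like so:

```cpp
#include <string>
#include <vector>

#include "paddle/cinn/auto_schedule/search_space/block_sampler.h"  // assumed path

// Visit every schedule block once using the traversal strategy.
void VisitAllBlocks(const std::vector<cinn::ir::Expr>& all_blocks) {
  auto sampler = cinn::auto_schedule::BlockSampler::Make(
      all_blocks,
      /*default_remove_policy=*/true,
      /*strategy=*/"traversal",
      /*rand_seed=*/0,
      /*weights=*/{});
  // An empty string means the sampler has run out of blocks.
  for (std::string name = sampler->NextBlock(/*remove=*/true); !name.empty();
       name = sampler->NextBlock(/*remove=*/true)) {
    VLOG(6) << "visiting block " << name;
  }
}
```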
@@ -20,16 +20,19 @@
 namespace cinn {
 namespace auto_schedule {

-std::unique_ptr<RuleSampler> RuleSampler::Make(const std::vector<AutoGenRule*>& potential_rules,
-                                               bool default_remove_policy,
-                                               const std::string& strategy,
-                                               utils::LinearRandomEngine::StateType rand_seed,
-                                               const std::vector<int>& weights) {
+std::unique_ptr<RuleSampler> RuleSampler::Make(
+    const std::vector<AutoGenRule*>& potential_rules,
+    bool default_remove_policy,
+    const std::string& strategy,
+    utils::LinearRandomEngine::StateType rand_seed,
+    const std::vector<int>& weights) {
   CHECK_GT(potential_rules.size(), 0) << "Empty rule list";
   if (strategy == "traversal") {
-    return std::make_unique<TraversalRuleSampler>(potential_rules, default_remove_policy);
+    return std::make_unique<TraversalRuleSampler>(potential_rules,
+                                                  default_remove_policy);
   } else if (strategy == "probabilistic") {
-    return std::make_unique<ProbabilisticRuleSampler>(potential_rules, default_remove_policy, rand_seed, weights);
+    return std::make_unique<ProbabilisticRuleSampler>(
+        potential_rules, default_remove_policy, rand_seed, weights);
   }

   LOG(FATAL) << "Unimplemented strategy:" << strategy;
@@ -48,10 +51,11 @@ AutoGenRule* TraversalRuleSampler::NextRule(bool remove) {
   return nullptr;
 }

-ProbabilisticRuleSampler::ProbabilisticRuleSampler(const std::vector<AutoGenRule*>& potential_rules,
-                                                   bool default_remove_policy,
-                                                   utils::LinearRandomEngine::StateType rand_seed,
-                                                   const std::vector<int>& weights)
+ProbabilisticRuleSampler::ProbabilisticRuleSampler(
+    const std::vector<AutoGenRule*>& potential_rules,
+    bool default_remove_policy,
+    utils::LinearRandomEngine::StateType rand_seed,
+    const std::vector<int>& weights)
     : RuleSampler(potential_rules, default_remove_policy),
       weights_(weights),
       rand_seed_(utils::LinearRandomEngine::NormalizeState(rand_seed)) {
@@ -67,7 +71,8 @@ AutoGenRule* ProbabilisticRuleSampler::NextRule(bool remove) {
   if (remains_ == 0) {
     return nullptr;
   }
-  int rule_idx = utils::SampleDiscreteFromDistribution<int>(weights_, &rand_seed_);
+  int rule_idx =
+      utils::SampleDiscreteFromDistribution<int>(weights_, &rand_seed_);
   if (remove) {
     weights_[rule_idx] = 0;
     --remains_;
@@ -77,4 +82,4 @@ AutoGenRule* ProbabilisticRuleSampler::NextRule(bool remove) {
 }

 } // namespace auto_schedule
-} // namespace cinn
\ No newline at end of file
+} // namespace cinn
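`ProbabilisticRuleSampler::NextRule` above is sampling without replacement: an index is drawn with probability proportional to its weight, and passing `remove = true` zeroes that weight so the rule cannot be drawn again. A standalone sketch of the same scheme, with `std::discrete_distribution` standing in for `utils::SampleDiscreteFromDistribution`:

```cpp
#include <iostream>
#include <random>
#include <vector>

int main() {
  std::vector<int> weights = {1, 3, 1, 5};
  std::mt19937 rng(42);
  int remains = static_cast<int>(weights.size());
  while (remains > 0) {
    // Draw an index i with probability weights[i] / sum(weights).
    std::discrete_distribution<int> dist(weights.begin(), weights.end());
    int idx = dist(rng);
    std::cout << "sampled index " << idx << "\n";
    weights[idx] = 0;  // "remove": this index can never be drawn again
    --remains;
  }
  return 0;
}
```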
@@ -20,20 +20,23 @@ namespace cinn {
 namespace frontend {
 namespace science_mappers {

-void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) {
+void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc,
+                          const OpMapperContext& ctx) {
   CHECK_EQ(op_desc.Output("Y").size(), 1UL);
   auto y_name = op_desc.Output("Y").front();

-  auto shape = utils::ToShapeType(utils::GetAttrOrDefault<std::vector<int64_t>>(op_desc, "shape"));
+  auto shape = utils::ToShapeType(
+      utils::GetAttrOrDefault<std::vector<int64_t>>(op_desc, "shape"));
   auto value = utils::GetAttrOrDefault<float>(op_desc, "value", 0.0f);
-  auto dtype_id = utils::GetAttrOrDefault<int>(op_desc, "dtype", static_cast<int>(paddle::cpp::VarDescAPI::Type::FP32));
+  auto dtype_id = utils::GetAttrOrDefault<int>(
+      op_desc, "dtype", static_cast<int>(paddle::cpp::VarDescAPI::Type::FP32));
   auto dtype_pd = static_cast<paddle::cpp::VarDescAPI::Type>(dtype_id);
   auto dtype_cinn = utils::CppVarType2CommonType(dtype_pd);
-  auto dtype = common::Type2Str(dtype_cinn); 
+  auto dtype = common::Type2Str(dtype_cinn);

-  VLOG(4) << "fill constant (" << value << ") with shape (" << cinn::utils::Join(shape, ",") << ") and dtype [" << dtype
-          << "]";
+  VLOG(4) << "fill constant (" << value << ") with shape ("
+          << cinn::utils::Join(shape, ",") << ") and dtype [" << dtype << "]";

   const auto& cinn_name = cinn::utils::TransValidVarName(y_name);
   CheckVarNameValid(cinn_name);
@@ -44,19 +47,23 @@ void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) {
   ctx.AddVarModelToProgram(y_name, out->id);
 }

-void BroadcastOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) {
+void BroadcastOpMapper(const paddle::cpp::OpDesc& op_desc,
+                       const OpMapperContext& ctx) {
   CHECK_EQ(op_desc.Input("X").size(), 1UL);
   auto x_name = op_desc.Input("X").front();
   CHECK_EQ(op_desc.Output("Y").size(), 1UL);
   auto y_name = op_desc.Output("Y").front();

-  CHECK(op_desc.HasAttr("shape")) << "The broadcast_p operator should has 'shape' attribute, but " << x_name
-                                  << "'s broadcast hasn't.";
+  CHECK(op_desc.HasAttr("shape"))
+      << "The broadcast_p operator should has 'shape' attribute, but " << x_name
+      << "'s broadcast hasn't.";

-  auto y_shape = utils::ToShapeType(utils::GetAttrOrDefault<std::vector<int64_t>>(op_desc, "shape"));
-  auto x       = ctx.GetVar(x_name);
+  auto y_shape = utils::ToShapeType(
+      utils::GetAttrOrDefault<std::vector<int64_t>>(op_desc, "shape"));
+  auto x = ctx.GetVar(x_name);

-  VLOG(4) << "Broadcast " << x_name << " from shape (" << cinn::utils::Join(x->shape, ",") << ") to shape ("
+  VLOG(4) << "Broadcast " << x_name << " from shape ("
+          << cinn::utils::Join(x->shape, ",") << ") to shape ("
           << cinn::utils::Join(y_shape, ",") << ").";

   auto out = ctx.Builder()->BroadcastTo(x, y_shape);
@@ -70,8 +77,10 @@ void BroadcastOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) {
 } // namespace cinn

 CINN_REGISTER_HELPER(science_broadcast) {
-  CINN_REGISTER_OP_MAPPER(fill_constant_p, cinn::frontend::science_mappers::FillConstantOpMapper)
-  CINN_REGISTER_OP_MAPPER(broadcast_p, cinn::frontend::science_mappers::BroadcastOpMapper)
+  CINN_REGISTER_OP_MAPPER(fill_constant_p,
+                          cinn::frontend::science_mappers::FillConstantOpMapper)
+  CINN_REGISTER_OP_MAPPER(broadcast_p,
+                          cinn::frontend::science_mappers::BroadcastOpMapper)
   return true;
-}
\ No newline at end of file
+}
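Both mappers above follow the same recipe: validate the single input/output slots, read attributes via `utils::GetAttrOrDefault`, emit one `ctx.Builder()` call, and register the result under both its CINN name and the original model name. A hypothetical third mapper in the same style (the op name, the `Scale` builder method, and all identifiers here are illustrative assumptions, not part of this patch):

```cpp
// Sketch of a mapper for an imaginary scale_p op, mirroring the pattern above.
void ScaleOpMapper(const paddle::cpp::OpDesc& op_desc,
                   const OpMapperContext& ctx) {
  CHECK_EQ(op_desc.Input("X").size(), 1UL);
  auto x_name = op_desc.Input("X").front();
  CHECK_EQ(op_desc.Output("Y").size(), 1UL);
  auto y_name = op_desc.Output("Y").front();

  auto scale = utils::GetAttrOrDefault<float>(op_desc, "scale", 1.0f);
  auto x = ctx.GetVar(x_name);
  auto out = ctx.Builder()->Scale(x, scale);  // assumed builder method

  ctx.AddVar(y_name, out);
  ctx.AddVarModelToProgram(y_name, out->id);
}
```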
@@ -19,7 +19,9 @@
 namespace cinn {
 namespace frontend {

-int GetSize(std::vector<int>& shape) { return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>()); }
+int GetSize(std::vector<int>& shape) {
+  return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
+}

 void RunModelTest(Program& program,
                   const std::vector<Variable>&& inputs,
@@ -28,13 +30,17 @@ void RunModelTest(Program& program,
   std::vector<std::vector<float>> inputs_data;
   for (auto input : inputs) {
     inputs_data.emplace_back(GetSize(input->shape));
-    InitRandomVector<float>(&inputs_data.back(), inputs_data.back().size(), 0.0f, 1.0f, 1e-3);
+    InitRandomVector<float>(
+        &inputs_data.back(), inputs_data.back().size(), 0.0f, 1.0f, 1e-3);
   }

   auto target = common::DefaultTarget();
-  std::unordered_map<std::string, std::pair<std::vector<float>, std::vector<float>>> outputs;
+  std::unordered_map<std::string,
+                     std::pair<std::vector<float>, std::vector<float>>>
+      outputs;
   {
-    auto graph = std::make_shared<hlir::framework::Graph>(program, fetch_ids, target);
+    auto graph =
+        std::make_shared<hlir::framework::Graph>(program, fetch_ids, target);
     hlir::framework::ApplyPass(graph.get(), "OpFusionPass");
     hlir::framework::ApplyPass(graph.get(), "FusionMergePass");
@@ -45,7 +51,7 @@ void RunModelTest(Program& program,
     for (int idx = 0; idx < inputs.size(); ++idx) {
       scope->Var<hlir::framework::Tensor>(inputs[idx]->id);
       auto tensor = scope->GetTensor(inputs[idx]->id);
-      auto* data  = tensor->mutable_data<float>(target);
+      auto* data = tensor->mutable_data<float>(target);
       CopyFromVector(inputs_data[idx], tensor, target);
     }
     run_program->Execute();
@@ -53,11 +59,13 @@
       auto tensor = scope->GetTensor(id);
       std::vector<float> data(tensor->shape().numel());
       CopyToVector(tensor, &data);
-      outputs[id] = std::pair<std::vector<float>, std::vector<float>>(data, std::vector<float>());
+      outputs[id] = std::pair<std::vector<float>, std::vector<float>>(
+          data, std::vector<float>());
     }
   }
   {
-    auto graph = std::make_shared<hlir::framework::Graph>(program, fetch_ids, target);
+    auto graph =
+        std::make_shared<hlir::framework::Graph>(program, fetch_ids, target);
     hlir::framework::ApplyPass(graph.get(), "DotMerger");
     hlir::framework::ApplyPass(graph.get(), "OpFusionPass");
     hlir::framework::ApplyPass(graph.get(), "FusionMergePass");
@@ -69,7 +77,7 @@ void RunModelTest(Program& program,
     for (int idx = 0; idx < inputs.size(); ++idx) {
       scope->Var<hlir::framework::Tensor>(inputs[idx]->id);
       auto tensor = scope->GetTensor(inputs[idx]->id);
-      auto* data  = tensor->mutable_data<float>(target);
+      auto* data = tensor->mutable_data<float>(target);
       CopyFromVector(inputs_data[idx], tensor, target);
     }
     run_program->Execute();
@@ -89,24 +97,24 @@
 TEST(DotMerger, Test_dot_merger0) {
   int m = 2, k = 1024, n = 100, n1 = 100, n2 = 100, axis = 1;
   NetBuilder net_builder("Test_dot_merger0");
-  auto A         = net_builder.CreateInput(Float(32), {m, k}, "A");
-  auto B         = net_builder.CreateInput(Float(32), {k, n1}, "B");
-  auto C         = net_builder.CreateInput(Float(32), {k, n2}, "C");
-  auto D         = net_builder.CreateInput(Float(32), {n1, k}, "D");
-  auto E         = net_builder.CreateInput(Float(32), {n2, k}, "E");
-  auto F         = net_builder.CreateInput(Float(32), {k, n}, "F");
-  auto G         = net_builder.Matmul(A, B);
-  auto H         = net_builder.Matmul(A, C);
-  auto G1        = net_builder.Matmul(D, F);
-  auto H1        = net_builder.Matmul(E, F);
-  auto G2        = net_builder.Concat({G, H}, axis);
-  auto H2        = net_builder.Concat({G1, H1}, (1 - axis));
-  auto F1        = net_builder.Matmul(G2, H2);
+  auto A = net_builder.CreateInput(Float(32), {m, k}, "A");
+  auto B = net_builder.CreateInput(Float(32), {k, n1}, "B");
+  auto C = net_builder.CreateInput(Float(32), {k, n2}, "C");
+  auto D = net_builder.CreateInput(Float(32), {n1, k}, "D");
+  auto E = net_builder.CreateInput(Float(32), {n2, k}, "E");
+  auto F = net_builder.CreateInput(Float(32), {k, n}, "F");
+  auto G = net_builder.Matmul(A, B);
+  auto H = net_builder.Matmul(A, C);
+  auto G1 = net_builder.Matmul(D, F);
+  auto H1 = net_builder.Matmul(E, F);
+  auto G2 = net_builder.Concat({G, H}, axis);
+  auto H2 = net_builder.Concat({G1, H1}, (1 - axis));
+  auto F1 = net_builder.Matmul(G2, H2);
   auto fetch_ids = {F1->id};
-  auto program   = net_builder.Build();
+  auto program = net_builder.Build();
   std::cout << "RunModelTest" << std::endl;
   RunModelTest(program, {A, B, C, D, E, F}, fetch_ids);
 }

 } // namespace frontend
-} // namespace cinn
\ No newline at end of file
+} // namespace cinn
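For context on what this test exercises: `G = A·B` and `H = A·C` share their left operand, while `G1 = D·F` and `H1 = E·F` share their right operand, so the `DotMerger` pass can fuse each concatenated pair into a single larger matmul via

$$\operatorname{concat}(AB,\ AC;\ \text{axis}{=}1) = A \cdot \operatorname{concat}(B,\ C;\ \text{axis}{=}1), \qquad \operatorname{concat}(DF,\ EF;\ \text{axis}{=}0) = \operatorname{concat}(D,\ E;\ \text{axis}{=}0) \cdot F,$$

and `RunModelTest` checks that the graph produces the same outputs with and without the pass applied.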
 # Design of CINN/DSL
-This module is a simple DSL defined in CINN project. 
+This module is a simple DSL defined in CINN project.
 The DSL module aims to represent the overall computation in a hardware independent way.

 ## Concepts
@@ -78,14 +78,14 @@ A matrix multiplication
 Var i, j, k;
 Placeholder<float> A({M, K}), B({K, N});

-Tensor C = Compute({M, N}/*output shape*/, 
+Tensor C = Compute({M, N}/*output shape*/,
     [](Var i, Var j) {
       return ReduceSum(A(i,k) * B(k, j), k);
     }, "C");

 Tensor D = Compute({M, N}, [](Var i, Var j) {
   return Map(C(i,j) + 1);
 });

 Schedule s = CreateSchedule(C);
 auto func = Build(s, [A, B, C], target=target, name="matmul");
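In index notation, the two `Compute` nodes in this example define

$$C_{i,j} = \sum_{k} A_{i,k}\,B_{k,j}, \qquad D_{i,j} = C_{i,j} + 1,$$

with the reduction axis `k` supplied explicitly to `ReduceSum`.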
@@ -54,7 +54,9 @@ void BindUtils(py::module *m) {
       .def(py::init<const std::string &, double, EventType>())
       .def_property(
           "annotation",
-          [](HostEvent &self) -> const std::string & { return self.annotation_; },
+          [](HostEvent &self) -> const std::string & {
+            return self.annotation_;
+          },
           [](HostEvent &self, const std::string &v) { self.annotation_ = v; })
       .def_property(
           "duration",
@@ -67,4 +69,4 @@ void BindUtils(py::module *m) {
 }

 } // namespace pybind
-} // namespace cinn
\ No newline at end of file
+} // namespace cinn
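The rewrapped lambda above is the usual pybind11 getter/setter pair for `def_property`. A minimal self-contained equivalent (the module name `demo` and the `Event` struct are made up for illustration):

```cpp
#include <pybind11/pybind11.h>

#include <string>

namespace py = pybind11;

struct Event {
  std::string annotation_;
};

PYBIND11_MODULE(demo, m) {
  py::class_<Event>(m, "Event")
      .def(py::init<>())
      .def_property(
          "annotation",
          // Getter: expose the field as a Python attribute.
          [](Event &self) -> const std::string & { return self.annotation_; },
          // Setter: assign straight through from the Python value.
          [](Event &self, const std::string &v) { self.annotation_ = v; });
}
```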
@@ -26,8 +26,10 @@ TEST(RecordEvent, HOST) {
   ProfilerHelper::EnableCPU();

   LOG(INFO) << "Usage 1: RecordEvent for HOST";
-  std::vector<EventType> types = {
-      EventType::kOrdinary, EventType::kCompile, EventType::kCompile, EventType::kInstruction};
+  std::vector<EventType> types = {EventType::kOrdinary,
+                                  EventType::kCompile,
+                                  EventType::kCompile,
+                                  EventType::kInstruction};
   for (int i = 0; i < 4; ++i) {
     std::string name = "evs_op_" + std::to_string(i);
     RecordEvent record_event(name, types[i]);
@@ -38,7 +40,7 @@ TEST(RecordEvent, HOST) {
   auto &events = HostEventRecorder::GetInstance().Events();
   EXPECT_EQ(events.size(), 4U);
   for (int i = 0; i < 4; ++i) {
-    auto &event      = events[i];
+    auto &event = events[i];
     std::string name = "evs_op_" + std::to_string(i);
     EXPECT_EQ(event.annotation_, name);
     EXPECT_GT(event.duration_, 0.0);
@@ -75,4 +77,4 @@ TEST(RecordEvent, HOST) {
   }

   EXPECT_EQ(events.size(), 8U);
-}
\ No newline at end of file
+}
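The `EXPECT_GT(event.duration_, 0.0)` assertions above work because `RecordEvent` is an RAII guard: construction stamps a start time, and destruction at the end of each loop iteration records the elapsed duration. A standalone sketch of that pattern (hypothetical class, not CINN's implementation):

```cpp
#include <chrono>
#include <iostream>
#include <string>
#include <utility>

// RAII timer: reports the elapsed wall time when it goes out of scope.
class ScopedTimer {
 public:
  explicit ScopedTimer(std::string name)
      : name_(std::move(name)), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    std::chrono::duration<double, std::milli> ms =
        std::chrono::steady_clock::now() - start_;
    std::cout << name_ << ": " << ms.count() << " ms\n";
  }

 private:
  std::string name_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  ScopedTimer timer("evs_op_0");  // duration printed when timer is destroyed
  return 0;
}
```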
 *.pyc
 *.html
-*.json
\ No newline at end of file
+*.json