Unverified · Commit 2e6de4ce authored by 张春乔, committed by GitHub

[CodeStyle][CINN] fix end of file and trailing whitespace (#54955)

* cinn 5 6

* roll back something

* roll back something

* fix codestyle of cinn

* revert symlink changes

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent 865b6548
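Fixes like the ones in this commit are usually enforced automatically rather than by hand. As a minimal sketch (my illustration, not Paddle's actual configuration), the two standard hooks from the pre-commit-hooks project cover exactly the two problems named in the commit title; the `rev` and the `files` scope below are illustrative assumptions:

```yaml
# Sketch of a .pre-commit-config.yaml enforcing the two rules this commit fixes.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: trailing-whitespace   # strips trailing spaces and tabs
      - id: end-of-file-fixer     # ensures files end with exactly one newline
        files: ^paddle/cinn/      # hypothetical scope for the CINN sources
```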
@@ -21,36 +21,48 @@
namespace cinn {
namespace auto_schedule {

std::unique_ptr<BlockSampler> BlockSampler::Make(
    const std::vector<ir::Expr>& all_blocks,
    bool default_remove_policy,
    const std::string& strategy,
    utils::LinearRandomEngine::StateType rand_seed,
    const std::vector<int>& weights) {
  CHECK_GT(all_blocks.size(), 0) << "Empty block list";
  if (strategy == "traversal") {
    VLOG(6) << "Init TraversalBlockSampler with block num = "
            << all_blocks.size();
    return std::make_unique<TraversalBlockSampler>(all_blocks,
                                                   default_remove_policy);
  } else if (strategy == "probabilistic") {
    VLOG(6) << "Init ProbabilisticBlockSampler with block num = "
            << all_blocks.size();
    return std::make_unique<ProbabilisticBlockSampler>(
        all_blocks, default_remove_policy, rand_seed, weights);
  }

  LOG(FATAL) << "Unimplemented strategy:" << strategy;
  return nullptr;
}

BlockSampler::BlockSampler(const std::vector<ir::Expr>& all_blocks,
                           bool default_remove_policy) {
  default_remove_policy_ = default_remove_policy;
  std::transform(all_blocks.begin(),
                 all_blocks.end(),
                 std::back_inserter(all_blocks_),
                 [](const ir::Expr& block_expr) {
                   const ir::ScheduleBlockRealize* block_realize =
                       block_expr.As<ir::ScheduleBlockRealize>();
                   const ir::ScheduleBlock* block =
                       block_realize->schedule_block.As<ir::ScheduleBlock>();
                   return block->name;
                 });
}

std::string TraversalBlockSampler::NextBlock(bool remove) {
  if (cur_idx_ < all_blocks_.size()) {
    VLOG(6) << "[TraversalBlockSampler] next block: "
            << all_blocks_.at(cur_idx_);
    std::string block_name = all_blocks_.at(cur_idx_);
    if (remove) {
      ++cur_idx_;

@@ -62,11 +74,14 @@ std::string TraversalBlockSampler::NextBlock(bool remove) {
  return "";
}

ProbabilisticBlockSampler::ProbabilisticBlockSampler(
    const std::vector<ir::Expr>& all_blocks,
    bool default_remove_policy,
    utils::LinearRandomEngine::StateType rand_seed,
    const std::vector<int>& weights)
    : BlockSampler(all_blocks, default_remove_policy),
      weights_(weights),
      rand_seed_(rand_seed) {
  if (weights.empty()) {
    weights_.resize(all_blocks.size(), 1);
  } else {

@@ -79,14 +94,16 @@ std::string ProbabilisticBlockSampler::NextBlock(bool remove) {
  if (remains_ == 0) {
    return "";
  }
  int block_idx =
      utils::SampleDiscreteFromDistribution<int>(weights_, &rand_seed_);
  if (remove) {
    weights_[block_idx] = 0;
    --remains_;
  }
  VLOG(6) << "[ProbabilisticBlockSampler] next block: "
          << all_blocks_.at(block_idx);
  return all_blocks_.at(block_idx);
}

}  // namespace auto_schedule
}  // namespace cinn
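For orientation, here is a hedged usage sketch of the factory above (my example, not code from this commit): `blocks` is assumed to already hold `ir::ScheduleBlockRealize` expressions collected elsewhere, and surrounding includes are omitted.

```cpp
// Sketch: drain a traversal sampler until every schedule block is visited.
// Arguments mirror the BlockSampler::Make signature shown in the diff.
auto sampler = BlockSampler::Make(blocks,
                                  /*default_remove_policy=*/true,
                                  /*strategy=*/"traversal",
                                  /*rand_seed=*/0,
                                  /*weights=*/{});
for (std::string name = sampler->NextBlock(/*remove=*/true); !name.empty();
     name = sampler->NextBlock(/*remove=*/true)) {
  VLOG(6) << "visiting block " << name;  // NextBlock returns "" when exhausted
}
```

With `strategy = "probabilistic"`, the same loop would instead draw blocks in proportion to `weights`, zeroing each drawn weight when `remove` is true.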
@@ -20,16 +20,19 @@
namespace cinn {
namespace auto_schedule {

std::unique_ptr<RuleSampler> RuleSampler::Make(
    const std::vector<AutoGenRule*>& potential_rules,
    bool default_remove_policy,
    const std::string& strategy,
    utils::LinearRandomEngine::StateType rand_seed,
    const std::vector<int>& weights) {
  CHECK_GT(potential_rules.size(), 0) << "Empty rule list";
  if (strategy == "traversal") {
    return std::make_unique<TraversalRuleSampler>(potential_rules,
                                                  default_remove_policy);
  } else if (strategy == "probabilistic") {
    return std::make_unique<ProbabilisticRuleSampler>(
        potential_rules, default_remove_policy, rand_seed, weights);
  }

  LOG(FATAL) << "Unimplemented strategy:" << strategy;

@@ -48,10 +51,11 @@ AutoGenRule* TraversalRuleSampler::NextRule(bool remove) {
  return nullptr;
}

ProbabilisticRuleSampler::ProbabilisticRuleSampler(
    const std::vector<AutoGenRule*>& potential_rules,
    bool default_remove_policy,
    utils::LinearRandomEngine::StateType rand_seed,
    const std::vector<int>& weights)
    : RuleSampler(potential_rules, default_remove_policy),
      weights_(weights),
      rand_seed_(utils::LinearRandomEngine::NormalizeState(rand_seed)) {

@@ -67,7 +71,8 @@ AutoGenRule* ProbabilisticRuleSampler::NextRule(bool remove) {
  if (remains_ == 0) {
    return nullptr;
  }
  int rule_idx =
      utils::SampleDiscreteFromDistribution<int>(weights_, &rand_seed_);
  if (remove) {
    weights_[rule_idx] = 0;
    --remains_;

@@ -77,4 +82,4 @@ AutoGenRule* ProbabilisticRuleSampler::NextRule(bool remove) {
}

}  // namespace auto_schedule
}  // namespace cinn
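Both probabilistic samplers share the same removal idiom: draw an index from the current weight vector, then zero that weight so the item can never be drawn again. Below is a self-contained illustration of that idiom, under the assumption that `utils::SampleDiscreteFromDistribution` behaves like a weighted discrete draw in the style of `std::discrete_distribution`:

```cpp
#include <iostream>
#include <random>
#include <string>
#include <vector>

int main() {
  std::vector<int> weights = {3, 1, 1};  // sampling weight per candidate
  std::vector<std::string> names = {"rule_a", "rule_b", "rule_c"};
  std::mt19937 rng(42);
  int remains = static_cast<int>(weights.size());
  while (remains > 0) {
    // Rebuild the distribution on every draw, as the samplers above reread
    // weights_ on each NextRule/NextBlock call.
    std::discrete_distribution<int> dist(weights.begin(), weights.end());
    int idx = dist(rng);
    std::cout << "picked " << names[idx] << "\n";
    weights[idx] = 0;  // "remove": a zero weight can never be drawn again
    --remains;
  }
  return 0;
}
```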
@@ -20,20 +20,23 @@ namespace cinn {
namespace frontend {
namespace science_mappers {

void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc,
                          const OpMapperContext& ctx) {
  CHECK_EQ(op_desc.Output("Y").size(), 1UL);
  auto y_name = op_desc.Output("Y").front();

  auto shape = utils::ToShapeType(
      utils::GetAttrOrDefault<std::vector<int64_t>>(op_desc, "shape"));
  auto value = utils::GetAttrOrDefault<float>(op_desc, "value", 0.0f);
  auto dtype_id = utils::GetAttrOrDefault<int>(
      op_desc, "dtype", static_cast<int>(paddle::cpp::VarDescAPI::Type::FP32));
  auto dtype_pd = static_cast<paddle::cpp::VarDescAPI::Type>(dtype_id);
  auto dtype_cinn = utils::CppVarType2CommonType(dtype_pd);
  auto dtype = common::Type2Str(dtype_cinn);

  VLOG(4) << "fill constant (" << value << ") with shape ("
          << cinn::utils::Join(shape, ",") << ") and dtype [" << dtype << "]";

  const auto& cinn_name = cinn::utils::TransValidVarName(y_name);
  CheckVarNameValid(cinn_name);

@@ -44,19 +47,23 @@ void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperCont
  ctx.AddVarModelToProgram(y_name, out->id);
}

void BroadcastOpMapper(const paddle::cpp::OpDesc& op_desc,
                       const OpMapperContext& ctx) {
  CHECK_EQ(op_desc.Input("X").size(), 1UL);
  auto x_name = op_desc.Input("X").front();
  CHECK_EQ(op_desc.Output("Y").size(), 1UL);
  auto y_name = op_desc.Output("Y").front();

  CHECK(op_desc.HasAttr("shape"))
      << "The broadcast_p operator should has 'shape' attribute, but " << x_name
      << "'s broadcast hasn't.";

  auto y_shape = utils::ToShapeType(
      utils::GetAttrOrDefault<std::vector<int64_t>>(op_desc, "shape"));
  auto x = ctx.GetVar(x_name);

  VLOG(4) << "Broadcast " << x_name << " from shape ("
          << cinn::utils::Join(x->shape, ",") << ") to shape ("
          << cinn::utils::Join(y_shape, ",") << ").";

  auto out = ctx.Builder()->BroadcastTo(x, y_shape);

@@ -70,8 +77,10 @@ void BroadcastOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext
}  // namespace cinn

CINN_REGISTER_HELPER(science_broadcast) {
  CINN_REGISTER_OP_MAPPER(fill_constant_p,
                          cinn::frontend::science_mappers::FillConstantOpMapper)
  CINN_REGISTER_OP_MAPPER(broadcast_p,
                          cinn::frontend::science_mappers::BroadcastOpMapper)
  return true;
}
@@ -19,7 +19,9 @@
namespace cinn {
namespace frontend {

int GetSize(std::vector<int>& shape) {
  return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
}

void RunModelTest(Program& program,
                  const std::vector<Variable>&& inputs,

@@ -28,13 +30,17 @@ void RunModelTest(Program& program,
  std::vector<std::vector<float>> inputs_data;
  for (auto input : inputs) {
    inputs_data.emplace_back(GetSize(input->shape));
    InitRandomVector<float>(
        &inputs_data.back(), inputs_data.back().size(), 0.0f, 1.0f, 1e-3);
  }

  auto target = common::DefaultTarget();
  std::unordered_map<std::string,
                     std::pair<std::vector<float>, std::vector<float>>>
      outputs;
  {
    auto graph =
        std::make_shared<hlir::framework::Graph>(program, fetch_ids, target);
    hlir::framework::ApplyPass(graph.get(), "OpFusionPass");
    hlir::framework::ApplyPass(graph.get(), "FusionMergePass");

@@ -45,7 +51,7 @@ void RunModelTest(Program& program,
    for (int idx = 0; idx < inputs.size(); ++idx) {
      scope->Var<hlir::framework::Tensor>(inputs[idx]->id);
      auto tensor = scope->GetTensor(inputs[idx]->id);
      auto* data = tensor->mutable_data<float>(target);
      CopyFromVector(inputs_data[idx], tensor, target);
    }
    run_program->Execute();

@@ -53,11 +59,13 @@ void RunModelTest(Program& program,
      auto tensor = scope->GetTensor(id);
      std::vector<float> data(tensor->shape().numel());
      CopyToVector(tensor, &data);
      outputs[id] = std::pair<std::vector<float>, std::vector<float>>(
          data, std::vector<float>());
    }
  }

  {
    auto graph =
        std::make_shared<hlir::framework::Graph>(program, fetch_ids, target);
    hlir::framework::ApplyPass(graph.get(), "DotMerger");
    hlir::framework::ApplyPass(graph.get(), "OpFusionPass");
    hlir::framework::ApplyPass(graph.get(), "FusionMergePass");

@@ -69,7 +77,7 @@ void RunModelTest(Program& program,
    for (int idx = 0; idx < inputs.size(); ++idx) {
      scope->Var<hlir::framework::Tensor>(inputs[idx]->id);
      auto tensor = scope->GetTensor(inputs[idx]->id);
      auto* data = tensor->mutable_data<float>(target);
      CopyFromVector(inputs_data[idx], tensor, target);
    }
    run_program->Execute();

@@ -89,24 +97,24 @@ void RunModelTest(Program& program,
TEST(DotMerger, Test_dot_merger0) {
  int m = 2, k = 1024, n = 100, n1 = 100, n2 = 100, axis = 1;
  NetBuilder net_builder("Test_dot_merger0");

  auto A = net_builder.CreateInput(Float(32), {m, k}, "A");
  auto B = net_builder.CreateInput(Float(32), {k, n1}, "B");
  auto C = net_builder.CreateInput(Float(32), {k, n2}, "C");
  auto D = net_builder.CreateInput(Float(32), {n1, k}, "D");
  auto E = net_builder.CreateInput(Float(32), {n2, k}, "E");
  auto F = net_builder.CreateInput(Float(32), {k, n}, "F");

  auto G = net_builder.Matmul(A, B);
  auto H = net_builder.Matmul(A, C);
  auto G1 = net_builder.Matmul(D, F);
  auto H1 = net_builder.Matmul(E, F);
  auto G2 = net_builder.Concat({G, H}, axis);
  auto H2 = net_builder.Concat({G1, H1}, (1 - axis));
  auto F1 = net_builder.Matmul(G2, H2);

  auto fetch_ids = {F1->id};
  auto program = net_builder.Build();
  std::cout << "RunModelTest" << std::endl;
  RunModelTest(program, {A, B, C, D, E, F}, fetch_ids);
}

}  // namespace frontend
}  // namespace cinn
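The test above feeds DotMerger two matmul pairs that share an operand (`A` with `B` and `C`; `F` with `D` and `E`) followed by `Concat`. The rewrite the pass performs rests on a block-matrix identity, concat(A·B, A·C, axis=1) = A·concat(B, C, axis=1); here is a tiny numeric check of that identity (my illustration, not CINN code):

```cpp
#include <cassert>

int main() {
  // 1x2 matrix A, 2x1 matrices B and C; small enough to multiply by hand.
  double A[2] = {1.0, 2.0};
  double B[2] = {3.0, 4.0};
  double C[2] = {5.0, 6.0};
  // Left side: two separate dot products, concatenated along columns.
  double lhs[2] = {A[0] * B[0] + A[1] * B[1],   // A.B = 11
                   A[0] * C[0] + A[1] * C[1]};  // A.C = 17
  // Right side: concat B and C into a 2x2, then one fused matmul.
  double BC[2][2] = {{B[0], C[0]}, {B[1], C[1]}};
  double rhs[2];
  for (int j = 0; j < 2; ++j) rhs[j] = A[0] * BC[0][j] + A[1] * BC[1][j];
  assert(lhs[0] == rhs[0] && lhs[1] == rhs[1]);
  return 0;
}
```

Merging the two dots lets one larger GEMM replace two smaller ones, which is the payoff the `DotMerger` pass is being tested for.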
# Design of CINN/DSL
This module is a simple DSL defined in CINN project.
The DSL module aims to represent the overall computation in a hardware independent way.

## Concepts

@@ -78,14 +78,14 @@ A matrix multiplication

Var i, j, k;
Placeholder<float> A({M, K}), B({K, N});

Tensor C = Compute({M, N}/*output shape*/,
    [](Var i, Var j) {
      return ReduceSum(A(i,k) * B(k, j), k);
    }, "C");
Tensor D = Compute({M, N}, [](Var i, Var j) {
  return Map(C(i,j) + 1);
});

Schedule s = CreateSchedule(C);
auto func = Build(s, [A, B, C], target=target, name="matmul");
......
@@ -54,7 +54,9 @@ void BindUtils(py::module *m) {
      .def(py::init<const std::string &, double, EventType>())
      .def_property(
          "annotation",
          [](HostEvent &self) -> const std::string & {
            return self.annotation_;
          },
          [](HostEvent &self, const std::string &v) { self.annotation_ = v; })
      .def_property(
          "duration",

@@ -67,4 +69,4 @@ void BindUtils(py::module *m) {
}

}  // namespace pybind
}  // namespace cinn
@@ -26,8 +26,10 @@ TEST(RecordEvent, HOST) {
  ProfilerHelper::EnableCPU();

  LOG(INFO) << "Usage 1: RecordEvent for HOST";
  std::vector<EventType> types = {EventType::kOrdinary,
                                  EventType::kCompile,
                                  EventType::kCompile,
                                  EventType::kInstruction};
  for (int i = 0; i < 4; ++i) {
    std::string name = "evs_op_" + std::to_string(i);
    RecordEvent record_event(name, types[i]);

@@ -38,7 +40,7 @@ TEST(RecordEvent, HOST) {
  auto &events = HostEventRecorder::GetInstance().Events();
  EXPECT_EQ(events.size(), 4U);
  for (int i = 0; i < 4; ++i) {
    auto &event = events[i];
    std::string name = "evs_op_" + std::to_string(i);
    EXPECT_EQ(event.annotation_, name);
    EXPECT_GT(event.duration_, 0.0);

@@ -75,4 +77,4 @@ TEST(RecordEvent, HOST) {
    }
  }
  EXPECT_EQ(events.size(), 8U);
}
*.pyc
*.html
*.json