diff --git a/paddle/fluid/lite/core/kernel.h b/paddle/fluid/lite/core/kernel.h
index 4eed43f4cf16edddfd4d7b58db942afe2774034f..0dee3968f6e7ec8fb9f93636c4541c2c69c541a5 100644
--- a/paddle/fluid/lite/core/kernel.h
+++ b/paddle/fluid/lite/core/kernel.h
@@ -104,12 +104,16 @@ class KernelBase {
   mutable operators::param_t param_;
   // The corresponding op type.
   std::string op_type_{};
+  // The extra identifier to help differentiate a specific kernel; op_type_ +
+  // alias_ is the unique ID for the kernel.
   std::string alias_{};
 };
 
 // Light-weight kernel implementation.
 // The OpKernel is designed to implement the specific algorithm on a target
 // device.
+// TODO(Superjomn) Consider adding a Platform type to differentiate CUDNN,
+// MKLDNN, and plain CUDA C implementations.
 template <TargetType Target, PrecisionType Precision,
           DataLayoutType DataLayout = DATALAYOUT(kNCHW)>
 class OpKernel : public KernelBase {
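The new alias_ field means a kernel is no longer identified by its op type alone: two kernels for the same op can now be registered side by side. A minimal sketch of how the combined ID can be composed (GenKernelId is a hypothetical helper, not part of this patch):

    #include <string>

    // Compose the unique kernel ID described in the comment above,
    // e.g. op_type_ = "conv2d", alias_ = "fp32" -> "conv2d/fp32".
    std::string GenKernelId(const std::string& op_type,
                            const std::string& alias) {
      return op_type + "/" + alias;
    }

With such an ID, a CUDNN and a plain CUDA implementation of the same op (the case the TODO above anticipates) can coexist in one registry.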
diff --git a/paddle/fluid/lite/core/mir/argument_type_display_pass.cc b/paddle/fluid/lite/core/mir/argument_type_display_pass.cc
index 77c3b65ac2b2e025d938dba4902058cf1be0ae35..e4db847d127152a6ee74988b2224d1237c50789e 100644
--- a/paddle/fluid/lite/core/mir/argument_type_display_pass.cc
+++ b/paddle/fluid/lite/core/mir/argument_type_display_pass.cc
@@ -24,13 +24,13 @@ class ArgumentTypeDisplayPass : public DebugPass {
   void Apply(std::unique_ptr<SSAGraph>& graph) override {
     LOG(INFO) << "== Argument types ==";
     for (auto& node : graph->mutable_nodes()) {
-      if (!node.IsArgument()) continue;
+      if (!node.IsArg()) continue;
 
-      auto* type = node.AsArgument().type;
+      auto* type = node.AsArg().type;
       if (type) {
-        LOG(INFO) << "* ARG " << node.AsArgument().name << " type: " << *type;
+        LOG(INFO) << "* ARG " << node.AsArg().name << " type: " << *type;
       } else {
-        LOG(INFO) << "* ARG " << node.AsArgument().name << " type: UNK";
+        LOG(INFO) << "* ARG " << node.AsArg().name << " type: UNK";
       }
     }
     LOG(INFO) << "---------------------";

diff --git a/paddle/fluid/lite/core/mir/generate_program_pass.cc b/paddle/fluid/lite/core/mir/generate_program_pass.cc
index 0b2d806264e735739475845ec98ceaadce4934d6..62ca701b073112c1fc8220e201db1d80fc89eafe 100644
--- a/paddle/fluid/lite/core/mir/generate_program_pass.cc
+++ b/paddle/fluid/lite/core/mir/generate_program_pass.cc
@@ -23,11 +23,10 @@ namespace mir {
 void GenerateProgramPass::Apply(std::unique_ptr<SSAGraph>& graph) {
   LOG(INFO) << "final program \n" << Visualize(graph.get());
   for (auto& item : graph->InstructTopologicalOrder()) {
-    if (item->IsInstruct()) {
-      auto& instruct = item->AsInstruct();
-      LOG(INFO) << instruct;
-      insts_.emplace_back(instruct.op,
-                          std::move(instruct.valid_kernels.front()));
+    if (item->IsStmt()) {
+      auto& stmt = item->AsStmt();
+      LOG(INFO) << stmt;
+      insts_.emplace_back(stmt.op, std::move(stmt.valid_kernels.front()));
     }
   }
 }

diff --git a/paddle/fluid/lite/core/mir/graph_visualize_pass.cc b/paddle/fluid/lite/core/mir/graph_visualize_pass.cc
index 99c53a310d110009abf0d4b38dfadcd4736ef34b..440dfd785bef505dc586c65d4c0a258e13ef2443 100644
--- a/paddle/fluid/lite/core/mir/graph_visualize_pass.cc
+++ b/paddle/fluid/lite/core/mir/graph_visualize_pass.cc
@@ -34,16 +34,16 @@ std::string Visualize(mir::SSAGraph* graph) {
 
   for (auto& node : graph->mutable_nodes()) {
     std::string key;
-    if (node.IsArgument()) {
-      key = node.AsArgument().name;
+    if (node.IsArg()) {
+      key = node.AsArg().name;
     } else {
-      key = node.AsInstruct().op_type + std::to_string(id++);
+      key = node.AsStmt().op_type + std::to_string(id++);
     }
 
-    if (node.IsInstruct()) {
+    if (node.IsStmt()) {
       dot.AddNode(key, {Dot::Attr("shape", "box")});
       for (auto& x : node.inlinks) {
-        auto name = x->AsArgument().name;
+        auto name = x->AsArg().name;
         if (!exists_args.count(name)) {
           dot.AddNode(name, {});
         }
@@ -51,7 +51,7 @@ std::string Visualize(mir::SSAGraph* graph) {
         exists_args.insert(name);
       }
       for (auto& x : node.outlinks) {
-        auto name = x->AsArgument().name;
+        auto name = x->AsArg().name;
         if (!exists_args.count(name)) {
           dot.AddNode(name, {});
         }

diff --git a/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc b/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc
index 78f0a9b02b965f5649091ff1ae4d78b20a538df8..1978f50db1a814e7b790e8388421d369b2d8f9a9 100644
--- a/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc
+++ b/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc
@@ -19,20 +19,20 @@ namespace paddle {
 namespace lite {
 namespace mir {
 
-class IoCopyKernelPickPass : public InstructionPass {
+class IoCopyKernelPickPass : public StmtPass {
  public:
   void Apply(std::unique_ptr<SSAGraph>& graph) override {
     for (auto& node : graph->mutable_nodes()) {
-      if (!node.IsInstruct()) continue;
-      auto& inst = node.AsInstruct();
+      if (!node.IsStmt()) continue;
+      auto& inst = node.AsStmt();
       if (inst.op_type != "io_copy") continue;
 
       LOG(INFO) << "....> picking a IO COPY kernel";
 
-      auto& kernels = node.AsInstruct().valid_kernels;
+      auto& kernels = node.AsStmt().valid_kernels;
       CHECK(!kernels.empty()) << "No valid kernels found for IoCopy Op";
 
-      const auto* inty = node.inlinks.front()->AsArgument().type;
-      const auto* outy = node.outlinks.front()->AsArgument().type;
+      const auto* inty = node.inlinks.front()->AsArg().type;
+      const auto* outy = node.outlinks.front()->AsArg().type;
       LOG(INFO) << "input type " << *inty;
       LOG(INFO) << "output type " << *outy;

diff --git a/paddle/fluid/lite/core/mir/node.h b/paddle/fluid/lite/core/mir/node.h
index daa6f5555a8dfd90d12a4365dd2ff188861a1619..91b39e1ac86c2698d02d119acbaae929a941efc0 100644
--- a/paddle/fluid/lite/core/mir/node.h
+++ b/paddle/fluid/lite/core/mir/node.h
@@ -34,15 +34,15 @@ class Node {
   Node() = default;
 
   enum class Role {
-    kArgument = 0,
-    kInstruct,
+    kArg = 0,
+    kStmt,
     kNumRoles, /*should be last*/
     kUnk,
   };
 
-  struct Instruct {
+  struct Stmt {
     std::string op_type;
-    // The kernel instances this Instruct contains.
+    // The kernel instances this Statement contains.
     std::vector<std::unique_ptr<KernelBase>> valid_kernels;
     // TODO(Superjomn) make this a shared_ptr for resource safety.
     std::shared_ptr<OpLite> op;  // we hold op to run InferShape
@@ -62,13 +62,13 @@ class Node {
       return *valid_kernels.front();
     }
 
-    friend std::ostream& operator<<(std::ostream& os, const Instruct& other) {
-      os << "Instruct " << other.op_type << " " << other.place();
+    friend std::ostream& operator<<(std::ostream& os, const Stmt& other) {
+      os << "Statement " << other.op_type << " " << other.place();
       return os;
     }
   };
 
-  struct Argument {
+  struct Arg {
     std::string name;
     const Type* type{};
     // Weight is a special kind of argument, it is marked as weight explicitly
     bool is_weight{false};
   };
 
-  Argument& AsArgument(const std::string& name) {
-    auto& x = AsArgument();
+  Arg& AsArg(const std::string& name) {
+    auto& x = AsArg();
     x.name = name;
     return x;
   }
 
-  Instruct& AsInstruct(const std::string& op_type,
-                       std::vector<std::unique_ptr<KernelBase>>&& kernels,
-                       const std::shared_ptr<OpLite>& op) {
-    auto& x = AsInstruct();
+  Stmt& AsStmt(const std::string& op_type,
+               std::vector<std::unique_ptr<KernelBase>>&& kernels,
+               const std::shared_ptr<OpLite>& op) {
+    auto& x = AsStmt();
     x.op_type = op_type;
     x.op = op;
     x.valid_kernels = std::move(kernels);
     return x;
   }
 
   // Set roles.
-  Argument& AsArgument() {
+  Arg& AsArg() {
     if (role_ != Role::kUnk) {
-      CHECK(role_ == Role::kArgument);
-      return *argument_;
+      CHECK(role_ == Role::kArg);
+      return *arg_;
     }
-    role_ = Role::kArgument;
-    argument_.reset(new Argument);
-    return *argument_;
+    role_ = Role::kArg;
+    arg_.reset(new Arg);
+    return *arg_;
   }
 
-  Instruct& AsInstruct() {
+  Stmt& AsStmt() {
     if (role_ != Role::kUnk) {
-      CHECK(role_ == Role::kInstruct);
-      return *instruct_;
+      CHECK(role_ == Role::kStmt);
+      return *stmt_;
     }
-    role_ = Role::kInstruct;
-    instruct_.reset(new Instruct);
-    return *instruct_;
+    role_ = Role::kStmt;
+    stmt_.reset(new Stmt);
+    return *stmt_;
   }
 
   friend std::ostream& operator<<(std::ostream& os, Node& other) {
@@ -117,26 +117,26 @@ class Node {
     if (!other.IsRoleSet()) {
      os << "Unk role node";
     }
-    if (other.IsArgument()) {
-      auto& arg = other.AsArgument();
+    if (other.IsArg()) {
+      auto& arg = other.AsArg();
       os << "Argument " << arg.name;
     }
-    if (other.IsInstruct()) {
-      auto& arg = other.AsInstruct();
-      os << "Instruct " << arg.op_type;
+    if (other.IsStmt()) {
+      auto& arg = other.AsStmt();
+      os << "Statement " << arg.op_type;
     }
     return os;
   }
 
   // Check roles.
   bool IsRoleSet() const { return role_ != Role::kUnk; }
-  bool IsInstruct() const { return role_ == Role::kInstruct; }
-  bool IsArgument() const { return role_ == Role::kArgument; }
+  bool IsStmt() const { return role_ == Role::kStmt; }
+  bool IsArg() const { return role_ == Role::kArg; }
 
  private:
-  // Either instruct_ or argument_ is used.
-  std::unique_ptr<Instruct> instruct_;
-  std::unique_ptr<Argument> argument_;
+  // Either stmt_ or arg_ is used.
+  std::unique_ptr<Stmt> stmt_;
+  std::unique_ptr<Arg> arg_;
 
   Role role_{Role::kUnk};
 };
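The role API keeps its set-once semantics under the new names: the first AsArg()/AsStmt() call fixes the node's role, and a later call with the other role trips the CHECK. A minimal usage sketch (standalone, not code from this patch):

    mir::Node node;
    auto& arg = node.AsArg("conv_out");  // first call fixes the role to kArg
    arg.is_weight = false;

    CHECK(node.IsArg());
    CHECK(!node.IsStmt());
    // Calling node.AsStmt() here would abort: the role is already kArg.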
diff --git a/paddle/fluid/lite/core/mir/pass.h b/paddle/fluid/lite/core/mir/pass.h
index 36c70e24323a03519d29212b819cc9e223e2fa57..0f14f6defb68bdc18cc111794598daa741f4a27a 100644
--- a/paddle/fluid/lite/core/mir/pass.h
+++ b/paddle/fluid/lite/core/mir/pass.h
@@ -26,8 +26,8 @@ class Pass {
   enum class Kind {
     // Will modify the program/graph topology.
     kProgramWise = 0,
-    // Will modify the instruction, with the graph topology fixed.
-    kInstructionWise,
+    // Will modify the statement, with the graph topology fixed.
+    kStmtWise,
     // Will not modify the IR, just collect information or visualization.
     kDebug,
   };
@@ -45,7 +45,7 @@
   Kind kind() const { return kind_; }
   bool is_debug_pass() const { return kind_ == Kind::kDebug; }
   bool is_program_pass() const { return kind_ == Kind::kProgramWise; }
-  bool is_instruction_pass() const { return kind_ == Kind::kInstructionWise; }
+  bool is_stmt_pass() const { return kind_ == Kind::kStmtWise; }
 
   virtual ~Pass() = default;
 
@@ -61,9 +61,9 @@ class ProgramPass : public Pass {
   ProgramPass() : Pass(Kind::kProgramWise) {}
 };
 
-class InstructionPass : public Pass {
+class StmtPass : public Pass {
  public:
-  InstructionPass() : Pass(Kind::kInstructionWise) {}
+  StmtPass() : Pass(Kind::kStmtWise) {}
 };
 
 class DebugPass : public Pass {
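A pass declares how invasive it is through the Kind it hands to the base-class constructor; the rename only touches the statement-level variant. A sketch of a minimal statement-level pass under the new naming (DummyStmtPass is illustrative only, assuming the lite/mir headers are in scope):

    class DummyStmtPass : public mir::StmtPass {
     public:
      // StmtPass's constructor sets Kind::kStmtWise: this pass may rewrite
      // statements, but must leave the graph topology untouched.
      void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {
        for (auto& node : graph->mutable_nodes()) {
          if (!node.IsStmt()) continue;
          LOG(INFO) << "visiting " << node.AsStmt().op_type;
        }
      }
    };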
diff --git a/paddle/fluid/lite/core/mir/runtime_context_assign_pass.cc b/paddle/fluid/lite/core/mir/runtime_context_assign_pass.cc
index e3157f1f6ebdd1bdde80d3d91f756226e2072ae7..48149ec1a5b29cc89864c34717635bd07263eb76 100644
--- a/paddle/fluid/lite/core/mir/runtime_context_assign_pass.cc
+++ b/paddle/fluid/lite/core/mir/runtime_context_assign_pass.cc
@@ -19,7 +19,7 @@ namespace paddle {
 namespace lite {
 namespace mir {
 
-class RuntimeContextAssignPass : public InstructionPass {
+class RuntimeContextAssignPass : public StmtPass {
  public:
   RuntimeContextAssignPass() {
 #ifdef LITE_WITH_CUDA
@@ -29,9 +29,9 @@ class RuntimeContextAssignPass : public InstructionPass {
   void Apply(std::unique_ptr<SSAGraph>& graph) override {
     for (auto& node : graph->mutable_nodes()) {
-      if (!node.IsInstruct()) continue;
+      if (!node.IsStmt()) continue;
 
-      auto& inst = node.AsInstruct();
+      auto& inst = node.AsStmt();
 
       switch (inst.picked_kernel().target()) {
         case TARGET(kHost):

diff --git a/paddle/fluid/lite/core/mir/ssa_graph.cc b/paddle/fluid/lite/core/mir/ssa_graph.cc
index b808d44219f8d7c7e02a940bb1cbcc2ba2326dbd..e3a1a0ed4da45200165d88e42861656a59a36e8f 100644
--- a/paddle/fluid/lite/core/mir/ssa_graph.cc
+++ b/paddle/fluid/lite/core/mir/ssa_graph.cc
@@ -37,14 +37,14 @@ std::map<mir::Node *, std::set<mir::Node *>> SSAGraph::BuildOperationAdjList() {
   std::map<mir::Node *, std::set<mir::Node *>> adj_list;
 
   for (auto &n : mutable_nodes()) {
-    if (!n.IsInstruct()) continue;
+    if (!n.IsStmt()) continue;
     if (adj_list.find(&n) == adj_list.end()) {
       adj_list[&n] = std::set<mir::Node *>();
     }
     std::vector<mir::Node *> nodes;
     for (auto &var : n.inlinks) {
       for (auto &adj_n : var->inlinks) {
-        PADDLE_ENFORCE(adj_n->IsInstruct());
+        PADDLE_ENFORCE(adj_n->IsStmt());
         nodes.push_back(adj_n);
       }
     }
@@ -96,7 +96,7 @@ void SSAGraph::GraphCreateTmpVarNodes(const Program &program) {
       VLOG(5) << "create arg node " << name;
       node_storage_.emplace_back();
       auto &new_node = node_storage_.back();
-      new_node.AsArgument(name);
+      new_node.AsArg(name);
       arguments_[name] = &new_node;
     }
   }
@@ -109,7 +109,7 @@ void SSAGraph::GraphCreateWeightVarNodes(const Program &program) {
       VLOG(5) << "create arg node " << name;
       node_storage_.emplace_back();
       auto &new_node = node_storage_.back();
-      new_node.AsArgument(name);
+      new_node.AsArg(name);
       arguments_[name] = &new_node;
     }
   }
@@ -122,7 +122,7 @@ Node *SSAGraph::GraphCreateInstructNode(
   op->SetValidPlaces(valid_places);
   auto &new_node = node_storage_.back();
   auto kernels = op->CreateKernels(valid_places);
-  node_storage_.back().AsInstruct(op->op_type_, std::move(kernels), op);
+  node_storage_.back().AsStmt(op->op_type_, std::move(kernels), op);
 
   CHECK(new_node.inlinks.empty()) << "duplicate Build found";
   CHECK(new_node.outlinks.empty()) << "duplicate Build found";
@@ -202,14 +202,14 @@ bool SSAGraph::CheckNodesRoleSet() {
 bool SSAGraph::CheckLinksRoleSet() {
   for (auto &node : mutable_nodes()) {
     CHECK_OR_FALSE(node.IsRoleSet());
-    if (!node.IsInstruct()) continue;
+    if (!node.IsStmt()) continue;
     for (auto *x : node.inlinks) {
       CHECK_OR_FALSE(x->IsRoleSet());
-      CHECK_OR_FALSE(x->IsArgument());
+      CHECK_OR_FALSE(x->IsArg());
     }
     for (auto *x : node.outlinks) {
      CHECK_OR_FALSE(x->IsRoleSet());
-      CHECK_OR_FALSE(x->IsArgument());
+      CHECK_OR_FALSE(x->IsArg());
     }
   }
   return true;
@@ -219,7 +219,7 @@ Node *SSAGraph::NewArgumentNode(const std::string &name) {
   node_storage_.emplace_back();
   CHECK(!arguments_.count(name)) << "duplicate argument called " << name;
   arguments_[name] = &node_storage_.back();
-  node_storage_.back().AsArgument(name);
+  node_storage_.back().AsArg(name);
   return &node_storage_.back();
 }
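CheckLinksRoleSet enforces that the graph stays bipartite: a statement node may link only to argument nodes, and vice versa. BuildOperationAdjList relies on exactly this invariant when it hops two links to find the statements feeding a statement. The same idea in isolation (Producers is a hypothetical helper, not in this patch):

    // In a bipartite stmt <-> arg graph, the statements feeding `stmt`
    // are exactly two hops upstream: stmt <- arg <- producer.
    std::set<mir::Node *> Producers(mir::Node *stmt) {
      CHECK(stmt->IsStmt());
      std::set<mir::Node *> producers;
      for (auto *arg : stmt->inlinks) {
        CHECK(arg->IsArg());
        for (auto *producer : arg->inlinks) {
          CHECK(producer->IsStmt());
          producers.insert(producer);
        }
      }
      return producers;
    }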
diff --git a/paddle/fluid/lite/core/mir/ssa_graph.h b/paddle/fluid/lite/core/mir/ssa_graph.h
index 011aed2cd9160baeeddb8ac5f592e9b317b395b9..6f860773b451e9b9b9acebb622ee576559de9b6e 100644
--- a/paddle/fluid/lite/core/mir/ssa_graph.h
+++ b/paddle/fluid/lite/core/mir/ssa_graph.h
@@ -76,7 +76,7 @@ class SSAGraph : GraphBase {
 
   void MarkArgumentWeights(const Program &program) {
     for (const auto &name : program.weights) {
-      arguments_[name]->AsArgument().is_weight = true;
+      arguments_[name]->AsArg().is_weight = true;
     }
   }
 
@@ -115,9 +115,9 @@ static void DirectedLink(Node *a, Node *b) {
 
 static void LocalInferenceType(Node *a, Node *b, const std::string &arg_name) {
   // instr -> output argument
-  if (a->IsInstruct() && b->IsArgument()) {
-    auto &inst = a->AsInstruct();
-    auto &output = b->AsArgument();
+  if (a->IsStmt() && b->IsArg()) {
+    auto &inst = a->AsStmt();
+    auto &output = b->AsArg();
     if (!output.type) {
       output.type = inst.picked_kernel().GetOutputDeclType(arg_name);
     }
@@ -125,9 +125,9 @@ static void LocalInferenceType(Node *a, Node *b, const std::string &arg_name) {
   }
 
   // input argument -> instr
-  if (a->IsArgument() && b->IsInstruct()) {
-    auto &input = a->AsArgument();
-    auto &inst = b->AsInstruct();
+  if (a->IsArg() && b->IsStmt()) {
+    auto &input = a->AsArg();
+    auto &inst = b->AsStmt();
     if (!input.type) {
       input.type = inst.picked_kernel().GetInputDeclType(arg_name);
     }

diff --git a/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc b/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc
index 2c954be96444d4f675d8b019d59c8925d3b0c1c1..4dfc15e3e195beb90a877aa78e4016b6cc4ac5f1 100644
--- a/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc
+++ b/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc
@@ -33,8 +33,8 @@ void StaticKernelPickPass::Apply(std::unique_ptr<SSAGraph>& graph) {
   CHECK(graph) << "graph not valid";
   // sort kernels by the factors.
   for (auto& node : graph->mutable_nodes()) {
-    if (!node.IsInstruct()) continue;
-    auto& instruct = node.AsInstruct();
+    if (!node.IsStmt()) continue;
+    auto& instruct = node.AsStmt();
     std::vector<std::pair<size_t, std::unique_ptr<KernelBase>>> scored;
     for (auto&& kernel : instruct.valid_kernels) {
       size_t score = KernelGrade(*kernel);
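The hunk above stops right after the scores are computed; the pass presumably goes on to rank the candidates and keep the winner, along these lines (a sketch, not the verbatim continuation of the function):

    // Rank candidate kernels by score, highest first, and keep the winner
    // as the statement's only valid kernel.
    std::stable_sort(scored.begin(), scored.end(),
                     [](const std::pair<size_t, std::unique_ptr<KernelBase>>& a,
                        const std::pair<size_t, std::unique_ptr<KernelBase>>& b) {
                       return a.first > b.first;
                     });
    instruct.valid_kernels.clear();
    instruct.valid_kernels.emplace_back(std::move(scored.front().second));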
diff --git a/paddle/fluid/lite/core/mir/static_kernel_pick_pass.h b/paddle/fluid/lite/core/mir/static_kernel_pick_pass.h
index 86b53ce2896fedf127ac822766807d5e985b9526..5c48ba9ca02823859c61c168764de216544f3473 100644
--- a/paddle/fluid/lite/core/mir/static_kernel_pick_pass.h
+++ b/paddle/fluid/lite/core/mir/static_kernel_pick_pass.h
@@ -33,7 +33,7 @@ namespace mir {
  * - kernel_pick_factors, the factors to consider in picking kernels.
  * Set them first before execute the pass.
  */
-class StaticKernelPickPass : public mir::InstructionPass {
+class StaticKernelPickPass : public mir::StmtPass {
  public:
   void Apply(std::unique_ptr<SSAGraph>& graph) override;

diff --git a/paddle/fluid/lite/core/mir/type_target_transform_pass.cc b/paddle/fluid/lite/core/mir/type_target_transform_pass.cc
index 34762cf40c52b1a69ea00036f9120da4224b1513..9ac80b552e7b2d4986dcaa1bd64541718454c766 100644
--- a/paddle/fluid/lite/core/mir/type_target_transform_pass.cc
+++ b/paddle/fluid/lite/core/mir/type_target_transform_pass.cc
@@ -33,7 +33,7 @@ void TypeTargetTransformPass::Apply(std::unique_ptr<SSAGraph>& graph) {
   CHECK(!valid_places_.empty());
 
   for (auto& node : nodes) {
-    if (!node->IsInstruct()) continue;
+    if (!node->IsStmt()) continue;
     auto inlinks = node->inlinks;
     for (auto* in : inlinks) {
       ComplementInputs(graph.get(), node, in);
@@ -49,22 +49,22 @@ void TypeTargetTransformPass::ComplementInputs(SSAGraph* graph, Node* inst_node,
       std::find(inst_node->inlinks.begin(), inst_node->inlinks.end(), in))
     return;
 
-  CHECK(inst_node->IsInstruct());
-  auto& inst = inst_node->AsInstruct();
+  CHECK(inst_node->IsStmt());
+  auto& inst = inst_node->AsStmt();
   CHECK(in->IsRoleSet());
-  CHECK(in->IsArgument());
-  auto in_arg_name = in->AsArgument().name;
+  CHECK(in->IsArg());
+  auto in_arg_name = in->AsArg().name;
   std::string tmp;
   CHECK(inst.op_info()->GetInputArgname(in_arg_name, &tmp));
   auto decl_arg_type = inst.picked_kernel().GetInputDeclType(tmp);
-  CHECK(in->AsArgument().type);
-  if (!TargetCompatibleTo(*in->AsArgument().type, *decl_arg_type)) {
-    LOG(INFO) << "found Target unmatched tensor: " << in->AsArgument().name
+  CHECK(in->AsArg().type);
+  if (!TargetCompatibleTo(*in->AsArg().type, *decl_arg_type)) {
+    LOG(INFO) << "found Target unmatched tensor: " << in->AsArg().name
               << " for kernel " << inst.op->DebugString() << " "
-              << *in->AsArgument().type << " -> " << *decl_arg_type;
+              << *in->AsArg().type << " -> " << *decl_arg_type;
     // Add an IoCopy instruction to make the input compatible with other dist.
-    AddIoCopyInst(*in->AsArgument().type, *decl_arg_type, in->AsArgument().name,
-                  graph, inst_node, valid_places_);
+    AddIoCopyInst(*in->AsArg().type, *decl_arg_type, in->AsArg().name, graph,
+                  inst_node, valid_places_);
   }
 }
@@ -73,7 +73,7 @@ void TypeTargetTransformPass::AddIoCopyInst(
     Node* inst_node, const std::vector<Place>& valid_places) {
   CHECK(!valid_places.empty()) << "valid_place should be set";
   // var -> new_transform_op -> new_var -> inst
-  // So there will be a new Argument node and a new IoCopy Instruct Node.
+  // So there will be a new Argument node and a new IoCopy Statement Node.
   auto node_id = [&] { return graph->nodes().size(); };
   auto io_copy_output_name = var + "/trans/" + std::to_string(node_id());
@@ -85,7 +85,7 @@ void TypeTargetTransformPass::AddIoCopyInst(
   CHECK(io_copy_op) << "create op [" << io_copy_op << "] failed";
   // CHECK(io_copy_op);
   // Create the new var manually.
-  inst_node->AsInstruct().op->scope()->Var(io_copy_output_name);
+  inst_node->AsStmt().op->scope()->Var(io_copy_output_name);
 
   // Create IoCopy Instruction.
   lite::OpDesc op_desc;
@@ -93,16 +93,16 @@ void TypeTargetTransformPass::AddIoCopyInst(
   op_desc.SetInput("Input", {var});
   op_desc.SetOutput("Out", {io_copy_output_name});
 
-  io_copy_op->Attach(op_desc, inst_node->AsInstruct().op->scope());
+  io_copy_op->Attach(op_desc, inst_node->AsStmt().op->scope());
   auto kernels = io_copy_op->CreateKernels(valid_places);
-  io_copy_inst->AsInstruct("io_copy", std::move(kernels), io_copy_op);
+  io_copy_inst->AsStmt("io_copy", std::move(kernels), io_copy_op);
 
   // Remove the old link
   RemoveDirectedLink(graph->Argument(var), inst_node);
 
   // Update the original instruction OpDesc.
   // Update its input to the io_copy_output_name
-  auto& inst = inst_node->AsInstruct();
+  auto& inst = inst_node->AsStmt();
   auto inst_program_desc = inst.op_info()->desc();
 
   // Add new link, var -> new_inst, new_inst->newarg, newarg->inst
@@ -111,20 +111,19 @@ void TypeTargetTransformPass::AddIoCopyInst(
   DirectedLink(io_copy_output_arg, inst_node);
 
   // reset opdesc and update kernel information
-  auto desc_dummy = inst_node->AsInstruct().op->op_info()->desc();
+  auto desc_dummy = inst_node->AsStmt().op->op_info()->desc();
   UpdateInputTo(&desc_dummy, var, io_copy_output_name);
 
   lite::OpDesc desc_fake(desc_dummy);
-  inst_node->AsInstruct().op->Attach(desc_fake,
-                                     inst_node->AsInstruct().op->scope());
+  inst_node->AsStmt().op->Attach(desc_fake, inst_node->AsStmt().op->scope());
 
   std::string tmp;
-  if (inst_node->AsInstruct().op_info()->GetInputArgname("a", &tmp)) {
+  if (inst_node->AsStmt().op_info()->GetInputArgname("a", &tmp)) {
     CHECK(false) << "get old a " << tmp;
   }
 
-  for (auto& kernel : inst_node->AsInstruct().valid_kernels) {
-    inst_node->AsInstruct().op->AttachKernel(kernel.get());
+  for (auto& kernel : inst_node->AsStmt().valid_kernels) {
+    inst_node->AsStmt().op->AttachKernel(kernel.get());
   }
 
   graph->CheckValid();
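For reviewers tracing AddIoCopyInst: assembled from the pass's own comments, the rewiring around the target statement looks like this (a sketch, not code copied from the patch):

    // Before:  var -> inst
    // After:   var -> io_copy_inst -> var/trans/<node_id> -> inst
    RemoveDirectedLink(graph->Argument(var), inst_node);
    DirectedLink(graph->Argument(var), io_copy_inst);
    DirectedLink(io_copy_inst, io_copy_output_arg);
    DirectedLink(io_copy_output_arg, inst_node);

The fresh variable name, var + "/trans/" + node_id, keeps repeated insertions from colliding.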
diff --git a/paddle/fluid/lite/core/mir/variable_place_inference_pass.h b/paddle/fluid/lite/core/mir/variable_place_inference_pass.h
index daa5a5bb6af003de2e807062e6b3b14f253051be..ae5621501daede0c4a2a812ff4ecbdbfb5dbc371 100644
--- a/paddle/fluid/lite/core/mir/variable_place_inference_pass.h
+++ b/paddle/fluid/lite/core/mir/variable_place_inference_pass.h
@@ -34,8 +34,8 @@ class VariablePlaceInferencePass : public DebugPass {
     CHECK(!graph->inputs().empty()) << "graph's inputs should be set";
     for (const auto& v : graph->inputs()) {
       // the feed op might in the inputs
-      if (v->IsInstruct()) {
-        LOG(INFO) << "found kernel in inputs " << v->AsInstruct().op_type;
+      if (v->IsStmt()) {
+        LOG(INFO) << "found kernel in inputs " << v->AsStmt().op_type;
         continue;
       }
@@ -49,9 +49,9 @@
   void CheckAllArgumentTypeDetermined(SSAGraph* graph) {
     for (auto& node : graph->mutable_nodes()) {
-      if (node.IsArgument()) {
-        CHECK(node.AsArgument().type) << "node " << node.AsArgument().name
-                                      << " type not determined, " << &node;
+      if (node.IsArg()) {
+        CHECK(node.AsArg().type) << "node " << node.AsArg().name
+                                 << " type not determined, " << &node;
       }
     }
   }
@@ -59,7 +59,7 @@
   void InferenceArgumentPlace(SSAGraph* graph) {
     VLOG(3) << "param-type-registry:\n" << ParamTypeRegistry::Global();
     for (auto& x : graph->InstructTopologicalOrder()) {
-      auto& inst = x->AsInstruct();
+      auto& inst = x->AsStmt();
       // The IoCopyOp is a tool operator, it won't support the type inference.
       if (inst.op_type == "io_copy") continue;
       // LOG(INFO) << "- inferencing type " <<
@@ -76,7 +76,7 @@ class VariablePlaceInferencePass : public DebugPass {
         VLOG(3) << "--- var " << arg_name;
         auto* node = graph->RetrieveArgument(arg_name);
         CHECK(node) << "argument " << arg_name << " not exists in the graph";
-        auto& arg_node = node->AsArgument();
+        auto& arg_node = node->AsArg();
         if (!arg_node.type) {
           VLOG(4) << "set type " << *type << " " << node;
           arg_node.type = type;
@@ -94,9 +94,9 @@ class VariablePlaceInferencePass : public DebugPass {
         VLOG(3) << "--- var " << arg_name;
         auto* node = graph->RetrieveArgument(arg_name);
         CHECK(node) << "argument " << arg_name << " not exists in the graph";
-        auto& arg_node = node->AsArgument();
+        auto& arg_node = node->AsArg();
         if (!arg_node.type) {
-          node->AsArgument().type = type;
+          node->AsArg().type = type;
           VLOG(3) << "set type " << *type;
         }
       }
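Note the asymmetry this pass preserves in both hunks above: a type is only ever written into an argument node that has none, and an already-determined type is never overwritten. Reduced to its core (SetArgTypeIfUnknown is a hypothetical helper, not part of this patch):

    // Adopt the type declared by the kernel that produces or consumes the
    // argument, but never overwrite a type that is already determined.
    void SetArgTypeIfUnknown(mir::Node* node, const Type* type) {
      auto& arg = node->AsArg();
      if (!arg.type) arg.type = type;
    }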
diff --git a/paddle/fluid/lite/core/optimizer_test.cc b/paddle/fluid/lite/core/optimizer_test.cc
index 8e8827484fc53db8c28247f70f4c8ac26f540c13..85ae5981758dd6daf8eb13fcacdc524370ef6494 100644
--- a/paddle/fluid/lite/core/optimizer_test.cc
+++ b/paddle/fluid/lite/core/optimizer_test.cc
@@ -38,7 +38,7 @@ TEST(Optimizer, test) {
   optimizer.Run(std::move(program), places);
   auto runtime_program = optimizer.GenRuntimeProgram();
-  LOG(INFO) << "num instructions " << runtime_program->num_instructions();
+  LOG(INFO) << "num statements " << runtime_program->num_instructions();
 }
 
 }  // namespace lite

diff --git a/paddle/fluid/lite/core/type_system.h b/paddle/fluid/lite/core/type_system.h
index f55f4a35d5db48d6b8571e07a540875738c08529..888a7bf8cc0b73517f2dec47eb441d6ac5138add 100644
--- a/paddle/fluid/lite/core/type_system.h
+++ b/paddle/fluid/lite/core/type_system.h
@@ -152,7 +152,7 @@ class Type : public DataTypeBase {
   }
 
   // Can cast to another type. This is heavily used in MIR, by determine whether
-  // is is possible to add a instruction to transform a type to another.
+  // it is possible to add a statement to transform a type to another.
   virtual bool TypeCastable(const Type& type) const { return id_ == type.id(); }
 
   template