From 0f611f1829e3e7680b00b11534a52aa4eb8317ab Mon Sep 17 00:00:00 2001
From: kangguangli
Date: Wed, 16 Aug 2023 19:53:23 +0800
Subject: [PATCH] [NewIR] Insert `get_parameter` only for parameters (#56325)

* fix insert get_parameter op bug

* fix bug: insert only for parameters

* fix bug: wrong idx in vector

---------

Co-authored-by: zhangbo9674
---
 paddle/fluid/framework/executor_cache.cc      |  2 +-
 .../new_executor/new_ir_interpreter.cc        |  7 +-
 .../ir_adaptor/translator/op_translator.cc    | 84 +++++++------------
 .../translator/program_translator.cc          |  6 +-
 paddle/fluid/ir_adaptor/translator/utils.cc   | 11 +++
 paddle/fluid/ir_adaptor/translator/utils.h    |  3 +
 python/paddle/static/input.py                 | 21 ++++-
 test/ir/new_ir/test_ir_vjp.py                 |  2 +-
 test/prim/new_ir_prim/test_decomp_op.py       |  2 +-
 9 files changed, 72 insertions(+), 66 deletions(-)

diff --git a/paddle/fluid/framework/executor_cache.cc b/paddle/fluid/framework/executor_cache.cc
index eaa4c3ec0e5..76b16e9f457 100644
--- a/paddle/fluid/framework/executor_cache.cc
+++ b/paddle/fluid/framework/executor_cache.cc
@@ -367,7 +367,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
     }
   }
 
-  // add fetch with place op to program
+  // add data op to program
   auto *block = local_program.MutableBlock(0);
   for (auto &in_t : x) {
     auto name = in_t.name();
diff --git a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
index c3c3c7927fb..cf677348aec 100644
--- a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
+++ b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
@@ -478,9 +478,10 @@ std::string NewIRInterpreter::DebugValueInfo() {
                    platform::errors::PreconditionNotMet(
                        "var(%s) should exist in var_name_2_id_", kv.second));
     auto* var = InnerScope()->FindVar(kv.second);
-    PADDLE_ENFORCE(var != nullptr,
-                   platform::errors::PreconditionNotMet(
-                       "var(%s) should exist in var_name_2_id_", kv.second));
+    PADDLE_ENFORCE(
+        var != nullptr,
+        platform::errors::PreconditionNotMet(
+            "var(%s) should exist in scope (%p)", kv.second, InnerScope()));
     os << kv.first.impl() << " -> " << kv.second << " -> "
        << var_name_2_id_.at(kv.second) << " -> " << var << "\n";
   }
diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index 1b76aae2464..924c9e953f2 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -49,7 +49,9 @@ namespace translator {
 
 namespace {
 
-using ResultIdx = size_t;
+using IdxInOp = size_t;
+using IdxInVector = size_t;
+using ResultIdx = std::tuple<IdxInOp, IdxInVector>;
 using OpDesc = paddle::framework::OpDesc;
 using BlockDesc = paddle::framework::BlockDesc;
 using VarDesc = paddle::framework::VarDesc;
@@ -325,18 +327,15 @@ void OpTranscriber::InsertSliceOperationForInput(
   // scan all inputs to see if any of them is generated as a vector
   // so need an additional `SliceOp` to take it out.
   for (const auto& n : op_desc.Inputs()) {
-    auto& name = n.first;
     auto& args = n.second;
 
     for (const auto& arg_name : args) {
       bool check =
-          param_map->count(arg_name) != 0 || !yaml_input_set.count(arg_name);
-      IR_ENFORCE(check,
-                 "arg %s.%s as input should be exists before prasing %s",
-                 name,
-                 arg_name,
-                 op_desc.Type());
-      auto defining_info = (*param_map)[arg_name];
+          param_map->count(arg_name) != 0 && !yaml_input_set.count(arg_name);
+      if (!check) {
+        continue;
+      }
+      auto defining_info = param_map->at(arg_name);
       if (defining_info.generated_by_vector) {
         InsertSliceOperationForTarget(
             ctx, param_map, program, defining_info, arg_name);
@@ -391,7 +390,8 @@ std::vector<ir::OpResult> OpTranscriber::GenerateOperationInput(
       }
     }
     VLOG(10) << "[op:" << op_desc.Type() << "][input]" << info.name << " "
-             << legacy_input_name << " " << legacy_input_vars.size();
+             << legacy_input_name << " " << legacy_input_vars.size() << "["
+             << legacy_input_vars << "]";
 
     if (legacy_input_vars.empty() && mutable_attributes != nullptr &&
         mutable_attributes->count(info.name) != 0) {
@@ -507,7 +507,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
         ir::Type translated_var_type =
             type_translator[var->GetType()](ctx, *var);
         op_output_types.push_back(translated_var_type);
-        arg_to_idx[var->Name()] = cur_output_idx;
+        arg_to_idx[var->Name()] = {cur_output_idx, 0};
         continue;
       }
     }
@@ -535,7 +535,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
 
       ir::Type translated_var_type =
           type_translator[var->GetType()](ctx, *var);
-      arg_to_idx[var_name] = cur_output_idx;
+      arg_to_idx[var_name] = {cur_output_idx, 0};
       op_output_types.push_back(translated_var_type);
 
       // if src type is Vector
@@ -544,10 +544,12 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
                 << "[" << op_desc.Type() << "]" << info.name << " :"
                 << info.type_name << " var: " << legacy_output_name;
       std::vector<ir::Type> types;
-      for (const auto& var_name : legacy_output_vars) {
+      for (IdxInVector idx_in_vec = 0; idx_in_vec < legacy_output_vars.size();
+           idx_in_vec++) {
+        const auto& var_name = legacy_output_vars[idx_in_vec];
         if (var_name == kEmptyVarName) {
           types.emplace_back(nullptr);
-          arg_to_idx[var_name] = cur_output_idx;
+          arg_to_idx[var_name] = {cur_output_idx, idx_in_vec};
           continue;
         }
         VarDesc* var = block->FindVarRecursive(var_name);
@@ -557,7 +559,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
         ir::Type translated_var_type =
             type_translator[var->GetType()](ctx, *var);
         types.push_back(translated_var_type);
-        arg_to_idx[var_name] = cur_output_idx;
+        arg_to_idx[var_name] = {cur_output_idx, idx_in_vec};
       }
       ir::Type vec_type = ir::VectorType::get(ctx, types);
       op_output_types.push_back(vec_type);
@@ -613,45 +615,16 @@ void OpTranscriber::RecordOpResultMapping(ir::IrContext* ctx,
                                           const OpDesc& op_desc,
                                           ir::Operation* operation,
                                           const OpOutputMapping& arg_to_idx) {
-  for (const auto& n : op_desc.Outputs()) {
-    auto& name = n.first;
+  for (const auto& [arg_name, idx] : arg_to_idx) {
+    const auto& [idx_in_op, idx_in_vec] = idx;
     VLOG(10) << "[output recording]"
-             << "[" << op_desc.Type() << "]" << name;
-    const auto& args = n.second;
-    size_t idx_in_vector = 0;
-    for (const auto& arg_name : args) {
-      if (arg_name == kEmptyVarName) {
-        idx_in_vector++;
-        continue;
-      }
-      auto idx_iter = arg_to_idx.find(arg_name);
-      if (idx_iter == arg_to_idx.end()) {
-        VLOG(4) << "[output recording]"
-                << "[" << op_desc.Type() << "][skip]" << arg_name;
-        continue;
-      }
-      auto idx = idx_iter->second;
-      VLOG(10) << "[output recording]"
-               << "[" << op_desc.Type() << "]" << arg_name << " " << idx;
-
-      ir::OpResult value = operation->result(idx);
-      bool generated_by_vector = value.type().isa<ir::VectorType>();
-
-      // Specially process TensorArray, this because we cannot distinguish it
-      // with Vector by other conditions but we cannot support it
-      // like Vector
-      if (args.size() == 1) {
-        VarDesc* var = op_desc.Block()->FindVarRecursive(args[0]);
-        if (var->GetType() ==
-            paddle::framework::proto::VarType::LOD_TENSOR_ARRAY) {
-          generated_by_vector = false;
-        }
-      }
+             << "[" << op_desc.Type() << "]" << arg_name << " " << idx_in_op
+             << " " << idx_in_vec;
+    ir::OpResult value = operation->result(idx_in_op);
+    bool generated_by_vector = value.type().isa<ir::VectorType>();
 
-      (*param_map)[arg_name] = VariableDefiningInfo(
-          value, generated_by_vector, generated_by_vector ? idx_in_vector : -1);
-      idx_in_vector++;
-    }
+    (*param_map)[arg_name] = VariableDefiningInfo(
+        value, generated_by_vector, generated_by_vector ? idx_in_vec : -1);
   }
 }
@@ -1439,9 +1412,10 @@ struct ElementwiseGradTranscriber : public OpTranscriber {
     if (idx_iter == arg_to_idx.end()) {
       IR_THROW("op[%s] should have got its y_grad", op_desc.Type());
     }
-    auto idx = idx_iter->second;
+    auto [idx_in_op, idx_in_vec] = idx_iter->second;
     VLOG(10) << "[output recording]"
-             << "[" << op_desc.Type() << "]" << y_grad_var_name << " " << idx;
+             << "[" << op_desc.Type() << "]" << y_grad_var_name << " "
+             << idx_in_op << " " << idx_in_vec;
 
     auto y_names = op_desc.Input("Y", true);
     auto y_name = y_names[0];
@@ -1465,7 +1439,7 @@ struct ElementwiseGradTranscriber : public OpTranscriber {
         y_type.dyn_cast();
     std::vector<int64_t> y_shape = phi::vectorize(y_tensor_type.dims());
 
-    ir::OpResult value = operation->result(idx);
+    ir::OpResult value = operation->result(idx_in_op);
     ir::Builder builder(ctx, operation->GetParent());
     auto reshape_op = builder.Build(value, y_shape);
     (*param_map)[y_grad_var_name] =
diff --git a/paddle/fluid/ir_adaptor/translator/program_translator.cc b/paddle/fluid/ir_adaptor/translator/program_translator.cc
index 342d5f1aa0a..cc076dd30ba 100644
--- a/paddle/fluid/ir_adaptor/translator/program_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/program_translator.cc
@@ -143,12 +143,12 @@ void ProgramTranslator::GetParameterForSingleBlock(const BlockDesc& block) {
         var_desc = block.FindVarRecursive(var_name);
       }
 
-      bool need_get_parameter_op = is_parameter || is_unseen_variable;
+      bool need_get_parameter_op = is_parameter && is_unseen_variable;
       if (need_get_parameter_op) {
         ir::Operation* op = InsertGetParamaterOp(ctx_, var_desc);
         program_->block()->push_back(op);
         param_map_[var_name] = VariableDefiningInfo(op->result(0));
-        VLOG(10) << "[op translated][get parameter]" << op;
+        VLOG(10) << "[op translated][get parameter]" << var_name;
 
         program_->SetParameter(var_name, nullptr);
         parameter_visited_.insert(var_name);
@@ -224,7 +224,7 @@ void ProgramTranslator::SetParameterFromSingleBlock(const BlockDesc& block) {
 
         insert_pos++;
         block->insert(insert_pos, op);
-        VLOG(10) << "[op translated][set parameter]" << op;
+        VLOG(10) << "[op translated][set parameter]" << var_name;
 
         program_->SetParameter(var_name, nullptr);
         parameter_visited_.insert(var_name);
diff --git a/paddle/fluid/ir_adaptor/translator/utils.cc b/paddle/fluid/ir_adaptor/translator/utils.cc
index 75bcebd16d7..592d248f0d1 100644
--- a/paddle/fluid/ir_adaptor/translator/utils.cc
+++ b/paddle/fluid/ir_adaptor/translator/utils.cc
@@ -18,6 +18,7 @@
 
 #include "paddle/ir/core/builtin_attribute.h"
 #include "paddle/ir/core/builtin_type.h"
+#include "paddle/ir/core/utils.h"
 
 namespace paddle {
 namespace translator {
@@ -46,5 +47,15 @@ ir::Operation* InsertSliceOperationForTarget(
   return operation;
 }
 
+std::ostream& operator<<(std::ostream& os,
+                         const std::vector<std::string>& vec_str) {
+  ir::PrintInterleave(
+      vec_str.begin(),
+      vec_str.end(),
+      [&os](std::string s) { os << s; },
+      [&os]() { os << ", "; });
+  return os;
+}
+
 }  // namespace translator
 }  // namespace paddle
diff --git a/paddle/fluid/ir_adaptor/translator/utils.h b/paddle/fluid/ir_adaptor/translator/utils.h
index a6e5a8fd209..a207e727ed8 100644
--- a/paddle/fluid/ir_adaptor/translator/utils.h
+++ b/paddle/fluid/ir_adaptor/translator/utils.h
@@ -31,5 +31,8 @@ ir::Operation* InsertSliceOperationForTarget(
     const VariableDefiningInfo& defining_info,
     const std::string& arg_name);
 
+std::ostream& operator<<(std::ostream& os,
+                         const std::vector<std::string>& vec_str);
+
 }  // namespace translator
 }  // namespace paddle
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index 4f856227bda..3fae1a65731 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
+
 import paddle
 from paddle.fluid import Variable, core
 from paddle.fluid.data_feeder import check_type
@@ -96,7 +98,7 @@ def data(name, shape, dtype=None, lod_level=0):
             shape[i] = -1
 
     if dtype:
-        return helper.create_global_variable(
+        out = helper.create_global_variable(
             name=name,
             shape=shape,
             dtype=dtype,
@@ -107,7 +109,7 @@ def data(name, shape, dtype=None, lod_level=0):
             need_check_feed=True,
         )
     else:
-        return helper.create_global_variable(
+        out = helper.create_global_variable(
             name=name,
             shape=shape,
             dtype=paddle.get_default_dtype(),
@@ -118,6 +120,21 @@ def data(name, shape, dtype=None, lod_level=0):
             need_check_feed=True,
         )
 
+    if os.environ.get("FLAGS_enable_new_ir_in_executor", None):
+        helper = LayerHelper('data', **locals())
+        helper.append_op(
+            type='data',
+            inputs={},
+            outputs={'out': out},
+            attrs={
+                'index': 0,
+                'dtype': 0,
+                'place': 0,
+                'name': name,
+            },
+        )
+    return out
+
 
 class InputSpec:
     """
diff --git a/test/ir/new_ir/test_ir_vjp.py b/test/ir/new_ir/test_ir_vjp.py
index 45da7162664..595715aa5f2 100644
--- a/test/ir/new_ir/test_ir_vjp.py
+++ b/test/ir/new_ir/test_ir_vjp.py
@@ -106,7 +106,7 @@ class TestMeanVjp(unittest.TestCase):
                 .source()
                 .get_defining_op()
                 .name(),
-                "builtin.get_parameter",
+                "pd.data",
             )
             self.assertEqual(
                 grad_outs[0][0]
diff --git a/test/prim/new_ir_prim/test_decomp_op.py b/test/prim/new_ir_prim/test_decomp_op.py
index bb6ae6d69ef..ecf9c859079 100644
--- a/test/prim/new_ir_prim/test_decomp_op.py
+++ b/test/prim/new_ir_prim/test_decomp_op.py
@@ -47,7 +47,7 @@ class TestBuildOp(unittest.TestCase):
         self.assertEqual(
             op_name_list,
             [
-                'builtin.get_parameter',
+                'pd.data',
                 'pd.matmul',
                 'pd.add',
                 'pd.full_int_array',
--
GitLab
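
Note (not part of the patch): a minimal sketch of how the changed `paddle.static.data` path can be exercised, assuming a Paddle build that contains this commit and supports `FLAGS_enable_new_ir_in_executor`; the variable names, shapes, and the `fc` layer are purely illustrative.

import os

# Flag checked in python/paddle/static/input.py above: when set, paddle.static.data
# appends a legacy `data` op (later translated to `pd.data`) instead of relying on
# a `get_parameter` op being inserted for feed variables.
os.environ["FLAGS_enable_new_ir_in_executor"] = "1"

import paddle

paddle.enable_static()

main_program = paddle.static.Program()
with paddle.static.program_guard(main_program):
    # Feed variable: with this patch it is produced by a `data` op, while real
    # parameters (e.g. the fc weight) should still be handled via get_parameter
    # during translation, per GetParameterForSingleBlock.
    x = paddle.static.data(name="x", shape=[4, 8], dtype="float32")
    y = paddle.static.nn.fc(x, size=2)

# Expected to include a 'data' op in the legacy program under this flag.
print([op.type for op in main_program.block(0).ops])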