diff --git a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc
index f4bb35cb5a9af54d290d64be04fcd25ed025644d..a8dbbedd03859a07cf1573061e281bcc6e78d5fc 100644
--- a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc
+++ b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc
@@ -954,7 +954,9 @@ void BuildOpFuncList(
     auto op_name =
         attr_map.at("op_name").dyn_cast<::ir::StrAttribute>().data();
     op_func_node.phi_op_name_ = op_name;
-    if (op_name == "builtin.combine" || op_name == "pd.feed") {
+    if (op_name == "builtin.combine" || op_name == "pd.feed" ||
+        op_name == "builtin.set_parameter" ||
+        op_name == "builtin.get_parameter") {
       VLOG(6) << "skip process " << op_name;
       continue;
     }
diff --git a/paddle/fluid/framework/new_executor/standalone_executor.cc b/paddle/fluid/framework/new_executor/standalone_executor.cc
index aed9150b547ab9414a264213d70f224cf446e72a..24931a15e121e8ac8755822f89a8466f89d7bed6 100644
--- a/paddle/fluid/framework/new_executor/standalone_executor.cc
+++ b/paddle/fluid/framework/new_executor/standalone_executor.cc
@@ -61,7 +61,7 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place,
     execution_config.create_local_scope = false;
     execution_config.skip_gc_vars = job->SkipGcVars();
 
-    if (FLAGS_enable_new_ir_in_executor) {
+    if (FLAGS_enable_new_ir_in_executor && platform::is_cpu_place(place)) {
       VLOG(6) << "begin to translate" << std::endl;
       auto base_program = paddle::TranslateLegacyProgramToProgram(*program);
       auto kernel_program =
diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
index 0da1fdc1462952faa9470116ad1cfab62e4167fa..98e4487da46656a449b5206c852966474584ec61 100644
--- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
+++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
@@ -48,8 +48,7 @@ void BuildScope(ir::Block* block,
                 std::unordered_map<ir::Value, std::string>* name_map) {
   std::unordered_map<ir::Value, std::string> map_test;
 
-  // int count = name_map->size();
-  int count = 0;
+  int count = name_map->size();
   for (auto it = block->begin(); it != block->end(); ++it) {
     size_t input_num = (*it)->num_operands();
     auto attr_map = (*it)->attributes();
@@ -69,6 +68,35 @@ void BuildScope(ir::Block* block,
       continue;
     }
 
+    if (op_name == "builtin.set_parameter") {
+      auto param_name = (*it)
+                            ->attributes()
+                            .at("parameter_name")
+                            .dyn_cast<ir::StrAttribute>()
+                            .data();
+
+      auto in_ptr = (*it)->operand(0);
+      // change operand name to param_name
+
+      auto orig_name = name_map->at(in_ptr);
+      (*name_map)[in_ptr] = param_name;
+      scope->Rename(orig_name, param_name);
+      continue;
+    }
+
+    if (op_name == "builtin.get_parameter") {
+      auto param_name = (*it)
+                            ->attributes()
+                            .at("parameter_name")
+                            .dyn_cast<ir::StrAttribute>()
+                            .data();
+
+      auto out_ptr = (*it)->result(0);
+
+      name_map->emplace(out_ptr, param_name);
+      continue;
+    }
+
     if (op_name == "pd.feed") {
       auto ptr = (*it)->result(0);
       std::string name = "inner_var_" + std::to_string(count++);
@@ -123,14 +151,14 @@ void BuildScope(ir::Block* block,
     if (input_num > 0) {
       for (size_t i = 0; i < input_num; ++i) {
         auto ptr = (*it)->operand(i);
-        std::string name;
-        if (name_map->find(ptr) != name_map->end()) {
-          name = name_map->at(ptr);
-        } else {
-          PADDLE_THROW(phi::errors::PreconditionNotMet(
-              "input should in name map, [%d] 'th input of [%s] op",
-              i,
-              op_name));
+        if (ptr) {
+          PADDLE_ENFORCE_NE(
+              name_map->find(ptr),
+              name_map->end(),
+              phi::errors::PreconditionNotMet(
+                  "input should in name map, [%d] 'th input of [%s] op",
+                  i,
+                  op_name));
         }
       }
     }
@@ -149,7 +177,6 @@ void BuildScope(ir::Block* block,
     }
     auto var = scope->Var(name);
     // Only support DenseTensor or Vector<DenseTensor>
-
     if (!ptr.type()) {
       var->GetMutable<phi::DenseTensor>();
     } else if (ptr.type()
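Note on the BuildScope hunks above: builtin.set_parameter re-binds an already-built variable to its parameter name (via Scope::Rename), while builtin.get_parameter simply points its result at the existing parameter variable; starting `count` at `name_map->size()` keeps newly generated `inner_var_N` names from colliding with entries already in the map. Below is a minimal self-contained sketch of that aliasing logic; `Value`, `Scope`, and the literal names are stand-ins for illustration, not the real ir::/framework API.

```cpp
// Toy model of the parameter handling in BuildScope. `Value` and `Scope`
// are simplified stand-ins for ir::Value and paddle::framework::Scope.
#include <cassert>
#include <cstdio>
#include <string>
#include <unordered_map>

using Value = int;  // stand-in for ir::Value

struct Scope {
  std::unordered_map<std::string, int> vars;  // name -> variable payload
  void Rename(const std::string& from, const std::string& to) {
    vars[to] = vars.at(from);  // re-home the variable under the new name
    vars.erase(from);
  }
};

int main() {
  Scope scope;
  std::unordered_map<Value, std::string> name_map;

  // A regular op produced value 7 under a generated inner name.
  Value v = 7;
  name_map[v] = "inner_var_0";
  scope.vars["inner_var_0"] = 42;

  // builtin.set_parameter("fc.w_0") on v: alias the value to the parameter
  // name so later ops (and the outer scope) see "fc.w_0", not "inner_var_0".
  std::string param_name = "fc.w_0";
  std::string orig_name = name_map.at(v);
  name_map[v] = param_name;
  scope.Rename(orig_name, param_name);

  // builtin.get_parameter("fc.w_0") yielding value 8: no new variable is
  // created; the result is bound to the existing parameter name.
  Value out = 8;
  name_map.emplace(out, param_name);

  assert(scope.vars.count("fc.w_0") == 1);
  assert(scope.vars.count("inner_var_0") == 0);
  std::printf("%s -> %d\n", name_map.at(out).c_str(), scope.vars.at("fc.w_0"));
}
```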
diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
index e2545eff79b4e27d77bdec502148b453edd8d98a..98fff8ede3e9d0e4f5eaf33314f771d0afc370e3 100644
--- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
+++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
@@ -71,6 +71,12 @@ void BuildPhiContext(
         phi::errors::NotFound("param [%s] MUST in name2id map", t));
     auto index = op_yaml_info.Name2Id().at(t);
     ir::Value ptr = op->operand(index);
+    if (!ptr) {
+      phi::DenseTensor* ptr = nullptr;
+      OutType in_ptr(ptr);
+      ctx->EmplaceBackInput(in_ptr);
+      continue;
+    }
     auto in_var_name = name_map.at(ptr);
     VLOG(6) << "ctx->EmplaceBackInput: " << t << "\t" << in_var_name;
 
@@ -142,10 +148,14 @@ void BuildPhiContext(
           attr_map[t].dyn_cast<paddle::dialect::DataTypeAttribute>().data());
     } else if (attr_type_name == "ir::Int32Attribute") {
       ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::Int32Attribute>().data());
+    } else if (attr_type_name == "ir::Int64Attribute") {
+      ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::Int64Attribute>().data());
     } else if (attr_type_name == "ir::FloatAttribute") {
       ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::FloatAttribute>().data());
     } else if (attr_type_name == "ir::BoolAttribute") {
       ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::BoolAttribute>().data());
+    } else if (attr_type_name == "ir::StrAttribute") {
+      ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::StrAttribute>().data());
     } else if (attr_type_name == "ir::ArrayAttribute<phi::ScalarAttribute>") {
       auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
       std::vector<phi::Scalar> vec_res;
@@ -160,6 +170,44 @@ void BuildPhiContext(
               array_list[i].dyn_cast<ir::Int32Attribute>().data());
         }
       }
+    } else if (attr_type_name == "ir::ArrayAttribute<ir::FloatAttribute>") {
+      auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
+      std::vector<float> vec_res;
+      if (array_list.size() > 0) {
+        if (array_list[0].isa<ir::FloatAttribute>()) {
+          for (size_t i = 0; i < array_list.size(); ++i) {
+            vec_res.push_back(
+                array_list[i].dyn_cast<ir::FloatAttribute>().data());
+          }
+
+        } else {
+          PADDLE_THROW(phi::errors::Unimplemented("attr type not support [%s] ",
+                                                  attr_type_name));
+        }
+      }
+      ctx->EmplaceBackAttr(vec_res);
+    } else if (attr_type_name == "ir::ArrayAttribute<ir::Int64Attribute>") {
+      std::cerr << "int64 array" << std::endl;
+      auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
+      std::cerr << "len " << array_list.size() << std::endl;
+
+      std::vector<int64_t> vec_res;
+      if (array_list.size() > 0) {
+        PADDLE_ENFORCE_EQ(
+            array_list[0].isa<ir::Int64Attribute>(),
+            true,
+            phi::errors::PreconditionNotMet(
+                "Element in array list MUST be ir::Int64Attribute "));
+        std::cerr << "int 64" << std::endl;
+
+        for (size_t i = 0; i < array_list.size(); ++i) {
+          std::cerr << "i " << i << "\t"
+                    << array_list[i].dyn_cast<ir::Int64Attribute>().data()
+                    << std::endl;
+          vec_res.push_back(
+              array_list[i].dyn_cast<ir::Int64Attribute>().data());
+        }
+      }
       ctx->EmplaceBackAttr(vec_res);
     } else if (attr_type_name == "paddle::dialect::PlaceAttribute") {
       ctx->EmplaceBackAttr(
diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
index 4353b03fb9ca8cd39fa62cb7fd87ac0eb6ae5e01..ef3d499389e9970501057163f95368711bbd5f66 100644
--- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
+++ b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
@@ -132,6 +132,9 @@ phi::KernelKey GetKernelKey(
       }
 
       auto input_tmp = op->operand(i);
+      if (!input_tmp) {
+        continue;
+      }
       auto new_input_tmp = map_value_pair.at(input_tmp);
 
       auto input_type = new_input_tmp.type();
@@ -264,6 +267,15 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog) {
     if ((*it)->num_operands() > 0) {
       for (size_t i = 0; i < (*it)->num_operands(); ++i) {
         auto cur_in = (*it)->operand(i);
+        if (!cur_in) {
+          vec_inputs.push_back(ir::OpResult());
+          continue;
+        }
+        PADDLE_ENFORCE_EQ(
+            map_value_pair.count(cur_in),
+            true,
+            phi::errors::PreconditionNotMet(
+                "[%d]'s input of [%s] op MUST in map pair", i, (*it)->name()));
         auto new_in = map_value_pair.at(cur_in);
         auto new_in_type = new_in.type();
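The header and pass hunks above share one pattern: an operand slot may hold a null ir::Value (an optional input), and each consumer now guards the map lookup instead of crashing, forwarding an empty slot so positions stay aligned. Below is a self-contained sketch of that guard; `Value`, `OpResult`, and `map_value_pair` here are simplified stand-ins, not the real ir:: classes.

```cpp
// Sketch of the null-operand guard used in GetKernelKey and
// PdOpLowerToKernelPass above, over stand-in types.
#include <cstdio>
#include <map>
#include <optional>
#include <stdexcept>
#include <vector>

using Value = std::optional<int>;     // stand-in for ir::Value (empty == unset)
using OpResult = std::optional<int>;  // stand-in for ir::OpResult

int main() {
  std::map<int, int> map_value_pair = {{1, 101}, {2, 102}};
  // Middle operand is an optional input that was never bound.
  std::vector<Value> operands = {Value{1}, Value{}, Value{2}};

  std::vector<OpResult> vec_inputs;
  for (size_t i = 0; i < operands.size(); ++i) {
    const Value& cur_in = operands[i];
    if (!cur_in) {
      // Optional input left empty: push an empty OpResult instead of
      // failing the lookup, so slot positions stay aligned.
      vec_inputs.push_back(OpResult{});
      continue;
    }
    if (map_value_pair.count(*cur_in) == 0) {
      throw std::runtime_error("input of op MUST in map pair");
    }
    vec_inputs.push_back(OpResult{map_value_pair.at(*cur_in)});
  }

  std::printf("kept %zu slots, slot 1 empty: %d\n", vec_inputs.size(),
              static_cast<int>(!vec_inputs[1].has_value()));
}
```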
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index c3475edbfa438a95d705466fea246d3ed7a5f397..962ab37cbbae6e3c3f37ec5432e01c57244c6ee4 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -132,10 +132,6 @@
       { axis : dim, keepdim : keep_dim}
   outputs:
     out : Out
-  int_array:
-    axis :
-      data_type : int
-      support_tensor : true
   manual_signature : [all]
   extra :
     attrs : [bool use_mkldnn = false]
@@ -163,10 +159,6 @@
       { axis : dim, keepdim : keep_dim }
   extra :
     attrs : [bool use_mkldnn = false]
-  int_array:
-    axis :
-      data_type : int
-      support_tensor : true
   get_expected_kernel_type :
     amax_grad : GetReduceGradExpectedKernelType
   manual_signature : [amax]
@@ -181,10 +173,6 @@
       { axis : dim, keepdim : keep_dim }
   extra :
     attrs : [bool use_mkldnn = false]
-  int_array:
-    axis :
-      data_type : int
-      support_tensor : true
   get_expected_kernel_type :
     amin_grad : GetReduceGradExpectedKernelType
   manual_signature : [amin]
@@ -207,10 +195,6 @@
       { axis : dim, keepdim : keep_dim }
   extra :
     attrs : [bool use_mkldnn = false]
-  int_array:
-    axis :
-      data_type : int
-      support_tensor : true
   get_expected_kernel_type :
     any : GetReduceOpUseInputPlaceExpectedKernelType
   manual_signature : [any]
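Dropping the int_array/support_tensor override means (assuming the usual compat lowering) that `axis` for these reduce ops reaches the new IR as a plain int64 array attribute, which is the case the new `ir::ArrayAttribute<ir::Int64Attribute>` branch in BuildPhiContext handles. A toy version of that dispatch follows; the `Attr` struct and `UnpackInt64Array` helper are hypothetical stand-ins for the real attribute classes and their `isa`/`dyn_cast` interface.

```cpp
// Toy version of the int64-array branch of the BuildPhiContext attribute
// dispatch; the tagged struct stands in for ir::Attribute.
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

struct Attr {
  std::string type;   // e.g. "ir::Int64Attribute"
  long long i64 = 0;  // payload when type is an int64 attribute
};

// Verify the element type, then unpack into the vector the kernel expects.
std::vector<int64_t> UnpackInt64Array(const std::vector<Attr>& array_list) {
  std::vector<int64_t> vec_res;
  if (!array_list.empty()) {
    if (array_list[0].type != "ir::Int64Attribute") {
      throw std::runtime_error(
          "Element in array list MUST be ir::Int64Attribute");
    }
    for (const Attr& a : array_list) vec_res.push_back(a.i64);
  }
  return vec_res;
}

int main() {
  // axis = [0, 2] as it might arrive for a reduce op such as `any` or `amax`.
  std::vector<Attr> axis = {{"ir::Int64Attribute", 0},
                            {"ir::Int64Attribute", 2}};
  for (int64_t v : UnpackInt64Array(axis)) {
    std::printf("axis %lld\n", static_cast<long long>(v));
  }
}
```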
diff --git a/test/legacy_test/CMakeLists.txt b/test/legacy_test/CMakeLists.txt
index 2af0e79d9690e7f4a8e8d954f8dc73c0b83bceb7..064108b286f844071978dceea9ee9efbebe6344b 100644
--- a/test/legacy_test/CMakeLists.txt
+++ b/test/legacy_test/CMakeLists.txt
@@ -1314,6 +1314,16 @@ foreach(STATIC_BUILD_TEST ${STATIC_BUILD_TESTS})
                   FLAGS_new_executor_static_build=true)
 endforeach()
 
+set(NEW_IR_COVERAGE_TESTS test_label_smooth_op test_instance_norm_op_v2)
+
+foreach(NEW_IR_COVERAGE_TEST ${NEW_IR_COVERAGE_TESTS})
+  py_test_modules(
+    ${NEW_IR_COVERAGE_TEST}_new_ir MODULES ${NEW_IR_COVERAGE_TEST} ENVS
+    FLAGS_enable_new_ir_in_executor=true)
+endforeach()
+
+set_tests_properties(test_instance_norm_op_v2_new_ir PROPERTIES TIMEOUT 120)
+
 set_tests_properties(test_decoupled_py_reader_static_build PROPERTIES TIMEOUT
                                                                       120)
 set_tests_properties(test_fuse_bn_act_pass_static_build PROPERTIES TIMEOUT 120)
diff --git a/test/legacy_test/test_instance_norm_op_v2.py b/test/legacy_test/test_instance_norm_op_v2.py
index ab687aeb034f5795ce58b4008fb8d898b501c5e3..e3b04c0c7fba8028ff1be9af0a828b02136ecc6e 100644
--- a/test/legacy_test/test_instance_norm_op_v2.py
+++ b/test/legacy_test/test_instance_norm_op_v2.py
@@ -380,4 +380,5 @@ class TestPrimForwardAndBackward(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/test/legacy_test/test_numel_op.py b/test/legacy_test/test_numel_op.py
index 5c8c477877c3261829732e495a4f3679b18d2316..9d87d242a87f8ea7af1bb982c02fc5492c72fc6b 100644
--- a/test/legacy_test/test_numel_op.py
+++ b/test/legacy_test/test_numel_op.py
@@ -153,4 +153,5 @@ class TestNumelAPI(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/test/legacy_test/test_randperm_op.py b/test/legacy_test/test_randperm_op.py
index 5df2873b93ec4cfba354ca15273999adf7e262ef..14bf49b387b63fe73b7facc3900f38239c557a8b 100644
--- a/test/legacy_test/test_randperm_op.py
+++ b/test/legacy_test/test_randperm_op.py
@@ -378,4 +378,5 @@ class TestRandomValue(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/test/legacy_test/test_yolov3_loss_op.py b/test/legacy_test/test_yolov3_loss_op.py
index 79994d87e4266c1eb333bd0e091c1e18a7b36024..afe1dbc1c0a1fd12b86ff6611c962e95213ab606 100644
--- a/test/legacy_test/test_yolov3_loss_op.py
+++ b/test/legacy_test/test_yolov3_loss_op.py
@@ -475,4 +475,5 @@ class TestYolov3LossStatic(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()