From a20051cd4d989b048db2056f94632a7d48ba4ff4 Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Sun, 2 Jul 2023 10:08:50 +0800
Subject: [PATCH] Fix fetch op and null type bug (#55027)

* fix_fetch_op_and_null_type_bug

* fix compile bug

* add test case
---
 paddle/fluid/ir/dialect/pd_op.yaml                 |  1 +
 .../ir/phi_kernel_adaptor/phi_kernel_util.cc       | 11 +++++--
 .../ir/phi_kernel_adaptor/phi_kernel_util.h        | 27 +++++++++++++++--
 .../ir/transforms/pd_op_to_kernel_pass.cc          |  4 ++-
 .../ir_adaptor/translator/op_translator.cc         |  2 ++
 paddle/phi/kernels/cpu/fetch_kernel.cc             |  5 ++++
 .../pattern_rewrite/pattern_rewrite_test.cc        |  2 +-
 test/ir/new_ir/test_standalone_new_ir.py           | 30 +++++++++++++++++++
 8 files changed, 74 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/ir/dialect/pd_op.yaml b/paddle/fluid/ir/dialect/pd_op.yaml
index 790d6f92348..c81e1cbcccb 100644
--- a/paddle/fluid/ir/dialect/pd_op.yaml
+++ b/paddle/fluid/ir/dialect/pd_op.yaml
@@ -18,6 +18,7 @@
   data_transform: {}
   attrs:
   - {typename: str, name: name}
+  - {typename: int, name: col}
   outputs:
   - {typename: Tensor, name: out, optional: false, intermediate: false}
   no_need_buffer: null
diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
index fcf74c2efab..0da1fdc1462 100644
--- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
+++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
@@ -62,8 +62,9 @@ void BuildScope(ir::Block* block,
     for (size_t i = 0; i < input_num; ++i) {
       auto var = scope->Var("fetch");
       auto fetch_list = var->GetMutable<paddle::framework::FetchList>();
-      // for now only support one fetch
-      fetch_list->resize(1);
+      int index =
+          (*it)->attributes().at("col").dyn_cast<ir::Int32Attribute>().data();
+      fetch_list->resize(index + 1);
     }
     continue;
   }
@@ -148,7 +149,11 @@ void BuildScope(ir::Block* block,
     }
     auto var = scope->Var(name);
     // Only support DenseTensor or Vector<DenseTensor>
-    if (ptr.type().isa<paddle::dialect::AllocatedDenseTensorType>()) {
+
+    if (!ptr.type()) {
+      var->GetMutable<phi::DenseTensor>();
+    } else if (ptr.type()
+                   .isa<paddle::dialect::AllocatedDenseTensorType>()) {
       var->GetMutable<phi::DenseTensor>();
     } else if (ptr.type().isa<ir::VectorType>()) {
       auto tensor_array =
diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
index dc3938d8dd9..bb12c7b9c89 100644
--- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
+++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
@@ -146,6 +146,19 @@ void BuildPhiContext(
       ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::Int32Attribute>().data());
     } else if (attr_type_name == "ir::BoolAttribute") {
       ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::BoolAttribute>().data());
+    } else if (attr_type_name == "ir::ArrayAttribute") {
+      auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
+      if (array_list[0].isa<ir::Int32Attribute>()) {
+        std::vector<int32_t> vec_res;
+        for (size_t i = 0; i < array_list.size(); ++i) {
+          vec_res.push_back(
+              array_list[i].dyn_cast<ir::Int32Attribute>().data());
+        }
+        ctx->EmplaceBackAttr(vec_res);
+      } else {
+        PADDLE_THROW(phi::errors::Unimplemented("attr type not supported [%s]",
+                                                attr_type_name));
+      }
     } else if (attr_type_name == "paddle::dialect::PlaceAttribute") {
       ctx->EmplaceBackAttr(
           attr_map[t].dyn_cast<paddle::dialect::PlaceAttribute>().data());
@@ -166,14 +179,22 @@
     // process fetch op
     auto fetch_var = scope->Var("fetch");
     auto* fetch_list = fetch_var->GetMutable<paddle::framework::FetchList>();
-    auto* out_tensor = &(PADDLE_GET(phi::DenseTensor, fetch_list->at(0)));
+    int index =
+        op->attributes().at("col").dyn_cast<ir::Int32Attribute>().data();
+    auto* out_tensor = &(PADDLE_GET(phi::DenseTensor, fetch_list->at(index)));
     ctx->EmplaceBackOutput(out_tensor);
   } else {
     for (size_t i = 0; i < op->num_results(); ++i) {
      ir::Value out_ptr = op->result(i);
      auto name = name_map.at(out_ptr);
-      ctx->EmplaceBackOutput(OutType(const_cast<phi::DenseTensor*>(
-          &(scope->Var(name)->Get<phi::DenseTensor>()))));
+      if (out_ptr.type()) {
+        ctx->EmplaceBackOutput(OutType(const_cast<phi::DenseTensor*>(
+            &(scope->Var(name)->Get<phi::DenseTensor>()))));
+      } else {
+        phi::DenseTensor* ptr = nullptr;
+        OutType null_out(ptr);
+        ctx->EmplaceBackOutput(null_out);
+      }

      if (output_map != nullptr) {
        // only deal with single input for now, [todo] need support multi input
diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
index c60abfd012f..4353b03fb9c 100644
--- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
+++ b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
@@ -218,7 +218,9 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog) {
       if ((*it)->num_results() > 0) {
         for (size_t i = 0; i < (*it)->num_results(); ++i) {
           auto result_type = (*it)->result(i).type();
-          if (result_type.isa<dialect::DenseTensorType>()) {
+          if (!result_type) {
+            op_output_types.push_back(result_type);
+          } else if (result_type.isa<dialect::DenseTensorType>()) {
             auto allocated_dense_tensor_dtype =
                 paddle::dialect::AllocatedDenseTensorType::get(
                     ctx,
diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index e1bf7ecfe2e..b12b067e72b 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -916,6 +916,8 @@ struct FetchOpTranscriber : public OpTranscriber {
     OpOutputTypeList op_output_types;
     ir::AttributeMap attribute_map = {
         {"name", ir::StrAttribute::get(ctx, op_desc.InputArgumentNames()[0])},
+        {"col",
+         ir::Int32Attribute::get(ctx, op_desc.GetAttrIfExists<int>("col"))},
     };

     op_output_types.push_back(op_inputs[0].type());
diff --git a/paddle/phi/kernels/cpu/fetch_kernel.cc b/paddle/phi/kernels/cpu/fetch_kernel.cc
index 5880b7687a9..6d8a5aec285 100644
--- a/paddle/phi/kernels/cpu/fetch_kernel.cc
+++ b/paddle/phi/kernels/cpu/fetch_kernel.cc
@@ -34,6 +34,11 @@ PD_REGISTER_KERNEL(fetch,
                    double,
                    int,
                    int64_t,
+                   uint8_t,
+                   int8_t,
+                   int16_t,
+                   phi::float16,
+                   phi::bfloat16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>,
                    bool) {}
diff --git a/test/cpp/ir/pattern_rewrite/pattern_rewrite_test.cc b/test/cpp/ir/pattern_rewrite/pattern_rewrite_test.cc
index 8a8a73b0934..c61836928a4 100644
--- a/test/cpp/ir/pattern_rewrite/pattern_rewrite_test.cc
+++ b/test/cpp/ir/pattern_rewrite/pattern_rewrite_test.cc
@@ -414,7 +414,7 @@ void BuildProgram(ir::Builder &builder) {  // NOLINT
   auto transpose2_op = builder.Build<paddle::dialect::TransposeOp>(
       transpose1_op.out(), std::vector<int>{0, 3, 1, 2});

-  builder.Build<paddle::dialect::FetchOp>(transpose2_op.out(), "out");
+  builder.Build<paddle::dialect::FetchOp>(transpose2_op.out(), "out", 0);
 }

 // TODO(wilber): Add a normal test.
diff --git a/test/ir/new_ir/test_standalone_new_ir.py b/test/ir/new_ir/test_standalone_new_ir.py
index f5aeb6ce075..ca924467d4e 100644
--- a/test/ir/new_ir/test_standalone_new_ir.py
+++ b/test/ir/new_ir/test_standalone_new_ir.py
@@ -89,5 +89,35 @@ class TestFeedOp(unittest.TestCase):
         np.testing.assert_array_equal(out[0], gold_res)


+class TestAddGradOp(unittest.TestCase):
+    def test_with_new_ir(self):
+        place = paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+
+        main_program = paddle.static.Program()
+        new_scope = paddle.static.Scope()
+        with paddle.static.scope_guard(new_scope):
+            with paddle.static.program_guard(main_program):
+                x = paddle.static.data("x", [2, 2], dtype="float32")
+                y = paddle.static.data("y", [2, 2], dtype="float32")
+                x.stop_gradient = False
+
+                z = x * y
+
+                paddle.static.gradients(z, x)
+
+                np_a = np.random.rand(2, 2).astype("float32")
+                np_b = np.random.rand(2, 2).astype("float32")
+                out = exe.run(
+                    main_program,
+                    feed={"x": np_a, "y": np_b},
+                    fetch_list=[z.name],
+                )
+
+                gold_res = np_a * np_b
+
+                np.testing.assert_array_equal(out[0], gold_res)
+
+
 if __name__ == "__main__":
     unittest.main()
-- 
GitLab
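Usage note: by threading the `col` attribute from FetchOpTranscriber through
BuildScope and BuildPhiContext, each fetch op now writes into its own slot of
the scope's fetch list instead of always slot 0, so a program can fetch
several variables in one run. The sketch below illustrates that user-visible
behavior; it is a hypothetical example, not part of this patch, and it assumes
the same new-IR executor configuration as the tests above (variable names and
the program itself are made up for illustration):

    import numpy as np
    import paddle

    paddle.enable_static()

    main_program = paddle.static.Program()
    with paddle.static.program_guard(main_program):
        x = paddle.static.data("x", [2, 2], dtype="float32")
        y = paddle.static.data("y", [2, 2], dtype="float32")
        z = x + y  # fetched via a fetch op with col=0
        w = x * y  # fetched via a fetch op with col=1

    exe = paddle.static.Executor(paddle.CPUPlace())
    np_a = np.random.rand(2, 2).astype("float32")
    np_b = np.random.rand(2, 2).astype("float32")

    # Two entries in fetch_list translate to two fetch ops whose `col`
    # attributes index consecutive slots of the fetch list.
    out = exe.run(
        main_program,
        feed={"x": np_a, "y": np_b},
        fetch_list=[z.name, w.name],
    )

    np.testing.assert_array_equal(out[0], np_a + np_b)
    np.testing.assert_array_equal(out[1], np_a * np_b)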