diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index 803d31e4aa26d1814288ded2808315dc1358c287..dd59141d2d3191e359f5582d2b53bea54e1e3d68 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -303,7 +303,7 @@ ir::OpInfo OpTranscriber::LoopkUpOpInfo(ir::IrContext* ctx,
   if (IsInplace(op_desc)) {
     target_op_name += "_";
   }
-  VLOG(6) << "[op name normalizing: " << op_desc.Type() << " to "
+  VLOG(6) << "[op name normalizing]: " << op_desc.Type() << " to "
           << target_op_name;
   auto op_info = ctx->GetRegisteredOpInfo(target_op_name);
   if (!op_info) {
@@ -963,7 +963,7 @@ struct SplitOpTranscriber : public OpTranscriber {
       const std::string& normalized_op_name,
       const OpInputInfoList& input_infos,
       ir::Program* program) override {
-    // input of pslit is [Tensor x, IntArray sections, Scalar(int) axis)]
+    // input of split is [Tensor x, IntArray sections, Scalar(int) axis]
 
     VLOG(10) << "[op:split][input] start";
 
@@ -1108,6 +1108,52 @@ struct AddNOpTranscriber : public OpTranscriber {
   }
 };
 
+ir::OpResult TranslateNumClassesForOneHot(ir::IrContext* ctx,
+                                          TranslationContext* param_map,
+                                          const OpDesc& op_desc,
+                                          const std::string& normalized_op_name,
+                                          const OpInputInfo& input_info,
+                                          ir::Program* program) {
+  const std::string legacy_attr_name = "depth";
+  const std::string legacy_tensor_name = "depth_tensor";
+  std::vector<std::string> legacy_vars;
+  if (op_desc.HasInput(legacy_tensor_name) &&
+      op_desc.Input(legacy_tensor_name).size() > 0) {
+    legacy_vars = op_desc.Input(legacy_tensor_name);
+    IR_ENFORCE(legacy_vars.size() == 1,
+               "depth_tensor input of one_hot_v2 MUST be a single tensor");
+    auto defining_info = param_map->find(legacy_vars[0]);
+    IR_ENFORCE(defining_info != param_map->end(),
+               "%s should exist in one_hot_v2 as input depth_tensor.",
+               legacy_vars[0]);
+    return defining_info->second.value;
+  }
+
+  auto& attribute_translator = AttributeTranslator::instance();
+  if (!op_desc.HasAttr(legacy_attr_name)) {
+    IR_THROW("Op %s should have attribute %s when input depth_tensor is absent",
+             op_desc.Type(),
+             legacy_attr_name);
+  }
+  paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name);
+  VLOG(10) << "[" << op_desc.Type() << "][attribute]"
+           << " name: " << legacy_attr_name << " " << legacy_attr.index();
+  ir::Attribute new_attr = attribute_translator(legacy_attr);
+
+  ir::Operation* defining_op =
+      InsertFullOperationForAttributeInput(ctx, program, new_attr);
+  return defining_op->result(0);
+}
+
+struct OneHotTranscriber : public OpTranscriber {
+  InputHandleFn GetSpecialInputHandlers(std::string input_name) override {
+    if (input_name != "num_classes") {
+      return nullptr;
+    }
+    return TranslateNumClassesForOneHot;
+  }
+};
+
 OpTranslator::OpTranslator() {
   general_handler = OpTranscriber();
   special_handlers["feed"] = FeedOpTranscriber();
@@ -1119,6 +1165,7 @@ OpTranslator::OpTranslator() {
   special_handlers["assign_value"] = AssignValueOpTranscriber();
   special_handlers["increment"] = IncrementOpTranscriber();
   special_handlers["rnn"] = RnnOpTranscriber();
+  special_handlers["one_hot_v2"] = OneHotTranscriber();
   special_handlers["add_n"] = AddNOpTranscriber();
 }
 
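The handler above covers the two ways a legacy program can feed num_classes into one_hot_v2: as the plain `depth` attribute, which is materialized through InsertFullOperationForAttributeInput, or as a `depth_tensor` input, whose defining value is forwarded from param_map. A minimal sketch of both legacy forms, assembled from the tests added below; `core.translate_newirprogram` is the same entry point those tests call, so this runs only on a build of this branch:

```python
# Sketch: build one program containing both legacy forms of one_hot_v2,
# then translate it to the new IR. Mirrors the tests added in this patch.
import numpy as np

import paddle
from paddle.fluid import core

paddle.enable_static()

main_program = paddle.static.Program()
with paddle.static.program_guard(main_program):
    label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")

    # Attribute form: num_classes is a Python int, so the legacy op records
    # a `depth` attribute; the transcriber inserts a full op for it.
    from_attr = paddle.nn.functional.one_hot(x=label, num_classes=10)

    # Tensor form: num_classes is a Tensor, so the legacy op gets a
    # `depth_tensor` input; the transcriber forwards its defining value.
    depth = paddle.assign(np.array([10], dtype=np.int32))
    from_tensor = paddle.nn.functional.one_hot(x=label, num_classes=depth)

_ = core.translate_newirprogram(main_program.desc)
```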
diff --git a/test/ir/new_ir/test_special_op_translator.py b/test/ir/new_ir/test_special_op_translator.py
index 4cae78ee6a6321c8dd97a9d2fe4debdce55214e7..5cfc2f9d875ec56c706f1d4ab476cd145c9c8f84 100644
--- a/test/ir/new_ir/test_special_op_translator.py
+++ b/test/ir/new_ir/test_special_op_translator.py
@@ -116,5 +116,41 @@ class TestRnnOpTranscriber(unittest.TestCase):
         new_exe = core.StandaloneExecutor(place, plan, new_scope)
 
 
+class TestOneHotOpTranscriber(unittest.TestCase):
+    def test_mutable_attribute(self):
+        place = core.Place()
+        place.set_place(paddle.CPUPlace())
+        new_scope = paddle.static.Scope()
+        main_program = paddle.static.Program()
+        with paddle.static.scope_guard(new_scope):
+            with paddle.static.program_guard(main_program):
+                depth = paddle.assign(np.array([10], dtype=np.int32))
+                label = paddle.static.data(
+                    name="label", shape=[-1, 1], dtype="int64"
+                )
+                one_hot_label = paddle.nn.functional.one_hot(
+                    x=label, num_classes=depth
+                )
+
+        _ = paddle.fluid.core.translate_newirprogram(main_program.desc)
+
+    def test_normal_attribute(self):
+        place = core.Place()
+        place.set_place(paddle.CPUPlace())
+        new_scope = paddle.static.Scope()
+        main_program = paddle.static.Program()
+        with paddle.static.scope_guard(new_scope):
+            with paddle.static.program_guard(main_program):
+                depth = 10
+                label = paddle.static.data(
+                    name="label", shape=[-1, 1], dtype="int64"
+                )
+                one_hot_label = paddle.nn.functional.one_hot(
+                    x=label, num_classes=depth
+                )
+
+        _ = paddle.fluid.core.translate_newirprogram(main_program.desc)
+
+
 if __name__ == "__main__":
     unittest.main()
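In test_mutable_attribute, paddle.assign is what turns depth into a variable wired to one_hot_v2's `depth_tensor` input, i.e. the param_map branch of TranslateNumClassesForOneHot. A quick way to confirm which branch a given program will exercise, sketched against the same program as that test (legacy-op introspection only; the value of the fallback `depth` attribute may vary by version):

```python
# Sketch: inspect the legacy one_hot_v2 op to see which branch the new
# transcriber will take during translation.
import numpy as np

import paddle

paddle.enable_static()

main_program = paddle.static.Program()
with paddle.static.program_guard(main_program):
    depth = paddle.assign(np.array([10], dtype=np.int32))
    label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
    paddle.nn.functional.one_hot(x=label, num_classes=depth)

for op in main_program.global_block().ops:
    if op.type == "one_hot_v2":
        # A non-empty depth_tensor input selects the param_map branch;
        # an empty one makes the handler fall back to the depth attribute.
        print(op.input("depth_tensor"), op.attr("depth"))
```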
diff --git a/test/legacy_test/test_nn_functional_hot_op.py b/test/legacy_test/test_nn_functional_hot_op.py
index ec412356f6f53e609c44adecda2187cb776741aa..0a4fb0942f8c1347eddefcc48c990c8b60ef7e8f 100644
--- a/test/legacy_test/test_nn_functional_hot_op.py
+++ b/test/legacy_test/test_nn_functional_hot_op.py
@@ -20,7 +20,6 @@ from eager_op_test import OpTest
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.framework import Program, program_guard
 from paddle.nn import functional
 
 
@@ -118,53 +117,6 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.check_output(check_dygraph=False)
 
 
-class TestOneHotOp_exception(unittest.TestCase):
-    def setUp(self):
-        self.op_type = 'one_hot_v2'
-        self.depth = 10
-        self.place = core.CPUPlace()
-        self.dimension = 12
-        self.x = core.LoDTensor()
-        x_lod = [[4, 1, 3, 3]]
-        data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))]
-        data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1])
-        self.x.set(data, self.place)
-        self.x.set_recursive_sequence_lengths(x_lod)
-
-    def test_check_output(self):
-        program = Program()
-        with program_guard(program):
-            x = paddle.static.data(
-                name='x',
-                shape=[-1, self.dimension],
-                dtype='float32',
-                lod_level=1,
-            )
-            x.desc.set_need_check_feed(False)
-            block = program.current_block()
-            one_hot_out = block.create_var(
-                name="one_hot_out",
-                type=core.VarDesc.VarType.LOD_TENSOR,
-                dtype='float32',
-            )
-            block.append_op(
-                type='one_hot',
-                inputs={'X': x},
-                attrs={'depth': self.depth},
-                outputs={'Out': one_hot_out},
-            )
-            exe = fluid.Executor(self.place)
-
-        def run():
-            exe.run(
-                feed={'x': self.x},
-                fetch_list=[one_hot_out],
-                return_numpy=False,
-            )
-
-        self.assertRaises(ValueError, run)
-
-
 class TestOneHotOpApi(unittest.TestCase):
     def test_api(self):
         num_classes = 10
diff --git a/test/legacy_test/test_one_hot_v2_op.py b/test/legacy_test/test_one_hot_v2_op.py
index a49060e536de8a1f777efd72fb9d868fe8becb20..8bb28ed4e3f865234f7cff73de947c3309a27b7f 100644
--- a/test/legacy_test/test_one_hot_v2_op.py
+++ b/test/legacy_test/test_one_hot_v2_op.py
@@ -20,7 +20,6 @@ from eager_op_test import OpTest
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.framework import Program, program_guard
 
 
 def one_hot_wrapper(x, depth_tensor, **keargs):
@@ -128,53 +127,6 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.check_output()
 
 
-class TestOneHotOp_exception(unittest.TestCase):
-    def setUp(self):
-        self.op_type = 'one_hot_v2'
-        self.depth = 10
-        self.place = core.CPUPlace()
-        self.dimension = 12
-        self.x = core.LoDTensor()
-        x_lod = [[4, 1, 3, 3]]
-        data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))]
-        data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1])
-        self.x.set(data, self.place)
-        self.x.set_recursive_sequence_lengths(x_lod)
-
-    def test_check_output(self):
-        program = Program()
-        with program_guard(program):
-            x = paddle.static.data(
-                name='x',
-                shape=[-1, self.dimension],
-                dtype='float32',
-                lod_level=1,
-            )
-            x.desc.set_need_check_feed(False)
-            block = program.current_block()
-            one_hot_out = block.create_var(
-                name="one_hot_out",
-                type=core.VarDesc.VarType.LOD_TENSOR,
-                dtype='float32',
-            )
-            block.append_op(
-                type='one_hot',
-                inputs={'X': x},
-                attrs={'depth': self.depth},
-                outputs={'Out': one_hot_out},
-            )
-            exe = fluid.Executor(self.place)
-
-        def run():
-            exe.run(
-                feed={'x': self.x},
-                fetch_list=[one_hot_out],
-                return_numpy=False,
-            )
-
-        self.assertRaises(ValueError, run)
-
-
 class TestOneHotOpApi(unittest.TestCase):
     def test_api(self):
         depth = 10