Unverified · Commit 1dcc3bf7 authored by kangguangli, committed by GitHub

[NewIR] fix one_hot_v2 compat (#55317)

* fix

* fix

* fix

* fix

* fix

* fix coverage ci

* add test case
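
In the legacy ProgramDesc, one_hot_v2 can receive its depth either as a constant `depth` attribute or as a mutable `depth_tensor` input, while the new IR op exposes a single `num_classes` operand; the transcriber added below bridges both forms. A minimal sketch of the two cases, mirroring the tests added in this commit (assumes a standalone script, hence the explicit paddle.enable_static()):

    import numpy as np
    import paddle

    paddle.enable_static()  # the translator operates on static Programs

    main_program = paddle.static.Program()
    with paddle.static.program_guard(main_program):
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")

        # Case 1: depth given as a plain attribute; the transcriber
        # materializes it with a full op.
        one_hot_a = paddle.nn.functional.one_hot(x=label, num_classes=10)

        # Case 2: depth given as a tensor (mutable attribute); the
        # transcriber forwards the value that defines `depth_tensor`.
        depth = paddle.assign(np.array([10], dtype=np.int32))
        one_hot_b = paddle.nn.functional.one_hot(x=label, num_classes=depth)

    # Both forms should now translate without raising.
    _ = paddle.fluid.core.translate_newirprogram(main_program.desc)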
Parent 72a910e4
@@ -303,7 +303,7 @@ ir::OpInfo OpTranscriber::LoopkUpOpInfo(ir::IrContext* ctx,
   if (IsInplace(op_desc)) {
     target_op_name += "_";
   }
-  VLOG(6) << "[op name normalizing: " << op_desc.Type() << " to "
+  VLOG(6) << "[op name normalizing]: " << op_desc.Type() << " to "
           << target_op_name;
   auto op_info = ctx->GetRegisteredOpInfo(target_op_name);
   if (!op_info) {
@@ -963,7 +963,7 @@ struct SplitOpTranscriber : public OpTranscriber {
       const std::string& normalized_op_name,
       const OpInputInfoList& input_infos,
       ir::Program* program) override {
-    // input of pslit is [Tensor x, IntArray sections, Scalar(int) axis)]
+    // input of split is [Tensor x, IntArray sections, Scalar(int) axis]
     VLOG(10) << "[op:split][input] start";
@@ -1108,6 +1108,55 @@ struct AddNOpTranscriber : public OpTranscriber {
   }
 };
+
+ir::OpResult TranslateNumClassesForOneHot(ir::IrContext* ctx,
+                                          TranslationContext* param_map,
+                                          const OpDesc& op_desc,
+                                          const std::string& normalized_op_name,
+                                          const OpInputInfo& input_info,
+                                          ir::Program* program) {
+  const std::string legacy_attr_name = "depth";
+  const std::string legacy_tensor_name = "depth_tensor";
+  std::vector<std::string> legacy_vars;
+  // Prefer the mutable `depth_tensor` input when the legacy op carries one.
+  if (op_desc.HasInput(legacy_tensor_name) &&
+      op_desc.Input(legacy_tensor_name).size() > 0) {
+    legacy_vars = op_desc.Input(legacy_tensor_name);
+    IR_ENFORCE(legacy_vars.size() == 1,
+               "depth_tensor input of one_hot_v2 MUST be a single tensor");
+    auto var_name = legacy_vars[0];
+    auto defining_info = param_map->find(var_name);
+    IR_ENFORCE(defining_info != param_map->end(),
+               "%s should exist in one_hot_v2 as input depth_tensor.",
+               var_name);
+    return defining_info->second.value;
+  }
+
+  // Otherwise fall back to the constant `depth` attribute and materialize it
+  // with a full op, so the new IR op still receives a tensor operand.
+  auto& attribute_translator = AttributeTranslator::instance();
+  if (!op_desc.HasAttr(legacy_attr_name)) {
+    IR_THROW("Op %s is missing required attribute %s",
+             op_desc.Type(),
+             legacy_attr_name);
+  }
+  paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name);
+  VLOG(10) << "[" << op_desc.Type() << "][attribute]"
+           << " name: " << legacy_attr_name << " " << legacy_attr.index();
+  ir::Attribute new_attr = attribute_translator(legacy_attr);
+  ir::Operation* defining_op =
+      InsertFullOperationForAttributeInput(ctx, program, new_attr);
+  return defining_op->result(0);
+}
+
+struct OneHotTranscriber : public OpTranscriber {
+  InputHandleFn GetSpecialInputHandlers(std::string input_name) override {
+    if (input_name != "num_classes") {
+      return nullptr;
+    }
+    return TranslateNumClassesForOneHot;
+  }
+};
 
 OpTranslator::OpTranslator() {
   general_handler = OpTranscriber();
   special_handlers["feed"] = FeedOpTranscriber();
@@ -1119,6 +1168,7 @@ OpTranslator::OpTranslator() {
   special_handlers["assign_value"] = AssignValueOpTranscriber();
   special_handlers["increment"] = IncrementOpTranscriber();
   special_handlers["rnn"] = RnnOpTranscriber();
+  special_handlers["one_hot_v2"] = OneHotTranscriber();
   special_handlers["add_n"] = AddNOpTranscriber();
 }
......
@@ -116,5 +116,41 @@ class TestRnnOpTranscriber(unittest.TestCase):
         new_exe = core.StandaloneExecutor(place, plan, new_scope)
+
+
+class TestOneHotOpTranscriber(unittest.TestCase):
+    def test_mutable_attribute(self):
+        place = core.Place()
+        place.set_place(paddle.CPUPlace())
+        new_scope = paddle.static.Scope()
+        main_program = paddle.static.Program()
+        with paddle.static.scope_guard(new_scope):
+            with paddle.static.program_guard(main_program):
+                depth = paddle.assign(np.array([10], dtype=np.int32))
+                label = paddle.static.data(
+                    name="label", shape=[-1, 1], dtype="int64"
+                )
+                one_hot_label = paddle.nn.functional.one_hot(
+                    x=label, num_classes=depth
+                )
+
+                _ = paddle.fluid.core.translate_newirprogram(main_program.desc)
+
+    def test_normal_attribute(self):
+        place = core.Place()
+        place.set_place(paddle.CPUPlace())
+        new_scope = paddle.static.Scope()
+        main_program = paddle.static.Program()
+        with paddle.static.scope_guard(new_scope):
+            with paddle.static.program_guard(main_program):
+                depth = 10
+                label = paddle.static.data(
+                    name="label", shape=[-1, 1], dtype="int64"
+                )
+                one_hot_label = paddle.nn.functional.one_hot(
+                    x=label, num_classes=depth
+                )
+
+                _ = paddle.fluid.core.translate_newirprogram(main_program.desc)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -20,7 +20,6 @@ from eager_op_test import OpTest
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.framework import Program, program_guard
 from paddle.nn import functional
@@ -118,53 +117,6 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.check_output(check_dygraph=False)
-
-
-class TestOneHotOp_exception(unittest.TestCase):
-    def setUp(self):
-        self.op_type = 'one_hot_v2'
-        self.depth = 10
-        self.place = core.CPUPlace()
-        self.dimension = 12
-        self.x = core.LoDTensor()
-        x_lod = [[4, 1, 3, 3]]
-        data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))]
-        data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1])
-        self.x.set(data, self.place)
-        self.x.set_recursive_sequence_lengths(x_lod)
-
-    def test_check_output(self):
-        program = Program()
-        with program_guard(program):
-            x = paddle.static.data(
-                name='x',
-                shape=[-1, self.dimension],
-                dtype='float32',
-                lod_level=1,
-            )
-            x.desc.set_need_check_feed(False)
-            block = program.current_block()
-            one_hot_out = block.create_var(
-                name="one_hot_out",
-                type=core.VarDesc.VarType.LOD_TENSOR,
-                dtype='float32',
-            )
-            block.append_op(
-                type='one_hot',
-                inputs={'X': x},
-                attrs={'depth': self.depth},
-                outputs={'Out': one_hot_out},
-            )
-            exe = fluid.Executor(self.place)
-
-            def run():
-                exe.run(
-                    feed={'x': self.x},
-                    fetch_list=[one_hot_out],
-                    return_numpy=False,
-                )
-
-            self.assertRaises(ValueError, run)
 
 
 class TestOneHotOpApi(unittest.TestCase):
     def test_api(self):
         num_classes = 10
......
@@ -20,7 +20,6 @@ from eager_op_test import OpTest
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.framework import Program, program_guard
 
 
 def one_hot_wrapper(x, depth_tensor, **keargs):
@@ -128,53 +127,6 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.check_output()
-
-
-class TestOneHotOp_exception(unittest.TestCase):
-    def setUp(self):
-        self.op_type = 'one_hot_v2'
-        self.depth = 10
-        self.place = core.CPUPlace()
-        self.dimension = 12
-        self.x = core.LoDTensor()
-        x_lod = [[4, 1, 3, 3]]
-        data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))]
-        data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1])
-        self.x.set(data, self.place)
-        self.x.set_recursive_sequence_lengths(x_lod)
-
-    def test_check_output(self):
-        program = Program()
-        with program_guard(program):
-            x = paddle.static.data(
-                name='x',
-                shape=[-1, self.dimension],
-                dtype='float32',
-                lod_level=1,
-            )
-            x.desc.set_need_check_feed(False)
-            block = program.current_block()
-            one_hot_out = block.create_var(
-                name="one_hot_out",
-                type=core.VarDesc.VarType.LOD_TENSOR,
-                dtype='float32',
-            )
-            block.append_op(
-                type='one_hot',
-                inputs={'X': x},
-                attrs={'depth': self.depth},
-                outputs={'Out': one_hot_out},
-            )
-            exe = fluid.Executor(self.place)
-
-            def run():
-                exe.run(
-                    feed={'x': self.x},
-                    fetch_list=[one_hot_out],
-                    return_numpy=False,
-                )
-
-            self.assertRaises(ValueError, run)
 
 
 class TestOneHotOpApi(unittest.TestCase):
     def test_api(self):
         depth = 10
......