Commit db06a786 authored by Jacques Pienaar, committed by TensorFlower Gardener

Fix miscount of output name.

Previously, the counter for a name used by an output was not incremented, so the same name could be handed out again. Also forward a fix from the other UniqueName call (these should be extracted into a common class).

PiperOrigin-RevId: 257673679
Parent 258fdb29
......@@ -451,10 +451,16 @@ std::string Translator::GetName(Operation* inst) {
}
std::string Translator::UniqueName(llvm::StringRef prefix) {
  // Returns `prefix` itself if it has not been handed out yet; otherwise
  // appends the per-prefix counter (prefix1, prefix2, ...) and keeps
  // incrementing it until an unused name is found. The chosen name is
  // recorded in name_to_count_ so later calls can never reuse it.
  //
  // The scraped diff interleaved the old and new bodies here, declaring
  // `val` twice; this is the coherent post-commit logic.
  const std::string base = prefix.str();
  std::string name = base;
  // NOTE: do not cache a reference into name_to_count_ across iterations --
  // each operator[] lookup below may insert a new entry, and for hash-based
  // maps an insertion can invalidate outstanding references.
  while (name_to_count_[name] != 0) {
    // Bump the base prefix's counter so the next collision tries a fresh
    // suffix, then form the candidate name from the pre-bump value.
    name = (prefix + llvm::Twine(name_to_count_[base]++)).str();
  }
  // Reserve the chosen name (count of 1 marks it as taken).
  name_to_count_[name] = 1;
  return name;
}
......@@ -796,8 +802,10 @@ void Translator::InitializeNamesFromAttribute(FuncOp fn) {
fn.emitWarning() << "invalid entry function specification";
return;
}
for (auto it : llvm::enumerate(fn.getArguments()))
for (auto it : llvm::enumerate(fn.getArguments())) {
op_to_name_[*it.value()->user_begin()] = input_names[it.index()];
++name_to_count_[input_names[it.index()].str()];
}
}
if (auto str = dict_attr.get("outputs").dyn_cast<mlir::StringAttr>()) {
......@@ -815,12 +823,14 @@ void Translator::InitializeNamesFromAttribute(FuncOp fn) {
// ensure the name that will be assigned to the buffer is the same, or
// insert an op so that we can have a buffer named such. This cannot
// currently happen due to pseudo_input nodes.
if (auto op = it.value()->getDefiningOp())
if (auto op = it.value()->getDefiningOp()) {
op_to_name_[op] = output_names[it.index()];
else
name_to_count_[output_names[it.index()].str()] = 1;
} else {
fn.emitWarning() << "output is not due to an op and '"
<< output_names[it.index()]
<< "' may not be a named output";
}
}
}
}
......
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
func @main(tensor<3x2xi32>) -> tensor<3x2xi32> {
func @main(tensor<3x2xi32>) -> tensor<3x2xi32>
attributes {tf.entry_function = {inputs = "input", outputs = "SameNameAsOutput"}} {
^bb0(%arg0: tensor<3x2xi32>):
// CHECK: {
// CHECK-NEXT: version: 3,
......@@ -14,7 +15,7 @@ func @main(tensor<3x2xi32>) -> tensor<3x2xi32> {
// CHECK-NEXT: shape: [ 3, 2 ],
// CHECK-NEXT: type: INT32,
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "input",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
......@@ -38,7 +39,7 @@ func @main(tensor<3x2xi32>) -> tensor<3x2xi32> {
// CHECK-NEXT: shape: [ ],
// CHECK-NEXT: type: INT32,
// CHECK-NEXT: buffer: 4,
// CHECK-NEXT: name: "Const2",
// CHECK-NEXT: name: "SameNameAsOutput1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
......@@ -46,7 +47,7 @@ func @main(tensor<3x2xi32>) -> tensor<3x2xi32> {
// CHECK-NEXT: shape: [ ],
// CHECK-NEXT: type: INT32,
// CHECK-NEXT: buffer: 5,
// CHECK-NEXT: name: "add",
// CHECK-NEXT: name: "SameNameAsOutput",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
......@@ -90,7 +91,7 @@ func @main(tensor<3x2xi32>) -> tensor<3x2xi32> {
%0 = "tfl.pseudo_input" (%arg0) : (tensor<3x2xi32>) -> tensor<3x2xi32> loc("Input")
%1 = "tfl.pseudo_const" () {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32> loc("Const")
%2 = "tfl.sub" (%0, %1) {fused_activation_function = "RELU6"} : (tensor<3x2xi32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("sub")
%3 = "std.constant" () {value = dense<10> : tensor<i32>} : () -> tensor<i32> loc("Const2")
%3 = "std.constant" () {value = dense<10> : tensor<i32>} : () -> tensor<i32> loc("SameNameAsOutput")
%4 = "tfl.add" (%3, %2) {fused_activation_function = "NONE"} : (tensor<i32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("add")
return %4 : tensor<3x2xi32>
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册