提交 e22197f3 编写于 作者: H Haoliang Zhang 提交者: TensorFlower Gardener

Add GatherNd operator into tflite mlir converter.

PiperOrigin-RevId: 258463148
上级 c7504db1
......@@ -585,6 +585,24 @@ def TFL_GatherOp : TFL_Op<"gather", [
let hasOptions = 1;
}
// TFLite gather_nd op: gathers slices from `params` into a tensor whose
// shape is determined by `indices`. NoSideEffect marks it as a pure value
// computation (safe for CSE/DCE).
def TFL_GatherNdOp : TFL_Op<"gather_nd", [NoSideEffect]> {
let summary = "Gather_nd operator";
let description = [{
Gather slices from `params` into a Tensor with shape specified by `indices`.
}];
// TODO: missing Uint8 — the UI8 element type is not yet in the allowed list.
let arguments = (ins
// Tensor to gather from; only these element types are currently supported.
TensorOf<[F32, I8, I64, I32]>:$params,
// Index tensor; must have a 32- or 64-bit integer element type.
TFL_I32OrI64Tensor:$indices
);
let results = (outs
// Result element type set mirrors `$params`.
TensorOf<[F32, I8, I64, I32]>:$output
);
}
// Same type check of lhs and rhs is handled by the Broadcastable trait.
def TFL_LessEqualOp : TFL_Op<"less_equal", [Broadcastable, NoSideEffect]> {
let summary = "Less_equal operator";
......
......@@ -370,6 +370,22 @@ func @gatherHigherRankIndices(%arg0 : tensor<2x3x6xf32>, %arg1 : tensor<4x5xi32>
// CHECK: %0 = "tfl.gather"(%arg0, %arg1) {axis = 0 : i32} : (tensor<2x3x6xf32>, tensor<4x5xi32>) -> tensor<4x5x3x6xf32>
}
// Lit test: tf.GatherNd with a rank-1 (vector) index tensor must legalize to
// tfl.gather_nd with identical operand/result types. The lines beginning with
// CHECK-LABEL / CHECK below are FileCheck directives, not ordinary comments —
// keep them exactly in sync with the expected lowered IR.
func @gatherNdVectorIndices(%arg0 : tensor<3x2x2xf32>, %arg1 : tensor<2xi32>) -> tensor<2xf32> {
%0 = "tf.GatherNd"(%arg0, %arg1) : (tensor<3x2x2xf32>, tensor<2xi32>) -> tensor<2xf32>
return %0 : tensor<2xf32>
// CHECK-LABEL:gatherNdVectorIndices
// CHECK: %0 = "tfl.gather_nd"(%arg0, %arg1) : (tensor<3x2x2xf32>, tensor<2xi32>) -> tensor<2xf32>
}
// Lit test: tf.GatherNd with a rank-2 index tensor must also legalize to
// tfl.gather_nd with types preserved. The lines beginning with CHECK-LABEL /
// CHECK below are FileCheck directives, not ordinary comments — keep them
// exactly in sync with the expected lowered IR.
func @gatherNdHigherRankIndices(%arg0 : tensor<4x3x2xf32>, %arg1 : tensor<2x2xi32>) -> tensor<2x2xf32> {
%0 = "tf.GatherNd"(%arg0, %arg1) : (tensor<4x3x2xf32>, tensor<2x2xi32>) -> tensor<2x2xf32>
return %0 : tensor<2x2xf32>
// CHECK-LABEL:gatherNdHigherRankIndices
// CHECK: %0 = "tfl.gather_nd"(%arg0, %arg1) : (tensor<4x3x2xf32>, tensor<2x2xi32>) -> tensor<2x2xf32>
}
func @gatherV2VectorIndices(%arg0 : tensor<1x2x20xf32>, %arg1 : tensor<3x5xi32>) -> tensor<1x3x5x20xf32> {
%0 = constant dense<[1]> : tensor<1xi32>
%1 = "tf.GatherV2"(%arg0, %arg1, %0) : (tensor<1x2x20xf32>, tensor<3x5xi32>, tensor<1xi32>) -> tensor<1x3x5x20xf32>
......
......@@ -167,6 +167,9 @@ def : Pat<(TF_GreaterEqualOp $l, $r), (TFL_GreaterEqualOp $l, $r)>;
// Legalize tf.Gather to tfl.gather. The TF `validate_indices` operand has no
// TFLite counterpart and is dropped; the tfl.gather axis is pinned to 0
// (presumably matching tf.Gather's first-dimension semantics — confirm
// against the TF op registry).
def : Pat<(TF_GatherOp $params, $indices, $ignored_validate_indices),
(TFL_GatherOp $params, $indices, ConstantAttr<I32Attr, "0">)>;
// Legalize tf.GatherNd to tfl.gather_nd: a direct 1:1 operand mapping with
// no attributes to translate.
def : Pat<(TF_GatherNdOp $params, $indices),
(TFL_GatherNdOp $params, $indices)>;
def : Pat<(TF_GatherV2Op $params, $indices,
(ConstantOp ElementsAttr:$axis),
ConstantAttr<I64Attr, "0">:$batch_dims),
......
......@@ -939,6 +939,131 @@ raising an error.
TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
}
// TensorFlow GatherNd op (ODS definition). The summary/description string
// literals below are emitted into generated documentation verbatim — do not
// reflow them casually.
def TF_GatherNdOp : TF_Op<"GatherNd", [NoSideEffect]> {
let summary = [{
Gather slices from `params` into a Tensor with shape specified by `indices`.
}];
let description = [{
`indices` is an K-dimensional integer tensor, best thought of as a
(K-1)-dimensional tensor of indices into `params`, where each element defines a
slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
The last dimension of `indices` can be at most the rank of
`params`:
indices.shape[-1] <= params.rank
The last dimension of `indices` corresponds to elements
(if `indices.shape[-1] == params.rank`) or slices
(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
of `params`. The output tensor has shape
indices.shape[:-1] + params.shape[indices.shape[-1]:]
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the
corresponding output value.
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]
indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
[[['a0', 'b0'], ['c0', 'd0']]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
[['a0', 'b0'], ['c1', 'd1']]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
See also `tf.gather` and `tf.batch_gather`.
}];
// Unlike the TFLite counterpart, `params` accepts any TF tensor element
// type; `indices` is restricted to i32/i64.
let arguments = (ins
TF_Tensor:$params,
TF_I32OrI64Tensor:$indices
);
let results = (outs
TF_Tensor:$output
);
// Tindices/Tparams are derived attributes: their values come from the
// element types of operands 1 and 0 respectively, not from stored attrs.
TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
}
def TF_GatherV2Op : TF_Op<"GatherV2", [NoSideEffect]> {
let summary = [{
Gather slices from `params` axis `axis` according to `indices`.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册