Commit 4e809823 authored by Feng Liu, committed by TensorFlower Gardener

Reorder the element-wise operations and move operations

This runs the element-wise operation first and then moves the tensor
elements. This is particularly useful when the element-wise operations can be
fused into the preceding ops.

PiperOrigin-RevId: 306510101
Change-Id: Ic055f0f70c3ca10325b54092700fbaa26c4b3c9e
Parent c27ed66f
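The effect of the reorder is easiest to see in IR. Below is a minimal sketch (hypothetical shapes and SSA names, not part of this patch) of the fusion opportunity it unlocks, using the existing @FuseFullyConnectedRelu pattern exercised in the tests:

    // Before: the reshape sits between fully_connected and relu, so the
    // activation cannot be folded into fused_activation_function.
    %shape = constant dense<[128, 1]> : tensor<2xi32>
    %fc = "tfl.fully_connected"(%in, %w, %b) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x256xf32>, tensor<128x256xf32>, tensor<128xf32>) -> tensor<1x128xf32>
    %rs = "tfl.reshape"(%fc, %shape) : (tensor<1x128xf32>, tensor<2xi32>) -> tensor<128x1xf32>
    %out = "tfl.relu"(%rs) : (tensor<128x1xf32>) -> tensor<128x1xf32>

    // After reordering, relu directly consumes fully_connected, and the
    // existing fusion pattern can then fold the activation away entirely:
    %fc2 = "tfl.fully_connected"(%in, %w, %b) {fused_activation_function = "RELU", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x256xf32>, tensor<128x256xf32>, tensor<128xf32>) -> tensor<1x128xf32>
    %out2 = "tfl.reshape"(%fc2, %shape) : (tensor<1x128xf32>, tensor<2xi32>) -> tensor<128x1xf32>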
@@ -2165,6 +2165,17 @@ def TFL_ReluOp: TFL_Op<"relu", [NoSideEffect,
let arguments = (ins TFL_TensorOf<[F32, QUI8, I8]>:$x);
let results = (outs TFL_TensorOf<[F32, QUI8, I8]>:$y);
// This builder doesn't work with quantized types, so it can only be used by
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in optimize_patterns.td.
let builders = [OpBuilder<
"Builder *, OperationState &state, Value input",
[{
state.addOperands({input});
state.addTypes(input.getType());
}]>
];
}
def TFL_Relu6Op: TFL_Op<"relu6", [NoSideEffect,
@@ -2181,6 +2192,17 @@ def TFL_Relu6Op: TFL_Op<"relu6", [NoSideEffect,
let arguments = (ins TFL_TensorOf<[F32, QUI8, I8]>:$x);
let results = (outs TFL_TensorOf<[F32, QUI8, I8]>:$y);
// This builder doesn't work with quantized types, so it can only be used by
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in optimize_patterns.td.
let builders = [OpBuilder<
"Builder *, OperationState &state, Value input",
[{
state.addOperands({input});
state.addTypes(input.getType());
}]>
];
}
def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [NoSideEffect,
@@ -2196,6 +2218,17 @@ def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [NoSideEffect,
let arguments = (ins TFL_TensorOf<[F32, QUI8, I8]>:$x);
let results = (outs TFL_TensorOf<[F32, QUI8, I8]>:$y);
// This builder doesn't work with quantized types, so it can only be used by
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in optimize_patterns.td.
let builders = [OpBuilder<
"Builder *, OperationState &state, Value input",
[{
state.addOperands({input});
state.addTypes(input.getType());
}]>
];
}
def TFL_ReshapeOp: TFL_Op<"reshape", [
@@ -2548,6 +2581,17 @@ def TFL_TanhOp: TFL_Op<"tanh", [
let arguments = (ins TFL_TensorOf<[F32, I16, I8, QI8, QUI8, QI16, QUI16, TFL_Uint8]>:$x);
let results = (outs TFL_TensorOf<[F32, I16, I8, QI8, QUI8, QI16, QUI16, TFL_Uint8]>:$y);
// This builder doesn't work with quantized types, so it can only be used by
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in optimize_patterns.td.
let builders = [OpBuilder<
"Builder *, OperationState &state, Value input",
[{
state.addOperands({input});
state.addTypes(input.getType());
}]>
];
}
def TFL_TileOp: TFL_Op<"tile", [NoSideEffect, SameOperandsAndResultsScale,
@@ -439,6 +439,31 @@ func @NotReorderReshapeAddIfNotTailingDim(%arg0: tensor<40x40x1xf32>) -> tensor<
// CHECK: return %[[rs2]]
}
// CHECK-LABEL: @ReorderElementwiseValueOpAndMoveOp
func @ReorderElementwiseValueOpAndMoveOp(%arg0: tensor<40x40x1xf32>) -> tensor<40x40xf32> {
%shape = constant dense<[40, 40]> : tensor<2xi32>
%1 = "tfl.reshape"(%arg0, %shape) : (tensor<40x40x1xf32>, tensor<2xi32>) -> tensor<40x40xf32>
%2 = "tfl.relu"(%1) : (tensor<40x40xf32>) -> tensor<40x40xf32>
return %2 : tensor<40x40xf32>
// CHECK: %[[rs1:.*]] = "tfl.relu"(%arg0
// CHECK: %[[rs2:.*]] = "tfl.reshape"(%[[rs1]]
// CHECK: return %[[rs2]]
}
// CHECK-LABEL: @NotReorderElementwiseValueOpAndMoveOp
func @NotReorderElementwiseValueOpAndMoveOp(%arg0: tensor<40x40x1xf32>) -> (tensor<40x40xf32>, tensor<40x40xf32>) {
%shape = constant dense<[40, 40]> : tensor<2xi32>
%1 = "tfl.reshape"(%arg0, %shape) : (tensor<40x40x1xf32>, tensor<2xi32>) -> tensor<40x40xf32>
%2 = "tfl.relu"(%1) : (tensor<40x40xf32>) -> tensor<40x40xf32>
return %1, %2 : tensor<40x40xf32>, tensor<40x40xf32>
// CHECK: %[[rs1:.*]] = "tfl.reshape"(%arg0
// CHECK: %[[rs2:.*]] = "tfl.relu"(%[[rs1]]
// CHECK: return %[[rs1]], %[[rs2]]
}
// CHECK-LABEL: @FuseFullyConnectedRelu
func @FuseFullyConnectedRelu(%arg0: tensor<1x256xf32>, %arg1: tensor<128x256xf32>, %arg2: tensor<128xf32>) -> tensor<1x128xf32> {
%0 = "tfl.fully_connected" (%arg0, %arg1, %arg2) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x256xf32>, tensor<128x256xf32>, tensor<128xf32>) -> tensor<1x128xf32>
@@ -378,6 +378,19 @@ foreach BinaryOp = [TFL_FloorDivOp, TFL_FloorModOp, TFL_MinimumOp,
(IsTailOfShape $rhs, $input)]>;
}
// Reorder the element-wise value operations and the element-move operations
// so that the value operation happens before the move operation.
foreach ValueOp = [TFL_CeilOp, TFL_ExpOp, TFL_FloorOp, TFL_NegOp,
TFL_ReluOp, TFL_Relu1Op, TFL_Relu6Op, TFL_RoundOp,
TFL_TanhOp, TFL_SqrtOp, TFL_SquareOp] in {
foreach MoveOp = [TFL_DepthToSpaceOp, TFL_ExpandDimsOp, TFL_SqueezeOp,
TFL_ReshapeOp, TFL_TransposeOp] in {
def : Pat<(ValueOp:$value (MoveOp:$move $input, $move_def)),
(MoveOp (ValueOp $input), $move_def),
[(HasOneUse $move)]>;
}
}
// Returns the shape of a ranked tensor.
// If called without a ranked tensor, it will fail.
def GetShape: NativeCodeCall<"GetShape($0)">;
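For reference, one instance of the foreach cross-product above, tanh over transpose, rewrites IR along these lines (a hypothetical example in the spirit of the tests, not taken from the patch):

    // Before: tanh runs on the transposed (moved) tensor.
    %perm = constant dense<[1, 0]> : tensor<2xi32>
    %t = "tfl.transpose"(%arg0, %perm) : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32>
    %r = "tfl.tanh"(%t) : (tensor<3x2xf32>) -> tensor<3x2xf32>

    // After: tanh runs first (element-wise, same element count), then the
    // move. The HasOneUse constraint means the rewrite only fires when the
    // transpose has no other consumers, matching the negative test above.
    %r2 = "tfl.tanh"(%arg0) : (tensor<2x3xf32>) -> tensor<2x3xf32>
    %t2 = "tfl.transpose"(%r2, %perm) : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32>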