Unverified commit f7cc20f9, authored by Jason, committed by GitHub

Merge pull request #432 from SunAhong1993/paddle-2.0

add tf optimizer
......@@ -128,19 +128,25 @@ def tf2paddle(model_path,
else:
from x2paddle.op_mapper.static.tf2paddle.tf_op_mapper import TFOpMapper
from x2paddle.optimizer.tensorflow.bias import BiasOpt
from x2paddle.optimizer.tensorflow.transpose import TransposeOpt
from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt
print("Now translating model from tensorflow to paddle.")
model = TFDecoder(model_path, define_input_shape=define_input_shape)
mapper = TFOpMapper(model)
mapper.paddle_graph.build()
if paddle_type == "dygraph":
from x2paddle.optimizer.optimizer import GraphOptimizer
graph_opt = GraphOptimizer(source_frame="tf", paddle_type=paddle_type)
graph_opt.optimize(mapper.paddle_graph)
else:
from x2paddle.optimizer.tensorflow.bias import BiasOpt
from x2paddle.optimizer.tensorflow.transpose import TransposeOpt
from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt
bias_opt = BiasOpt()
transpose_opt = TransposeOpt()
batch_norm_opt = BatchNormOpt()
bias_opt.run(program)
batch_norm_opt.run(program)
transpose_opt.run(program)
bias_opt.run(mapper.paddle_graph)
batch_norm_opt.run(mapper.paddle_graph)
transpose_opt.run(mapper.paddle_graph)
mapper.paddle_graph.gen_model(save_dir)
......
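A minimal end-to-end sketch of the dygraph path wired up above. The TFDecoder and dygraph TFOpMapper import paths are assumptions (only the static mapper's import appears in this hunk); the rest follows the calls shown in the diff:

from x2paddle.decoder.tf_decoder import TFDecoder                          # assumed path
from x2paddle.op_mapper.dygraph.tf2paddle.tf_op_mapper import TFOpMapper   # assumed path
from x2paddle.optimizer.optimizer import GraphOptimizer

model = TFDecoder("frozen_model.pb", define_input_shape=False)
mapper = TFOpMapper(model)
mapper.paddle_graph.build()
graph_opt = GraphOptimizer(source_frame="tf", paddle_type="dygraph")
graph_opt.optimize(mapper.paddle_graph)
mapper.paddle_graph.gen_model("pd_model")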
......@@ -76,7 +76,7 @@ class TFOpMapper(OpMapper):
'LessEqual': 'paddle.less_equal',
'GreaterEqual': 'paddle.greater_equal',
'Mul': 'paddle.multiply',
'FloorDiv': 'fluid.layers.elementwise_floordiv'
'FloorDiv': 'paddle.floor_divide'
}
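# paddle.floor_divide replaces fluid.layers.elementwise_floordiv above;
# for non-negative integer operands the two agree. A quick sketch:
#   import paddle
#   paddle.floor_divide(paddle.to_tensor([7]), paddle.to_tensor([2]))  # -> [3]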
def __init__(self, decoder):
......@@ -176,11 +176,12 @@ class TFOpMapper(OpMapper):
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
self.paddle_graph.add_layer(
layer_id = self.paddle_graph.add_layer(
kernel=op_type,
inputs={"x": x.name,
"y": y.name},
outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
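# input_shapes is stored on the layer so the transpose-elimination pass
# can later tell shape(4) + shape(1) broadcasts apart from same-rank
# elementwise ops (see the elementwise handling in the optimizer below).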
def NotEqual(self, node):
x = self.graph.get_node(node.layer.input[0])
......@@ -224,20 +225,15 @@ class TFOpMapper(OpMapper):
return
self.params[node.name] = node.value
if dtype != "float32":
self.params[node.name] = node.value.astype("float32")
if 0 not in shape:
self.paddle_graph.add_layer(
"self.create_parameter",
inputs={},
outputs=[node.name],
shape=shape,
attr=string(node.name))
if dtype != "float32":
self.paddle_graph.add_layer(
kernel="paddle.cast",
inputs={"x": node.name},
outputs=[node.name],
dtype=string(dtype))
attr=string(node.name),
dtype=string(dtype),
default_initializer="paddle.nn.initializer.Constant(value=0.0)")
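# With dtype and default_initializer passed directly to
# create_parameter, the trailing paddle.cast layer is no longer needed;
# the generated code looks roughly like this (hypothetical names):
#   self.const_0 = self.create_parameter(
#       shape=[3], attr='const_0', dtype='int64',
#       default_initializer=paddle.nn.initializer.Constant(value=0.0))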
def Transpose(self, node):
input = self.graph.get_node(node.layer.input[0])
......@@ -262,7 +258,8 @@ class TFOpMapper(OpMapper):
else:
inputs["shape"] = dims.name
layer_attrs["dtype"] = string(input_value.dtype)
layer_attrs["value"] = input_value.value
layer_attrs["fill_value"] = input_value.value
self.paddle_graph.add_layer(
"paddle.full",
......@@ -1043,7 +1040,7 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
......@@ -1245,15 +1242,15 @@ class TFOpMapper(OpMapper):
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
layer_id = self.paddle_graph.add_layer(
"paddle.fluid.layers.elementwise_sub", inputs=inputs, outputs=[node.name])
# program.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
"fluid.layers.elementwise_sub", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
inputs = {"x": node.name, "y": node.name}
x_shape = node.out_shapes[0]
y_shape = node.out_shapes[0]
layer_id = self.paddle_graph.add_layer(
"paddle.multiply", inputs=inputs, outputs=[node.name])
# program.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
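# Together, the elementwise_sub and multiply above lower TF's
# SquaredDifference(x, y) to (x - y) * (x - y); input_shapes is
# recorded on both layers so the transpose optimizer can reason about
# their broadcasting.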
def OneHot(self, node):
input = self.graph.get_node(node.layer.input[0])
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .transpose_elimination import DygraphTransposeElimination
from .transpose_eliminate_pass import DygraphTransposeEliminatePass
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.elimination.dygraph import DygraphTransposeElimination
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class DygraphTransposeEliminatePass(Pass):
name = "transpose_eliminate_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = DygraphTransposeElimination()
fuser.operate(graph)
# For registration
transpose_eliminate_pass = DygraphTransposeEliminatePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import sys
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class DygraphTransposeElimination(FuseBase):
def __init__(self):
super(DygraphTransposeElimination, self).__init__(graph_type="dygraph")
self.direct_layers = [
'paddle.nn.ReLU', 'paddle.nn.ReLU6', 'paddle.abs',
'paddle.nn.Sigmoid', 'paddle.exp', 'paddle.rsqrt',
'paddle.nn.Swish', 'paddle.nn.Tanh',
'paddle.nn.Softplus', 'paddle.nn.LeakyReLU',
'paddle.floor', 'paddle.erf', 'paddle.square'
]
self.elementwise_layers = [
'paddle.add', 'fluid.layers.elementwise_sub',
'paddle.multiply', 'paddle.divide'
]
self.reduce_layers = [
'paddle.mean', 'paddle.all',
'paddle.max', 'paddle.any',
'paddle.sum', 'paddle.prod'
]
def get_transpose_num(self, graph):
count = 0
for layer_id, layer in graph.layers.items():
if layer.kernel == "paddle.transpose":
count += 1
return count
def operate(self, graph):
total_layer_num = len(graph.layers)
scanned_layers = set()
optimized_transpose_layers = list()
optimized_reduce_layers = list()
optimized_concat_layers = list()
optimized_elementwise_layers = list()
def get_index(layer):
if layer.kernel.startswith("paddle.nn") and "functional" not in layer.kernel:
return 1
else:
return 0
def strip_transpose(_graph):
layers = copy.deepcopy(_graph.layers)
for layer_id, layer in layers.items():
if layer_id in scanned_layers:
continue
scanned_layers.add(layer_id)
percent = round(len(scanned_layers) / total_layer_num * 100, 2)
sys.stderr.write("\rOptimize Transpose Layers...{}%".format(
percent))
if layer.kernel != "paddle.transpose":
continue
if layer.attrs["perm"] != [0, 2, 3, 1]:
continue
transpose_layers = list()
propagate_layers = list()
reduce_layers = list()
concat_layers = list()
# This elementwise_layers list is reserved for layers of the shape(4) + shape(1) form
elementwise_layers = list()
can_be_optimized = True
for out in _graph.edges_out.get(layer_id, []):
if _graph.layers[out].kernel == "paddle.transpose":
if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
can_be_optimized = False
break
transpose_layers.append(out)
elif _graph.layers[out].kernel in self.elementwise_layers:
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.direct_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.reduce_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if _graph.layers[out].attrs.get('keepdim', False):
can_be_optimized = False
break
propagate_layers.append(out)
reduce_layers.append(out)
elif _graph.layers[out].kernel == "paddle.concat":
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
propagate_layers.append(out)
concat_layers.append(out)
else:
can_be_optimized = False
break
visited_layers = set()
while len(propagate_layers) > 0 and can_be_optimized:
current_id = propagate_layers.pop(0)
visited_layers.add(current_id)
for out in _graph.edges_out.get(current_id, []):
if _graph.layers[
out].kernel == "paddle.transpose":
if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
can_be_optimized = False
break
transpose_layers.append(out)
elif _graph.layers[
out].kernel in self.elementwise_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.direct_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.reduce_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if _graph.layers[out].attrs.get('keepdim',
False):
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
reduce_layers.append(out)
elif _graph.layers[out].kernel == "paddle.concat":
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
concat_layers.append(out)
else:
can_be_optimized = False
break
for ipt in _graph.edges_in.get(current_id, []):
if _graph.layers[
current_id].kernel in self.elementwise_layers:
try:
x_shape = _graph.layers[
current_id].input_shapes['x']
y_shape = _graph.layers[
current_id].input_shapes['y']
output_index = get_index(_graph.layers[ipt])
if _graph.layers[ipt].outputs[
output_index] == _graph.layers[current_id].inputs[
'x']:
if len(x_shape) <= 1:
elementwise_layers.append(current_id)
continue
elif _graph.layers[ipt].outputs[
output_index] == _graph.layers[current_id].inputs[
'y']:
if len(y_shape) <= 1:
elementwise_layers.append(current_id)
continue
else:
raise Exception(
"Unexcepted situation happend while optimizing transpose"
)
except Exception as e:
can_be_optimized = False
break
output_index = get_index(_graph.layers[ipt])
if _graph.layers[
ipt].kernel == "paddle.transpose":
if _graph.layers[ipt].attrs["perm"] != [0, 2, 3, 1]:
can_be_optimized = False
break
if ipt not in visited_layers:
transpose_layers.append(ipt)
elif _graph.layers[
ipt].kernel in self.elementwise_layers:
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
elif _graph.layers[ipt].kernel in self.direct_layers:
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
elif _graph.layers[ipt].kernel in self.reduce_layers:
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if _graph.layers[ipt].attrs.get('keepdim',
False):
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
reduce_layers.append(ipt)
elif _graph.layers[ipt].kernel == "paddle.concat":
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
concat_layers.append(ipt)
else:
can_be_optimized = False
break
if not can_be_optimized:
break
if not can_be_optimized:
continue
transpose_layers.append(layer_id)
transpose_layers = list(set(transpose_layers))
for l in transpose_layers:
output_index = get_index(graph.layers[l])
if graph.layers[l].outputs[output_index] in graph.outputs:
can_be_optimized = False
break
if not can_be_optimized:
continue
for l in transpose_layers:
_graph.delete_layer(l)
optimized_transpose_layers.extend(transpose_layers)
optimized_reduce_layers.extend(reduce_layers)
optimized_concat_layers.extend(concat_layers)
optimized_elementwise_layers.extend(elementwise_layers)
return True
return False
before_transpose_num = self.get_transpose_num(graph)
opt_graph = copy.deepcopy(graph)
total_layer_num = len(opt_graph.layers)
while strip_transpose(opt_graph):
pass
for layer_id in list(set(optimized_transpose_layers)):
self.delete_layer_with_associated(graph, layer_id)
for layer_id in list(set(optimized_reduce_layers)):
dim = graph.layers[layer_id].attrs.get('dim', None)
if dim is not None:
for i in range(len(dim)):
dim[i] = [0, 2, 3, 1][dim[i]]
graph.layers[layer_id].attrs['dim'] = dim
for layer_id in list(set(optimized_concat_layers)):
axis = graph.layers[layer_id].attrs.get('axis', 0)
graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]
for layer_id in list(set(optimized_elementwise_layers)):
axis = graph.layers[layer_id].attrs.get('axis', -1)
graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]
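# The [0, 2, 3, 1][axis] lookup maps an NHWC axis to its NCHW position
# once the surrounding transposes are removed (e.g. channel axis 3 -> 1,
# height axis 1 -> 2).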
current_transpose_num = self.get_transpose_num(graph)
print(
"\nTranspose layers optimized, before: transpose_num={}, after: transpose_num={}".
format(before_transpose_num, current_transpose_num))
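The pass relies on perm=[0, 2, 3, 1] and perm=[0, 3, 1, 2] being mutual inverses, which is what makes paired transposes safe to delete; a quick sanity check (a sketch, assuming only numpy):

import numpy as np

x = np.random.rand(2, 3, 4, 5)                      # NCHW
y = x.transpose(0, 2, 3, 1)                         # NCHW -> NHWC
assert np.array_equal(y.transpose(0, 3, 1, 2), x)   # NHWC -> back to NCHW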
......@@ -12,19 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .adaptive_pool2d_fuser import Dygraph_AdaptivePool2dFuser
from .adaptive_pool2d_fuse_pass import Dygraph_AdaptivePool2dFusePass
from .batchnorm2d_fuser import Dygraph_BatchNorm2dFuser
from .batchnorm2d_fuse_pass import Dygraph_BatchNorm2dFusePass
from .bn_scale_fuser import Dygraph_BNScaleFuser
from .bn_scale_fuse_pass import Dygraph_BNScaleFusePass
from .constant_fuser import Dygraph_ConstantFuser
from .constant_fuse_pass import Dygraph_ConstantFusePass
from .dropout_fuser import Dygraph_DropoutFuser
from .dropout_fuse_pass import Dygraph_DropoutFusePass
from .fc_fuser import Dygraph_FcFuser
from .fc_fuse_pass import Dygraph_FcFusePass
from .interpolate_bilinear_fuser import Dygraph_InterpolateBilinearFuser
from .interpolate_bilinear_fuse_pass import Dygraph_InterpolateBilinearFusePass
from .reshape_fuser import Dygraph_ReshapeFuser
from .reshape_fuse_pass import Dygraph_ReshapeFusePass
from .adaptive_pool2d_fuser import DygraphAdaptivePool2dFuser
from .adaptive_pool2d_fuse_pass import DygraphAdaptivePool2dFusePass
from .batchnorm2d_fuser import DygraphBatchNorm2dFuser
from .batchnorm2d_fuse_pass import DygraphBatchNorm2dFusePass
from .bn_scale_fuser import DygraphBNScaleFuser
from .bn_scale_fuse_pass import DygraphBNScaleFusePass
from .constant_fuser import DygraphConstantFuser
from .constant_fuse_pass import DygraphConstantFusePass
from .conv2d_add_fuser import DygraphConv2DAddFuser
from .conv2d_add_fuse_pass import DygraphConv2DAddFusePass
from .dropout_fuser import DygraphDropoutFuser
from .dropout_fuse_pass import DygraphDropoutFusePass
from .fc_fuser import DygraphFcFuser
from .fc_fuse_pass import DygraphFcFusePass
from .interpolate_bilinear_fuser import DygraphInterpolateBilinearFuser
from .interpolate_bilinear_fuse_pass import DygraphInterpolateBilinearFusePass
from .reshape_fuser import DygraphReshapeFuser
from .reshape_fuse_pass import DygraphReshapeFusePass
from .tf_batchnorm_fuser import DygraphTFBatchNormFuser
from .tf_batchnorm_fuse_pass import DygraphTFBatchNormFusePass
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_AdaptivePool2dFuser
from x2paddle.optimizer.fusion.dygraph import DygraphAdaptivePool2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_AdaptivePool2dFusePass(Pass):
class DygraphAdaptivePool2dFusePass(Pass):
name = "dygraph_adaptive_pool2d_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_AdaptivePool2dFuser()
fuser = DygraphAdaptivePool2dFuser()
fuser.operate(graph, match_kind="topo")
# For registration
adaptive_pool2d_fuse_pass = Dygraph_AdaptivePool2dFusePass()
adaptive_pool2d_fuse_pass = DygraphAdaptivePool2dFusePass()
......@@ -18,9 +18,9 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_AdaptivePool2dFuser(FuseBase):
class DygraphAdaptivePool2dFuser(FuseBase):
def __init__(self):
super(Dygraph_AdaptivePool2dFuser, self).__init__(graph_type="dygraph")
super(DygraphAdaptivePool2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的adaptive pool2d图结构。
......
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_BatchNorm2dFuser
from x2paddle.optimizer.fusion.dygraph import DygraphBatchNorm2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_BatchNorm2dFusePass(Pass):
class DygraphBatchNorm2dFusePass(Pass):
name = "dygraph_batchnorm2d_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_BatchNorm2dFuser()
fuser = DygraphBatchNorm2dFuser()
fuser.operate(graph, match_kind="topo")
# For registration
batchnorm2d_fuse_pass = Dygraph_BatchNorm2dFusePass()
batchnorm2d_fuse_pass = DygraphBatchNorm2dFusePass()
......@@ -18,9 +18,9 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_BatchNorm2dFuser(FuseBase):
class DygraphBatchNorm2dFuser(FuseBase):
def __init__(self):
super(Dygraph_BatchNorm2dFuser, self).__init__(graph_type="dygraph")
super(DygraphBatchNorm2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的batchnorm2d图结构。
......
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_BNScaleFuser
from x2paddle.optimizer.fusion.dygraph import DygraphBNScaleFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_BNScaleFusePass(Pass):
class DygraphBNScaleFusePass(Pass):
name = "dygraph_bn_scale_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_BNScaleFuser()
fuser = DygraphBNScaleFuser()
fuser.operate(graph, match_kind="topo")
# For registration
bn_scale_fuse_pass = Dygraph_BNScaleFusePass()
bn_scale_fuse_pass = DygraphBNScaleFusePass()
......@@ -18,9 +18,9 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_BNScaleFuser(FuseBase):
class DygraphBNScaleFuser(FuseBase):
def __init__(self):
super(Dygraph_BNScaleFuser, self).__init__(graph_type="dygraph")
super(DygraphBNScaleFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的batchnorm2d图结构。
......
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_ConstantFuser
from x2paddle.optimizer.fusion.dygraph import DygraphConstantFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_ConstantFusePass(Pass):
class DygraphConstantFusePass(Pass):
name = "dygraph_constant_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_ConstantFuser()
fuser = DygraphConstantFuser()
fuser.operate(graph, match_kind="topo")
# For registration
constant_fuse_pass = Dygraph_ConstantFuser()
constant_fuse_pass = DygraphConstantFusePass()
......@@ -18,9 +18,9 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_ConstantFuser(FuseBase):
class DygraphConstantFuser(FuseBase):
def __init__(self):
super(Dygraph_ConstantFuser, self).__init__(graph_type="dygraph")
super(DygraphConstantFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的constant图结构。
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import DygraphConv2DAddFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class DygraphConv2DAddFusePass(Pass):
name = "dygraph_conv2d_add_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = DygraphConv2DAddFuser()
fuser.operate(graph, match_kind="edge")
# For registration
dygraph_conv2d_add_fuse_pass = DygraphConv2DAddFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class DygraphConv2DAddFuser(FuseBase):
def __init__(self):
super(DygraphConv2DAddFuser, self).__init__(graph_type="dygraph")
self.patterns = list()
def build_pattern(self):
""" 描述需要替换的conv2d+add图结构。
conv2d+add层模式python实现代码示例:
模式一:
MobilenetV1_Logits_Conv2d_1c_1x1_biases = self.MobilenetV1_Logits_Conv2d_1c_1x1_biases
conv2d_transpose_14 = paddle.transpose(x=MobilenetV1_Logits_AvgPool_1a_AvgPool, perm=[0, 3, 1, 2])
MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = self.conv27(conv2d_transpose_14)
MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.transpose(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, perm=[0, 2, 3, 1])
MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
Pattern 2:
MobilenetV1_Logits_Conv2d_1c_1x1_biases = self.MobilenetV1_Logits_Conv2d_1c_1x1_biases
MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = self.conv27(conv2d_transpose_14)
MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
"""
def gen_name(id):
return "x" + str(id)
pattern = PaddleGraph(graph_type="dygraph")
pattern.add_layer(
"self.create_parameter",
inputs={},
outputs=[gen_name(0)])
pattern.add_layer(
kernel="paddle.transpose",
inputs={"x": "conv-input-0"},
outputs=[gen_name(1)],
perm=[0, 3, 1, 2])
pattern.add_layer(
kernel="paddle.nn.Conv2D",
inputs={"input": gen_name(1)},
outputs=[gen_name(2)])
pattern.add_layer(
kernel="paddle.transpose",
inputs={"x": gen_name(2)},
outputs=[gen_name(2)],
perm=[0, 2, 3, 1])
pattern.add_layer(
kernel="paddle.add",
inputs={"x": gen_name(2),
"y": gen_name(0)},
outputs=[gen_name(3)])
pattern.build(inputs={"input-0": "conv-input-0", })
self.patterns.append(pattern)
pattern = PaddleGraph(graph_type="dygraph")
pattern.add_layer(
"self.create_parameter",
inputs={},
outputs=[gen_name(0)])
pattern.add_layer(
kernel="paddle.nn.Conv2D",
inputs={"input": "conv-input-0"},
outputs=[gen_name(1)])
pattern.add_layer(
kernel="paddle.add",
inputs={"x": gen_name(1),
"y": gen_name(0)},
outputs=[gen_name(2)])
pattern.build(inputs={"input-0": "conv-input-0", })
self.patterns.append(pattern)
def insert_new_layer(self, graph, parameters, matches):
self.gen_new_layer(matches, graph)
matches_copy = copy.deepcopy(matches)
for layer_id, layer in matches_copy.items():
if layer.kernel not in ["self.create_parameter", "paddle.add"]:
matches.pop(layer_id)
def gen_new_layer(self, matches, graph):
is_transpose = False
for layer_id, layer in matches.items():
if layer.kernel == "self.create_parameter":
bias_name = layer.attrs["attr"]
if layer.kernel == "paddle.transpose":
is_transpose = True
if layer.kernel == "paddle.add":
output_name = layer.outputs[0]
if layer.kernel == "paddle.nn.Conv2D":
conv_id = layer_id
for layer_id, layer in matches.items():
if layer.kernel == "paddle.nn.functional.conv2d_transpose":
layer.bias = bias_name
if not is_transpose:
layer.outputs[0] = output_name
if layer.kernel == "paddle.nn.Conv2D":
layer.attrs["bias_attr"] = bias_name
if not is_transpose:
layer.outputs[1] = output_name
if layer.kernel == "paddle.transpose":
if conv_id in graph.edges_in[layer_id]:
layer.outputs[0] = output_name
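After fusion the bias is carried by the convolution itself through bias_attr, so pattern 2 from the docstring collapses to a single call; roughly, with the names used there:

# before: conv output followed by paddle.add(x=conv_out, y=biases)
# after:  MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = self.conv27(conv2d_transpose_14)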
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_DropoutFuser
from x2paddle.optimizer.fusion.dygraph import DygraphDropoutFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_DropoutFusePass(Pass):
class DygraphDropoutFusePass(Pass):
name = "dygraph_dropout_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_DropoutFuser()
fuser = DygraphDropoutFuser()
fuser.operate(graph, match_kind="topo")
# For registration
dropout_fuse_pass = Dygraph_DropoutFuser()
dropout_fuse_pass = DygraphDropoutFusePass()
......@@ -18,9 +18,9 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_DropoutFuser(FuseBase):
class DygraphDropoutFuser(FuseBase):
def __init__(self):
super(Dygraph_DropoutFuser, self).__init__(graph_type="dygraph")
super(DygraphDropoutFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的constant图结构。
......
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_FcFuser
from x2paddle.optimizer.fusion.dygraph import DygraphFcFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_FcFusePass(Pass):
class DygraphFcFusePass(Pass):
name = "dygraph_fc_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_FcFuser()
fuser = DygraphFcFuser()
fuser.operate(graph, match_kind="topo")
# For registration
fc_fuse_pass = Dygraph_FcFusePass()
fc_fuse_pass = DygraphFcFusePass()
......@@ -18,10 +18,10 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_FcFuser(FuseBase):
class DygraphFcFuser(FuseBase):
def __init__(self):
self.linear_index = 0
super(Dygraph_FcFuser, self).__init__(graph_type="dygraph")
super(DygraphFcFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的fc图结构。
......
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_InterpolateBilinearFuser
from x2paddle.optimizer.fusion.dygraph import DygraphInterpolateBilinearFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_InterpolateBilinearFusePass(Pass):
class DygraphInterpolateBilinearFusePass(Pass):
name = "dygraph_interpolate_bilinear_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_InterpolateBilinearFuser()
fuser = DygraphInterpolateBilinearFuser()
fuser.operate(graph, match_kind="topo")
# For registration
interpolate_bilinear_fuse_pass = Dygraph_InterpolateBilinearFusePass()
interpolate_bilinear_fuse_pass = DygraphInterpolateBilinearFusePass()
......@@ -18,9 +18,9 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_InterpolateBilinearFuser(FuseBase):
class DygraphInterpolateBilinearFuser(FuseBase):
def __init__(self):
super(Dygraph_InterpolateBilinearFuser, self).__init__(graph_type="dygraph")
super(DygraphInterpolateBilinearFuser, self).__init__(graph_type="dygraph")
import torch
torch_version = torch.__version__
torch_version_part = torch_version.split(".")
......
......@@ -13,21 +13,21 @@
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_ReshapeFuser
from x2paddle.optimizer.fusion.dygraph import DygraphReshapeFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_ReshapeFusePass(Pass):
class DygraphReshapeFusePass(Pass):
name = "dygraph_reshape_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Dygraph_ReshapeFuser()
fuser = DygraphReshapeFuser()
fuser.operate(graph, match_kind="edge")
# For registration
reshape_fuse_pass = Dygraph_ReshapeFusePass()
reshape_fuse_pass = DygraphReshapeFusePass()
......@@ -18,9 +18,9 @@ from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_ReshapeFuser(FuseBase):
class DygraphReshapeFuser(FuseBase):
def __init__(self):
super(Dygraph_ReshapeFuser, self).__init__(graph_type="dygraph")
super(DygraphReshapeFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的reshape图结构。
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import DygraphTFBatchNormFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class DygraphTFBatchNormFusePass(Pass):
name = "dygraph_tf_batchnorm_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = DygraphTFBatchNormFuser()
fuser.operate(graph, match_kind="edge")
# For registration
dygraph_tf_batchnorm_fuse_pass = DygraphTFBatchNormFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class DygraphTFBatchNormFuser(FuseBase):
def __init__(self):
self.bn_index = 0
super(DygraphTFBatchNormFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的batchnorm图结构。
batchnorm层模式python实现代码示例:
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"self.create_parameter",
inputs={},
outputs=[gen_name(0)])
self.pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(1)],
shape=[1])
self.pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(0), "y": gen_name(1)},
outputs=[gen_name(2)])
self.pattern.add_layer(
"paddle.rsqrt",
inputs={"x": gen_name(2)},
outputs=[gen_name(3)])
self.pattern.add_layer(
"self.create_parameter",
inputs={},
outputs=[gen_name(4)])
self.pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(3), "y": gen_name(4)},
outputs=[gen_name(5)])
self.pattern.add_layer(
"self.create_parameter",
inputs={},
outputs=[gen_name(6)])
self.pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(6), "y": gen_name(5)},
outputs=[gen_name(7)])
self.pattern.add_layer(
"self.create_parameter",
inputs={},
outputs=[gen_name(8)])
self.pattern.add_layer(
"fluid.layers.elementwise_sub",
inputs={"x": gen_name(8), "y": gen_name(7)},
outputs=[gen_name(9)])
self.pattern.add_layer(
"paddle.multiply",
inputs={"x": "bn-input-0", "y": gen_name(5)},
outputs=[gen_name(10)])
self.pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(10), "y": gen_name(9)},
outputs=[gen_name(11)])
self.pattern.build(inputs={"input-0": "bn-input-0", })
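# The pattern above is TF's unfused inference-time batch norm:
#   scale = gamma * rsqrt(var + eps)
#   y     = x * scale + (beta - mean * scale)
# which is what paddle.nn.BatchNorm computes with is_test=True, so
# gen_new_layer below rebuilds each match as
# transpose -> BatchNorm -> transpose.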
def insert_new_layer(self, graph, parameters, matches):
new_layers, last_layer_id = self.gen_new_layer(matches, parameters, graph)
matches_copy = copy.deepcopy(matches)
for layer_id, layer in matches_copy.items():
for i in range(3):
if layer_id == new_layers[i].id:
matches.pop(new_layers[i].id)
prefix_layers = OrderedDict()
mid_layers = OrderedDict()
suffix_layers = OrderedDict()
is_need_id = False
for layer_id, layer in graph.layers.items():
if is_need_id:
suffix_layers[layer_id] = layer
else:
if layer_id == last_layer_id:
for i in range(3):
mid_layers[new_layers[i].id] = new_layers[i]
is_need_id = True
prefix_layers[layer_id] = layer
prefix_layers.update(mid_layers)
prefix_layers.update(suffix_layers)
graph.layers = prefix_layers
def gen_new_layer(self, matches, parameters, graph):
layer_id_list = list(matches.keys())
layer_id_list.sort(key = int)
for layer_id, layer in matches.items():
if layer.kernel == "paddle.full":
full_layer = layer
out_layer_id = graph.edges_out[layer_id][0]
if matches[out_layer_id].kernel == "paddle.add":
var_layer_id = graph.edges_in[out_layer_id][0]
var_layer = matches[var_layer_id]
if layer.kernel == "paddle.rsqrt":
out_layer_id = graph.edges_out[layer_id][0]
if matches[out_layer_id].kernel == "paddle.multiply":
gamma_layer_id = graph.edges_in[out_layer_id][1]
gamma_layer = matches[gamma_layer_id]
if layer.kernel == "fluid.layers.elementwise_sub":
in_layer_id = graph.edges_in[layer_id][0]
beta_layer = matches[in_layer_id]
in_layer_id = graph.edges_in[layer_id][1]
in_layer_id = graph.edges_in[in_layer_id][0]
mean_layer = matches[in_layer_id]
out_layer_id = graph.edges_out[layer_id][0]
add_layer = matches[out_layer_id]
if layer.kernel == "paddle.multiply":
in_layer_id = graph.edges_in[layer_id][1]
mul_layer = matches[in_layer_id]
if mul_layer.kernel == "paddle.multiply":
in_layer_id = graph.edges_in[layer_id][0]
if in_layer_id not in matches:
input_name = layer.inputs["x"]
transpose0 = PaddleLayer(
id=layer_id_list[-1] + "_1",
kernel="paddle.transpose",
inputs={"x": input_name},
outputs=["{}_transpose_for_bn".format(input_name)],
perm=[0, 3, 1, 2])
bn_name = "merge_bn{}".format(self.bn_index)
self.bn_index += 1
params = parameters[gamma_layer.outputs[0]]
c = params.shape[0]
bn = PaddleLayer(
id=layer_id_list[-1] + "_2",
kernel="paddle.nn.BatchNorm",
inputs={"input": "{}_transpose_for_bn".format(input_name)},
outputs=[bn_name, "{}_bn".format(input_name)],
num_channels=c,
epsilon=full_layer.attrs["fill_value"],
param_attr=string(gamma_layer.outputs[0]),
bias_attr=string(beta_layer.outputs[0]),
moving_mean_name=string(mean_layer.outputs[0]),
moving_variance_name=string(var_layer.outputs[0]),
is_test=True)
transpose1 = PaddleLayer(
id=layer_id_list[-1] + "_3",
kernel="paddle.transpose",
inputs={"x": "{}_bn".format(input_name)},
outputs=add_layer.outputs,
perm=[0, 2, 3, 1])
return [transpose0, bn, transpose1], layer_id_list[-1]
......@@ -15,6 +15,7 @@
from x2paddle.optimizer.pass_manager import PassManager
from x2paddle.optimizer.fusion.dygraph import *
from x2paddle.optimizer.fusion.static import *
from x2paddle.optimizer.elimination.dygraph import *
class GraphOptimizer(object):
def __init__(self, source_frame, paddle_type="dygraph"):
......@@ -30,6 +31,12 @@ class GraphOptimizer(object):
self.passes = ["dygraph_bn_scale_fuse_pass"]
else:
self.passes = ["static_bn_scale_fuse_pass"]
elif source_frame == "tf":
self.passes = [
"dygraph_conv2d_add_fuse_pass",
"dygraph_tf_batchnorm_fuse_pass",
"transpose_eliminate_pass"
]
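# Pass order matters: both fuse passes run before
# transpose_eliminate_pass so the eliminator sees the transpose
# pairs that remain after fusion.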
else:
# TODO
pass
......@@ -37,6 +44,9 @@ class GraphOptimizer(object):
def optimize(self, graph):
for pass_name in self.passes:
pass_ = PassManager.lookup(pass_name)()
if pass_name.endswith("_eliminate_pass"):
pass_.apply(graph)
else:
while True:
before_len = len(graph.layers)
pass_.apply(graph)
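# Fuse passes are re-applied until the layer count stops changing;
# an *_eliminate_pass runs only once, since transpose elimination
# already iterates to a fixed point internally via strip_transpose.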
......
......@@ -19,6 +19,7 @@ class PatternMatcher(object):
def __init__(self, pattern):
self.pattern = pattern
# Each match in matches is a dict of layers ordered topologically
self.matches = list()
def operate(self, graph, match_kind="topo"):
......@@ -154,7 +155,7 @@ class PatternMatcher(object):
if len(block.layers) > 0:
self.detect_patterns_by_topo(layer.blocks[j])
def detect_patterns_by_edge(self, graph, ignore_list_inputs=True):
def detect_patterns_by_edge(self, graph):
"""当遇见顺序没有强制规定的pattern时使用该方式
"""
......@@ -163,8 +164,8 @@ class PatternMatcher(object):
pattern_ids = list(pattern_id2layers.keys())
pattern_layer_id = pattern_ids[0]
subgraph_id2layers = dict()
graph_layers = dict(list(graph.layers.items())[start_index:])
layer_id = list(graph_layers.keys())[0]
layer_id = list(graph.layers.keys())[start_index]
graph_layers = graph.layers
def update(layer_id, pattern_layer_id):
layer = graph_layers[layer_id]
......@@ -172,15 +173,23 @@ class PatternMatcher(object):
if layer.kernel != pattern_layer.kernel:
return False
subgraph_id2layers[layer_id] = layer
if pattern.edges_in.get(pattern_layer_id, 0) != 0:
if len(pattern.edges_in[pattern_layer_id]) != \
len(graph.edges_in[layer_id]):
return False
for i, pattern_layer_id_in in enumerate(pattern.edges_in[
pattern_layer_id]):
if pattern_layer_id_in == -1 or ignore_list_inputs:
if pattern_layer_id_in == -1:
continue
layer_id_in = graph.edges_in[layer_id][i]
subgraph_ids = list(subgraph_id2layers.keys())
if layer_id_in not in subgraph_ids:
return False
if pattern_layer_id_in in pattern_ids:
new_layer_id_in = graph.edges_in[layer_id][i]
if new_layer_id_in in subgraph_id2layers:
continue
update(new_layer_id_in, pattern_layer_id_in)
if pattern.edges_out.get(pattern_layer_id, 0) != 0:
if layer_id not in graph.edges_out:
return False
if len(pattern.edges_out[pattern_layer_id]) != \
len(graph.edges_out[layer_id]):
return False
......@@ -188,17 +197,8 @@ class PatternMatcher(object):
pattern_layer_id]):
if pattern_layer_id_out in pattern_ids:
new_layer_id_out = graph.edges_out[layer_id][i]
for j, new_new_layer_id_in in enumerate(
graph.edges_in[new_layer_id_out]):
if new_new_layer_id_in not in subgraph_id2layers:
if ignore_list_inputs:
continue
new_new_pattern_layer_id_in = pattern.edges_in[
pattern_layer_id_out][j]
if new_new_pattern_layer_id_in == -1:
if new_layer_id_out in subgraph_id2layers:
continue
update(new_new_layer_id_in,
new_new_pattern_layer_id_in)
update(new_layer_id_out, pattern_layer_id_out)
while len(subgraph_id2layers) != len(pattern_id2layers):
......@@ -258,6 +258,7 @@ def get_subgraph(prefix_layer_id, suffix_layer_id, graph):
class FuseBase(object):
def __init__(self, graph_type):
self.pattern = PaddleGraph(graph_type=graph_type)
self.patterns = list()
def operate(self, graph, match_kind="topo"):
parameters = graph.parameters
......@@ -267,16 +268,22 @@ class FuseBase(object):
first_layer_id = list(match.keys())[0]
subgraph = get_subgraph("", first_layer_id, graph)
self.insert_new_layer(subgraph, parameters, match)
self.delete_inter_layer(graph)
self.delete_match(graph)
graph.build()
def perform_pattern_matcher(self, graph, match_kind="topo"):
""" 执行模式匹配,找到匹配的子图。
"""
if len(self.patterns) > 0:
self.matches = list()
for pattern in self.patterns:
pattern_matcher = PatternMatcher(pattern)
self.matches.extend(pattern_matcher.operate(graph, match_kind))
else:
pattern_matcher = PatternMatcher(self.pattern)
self.matches = pattern_matcher.operate(graph, match_kind)
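# self.patterns lets a single fuser register several alternative graph
# shapes (as DygraphConv2DAddFuser does) and merge the matches from all
# of them before rewriting.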
def delete_inter_layer(self, graph):
def delete_match(self, graph):
""" 删除不需要的中间layer及其对应参数。
"""
for match in self.matches:
......@@ -291,3 +298,4 @@ class FuseBase(object):
if layer_id in subgraph.layers:
# layer_id may belong to a subgraph; in that case delete the parent layer, i.e. the whole subgraph
subgraph.layers.pop(layer_id)
\ No newline at end of file
......@@ -6,11 +6,6 @@ class BiasOpt:
self.conv_layers = [
'fluid.layers.conv2d', 'fluid.layers.conv2d_transpose'
]
self.act_layers = [
'fluid.layers.relu', 'fluid.layers.relu6', 'fluid.layers.sigmoid',
'fluid.layers.exp', 'fluid.layers.tanh', 'fluid.layers.softplus',
'fluid.layers.leaky_relu'
]
def run(self, graph):
print("Optimize: BiasOpt...")
......
......@@ -21,7 +21,6 @@ class TransposeOpt:
'fluid.layers.elementwise_add', 'fluid.layers.elementwise_sub',
'fluid.layers.elementwise_mul', 'fluid.layers.elementwise_div'
]
# self.reduce_layers = []
self.reduce_layers = [
'fluid.layers.reduce_mean', 'fluid.layers.reduce_all',
'fluid.layers.reduce_max', 'fluid.layers.reduce_any',
......