Commit ee48679c authored by SunAhong1993

add tf static

Parent fd3c33a8
@@ -132,18 +132,9 @@ def tf2paddle(model_path,
         graph_opt = GraphOptimizer(source_frame="tf", paddle_type=paddle_type)
         graph_opt.optimize(mapper.paddle_graph)
     else:
-        from x2paddle.optimizer.tensorflow.bias import BiasOpt
-        from x2paddle.optimizer.tensorflow.transpose import TransposeOpt
-        from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt
-        from x2paddle.optimizer.tensorflow.prelu import PReLUOpt
-        bias_opt = BiasOpt()
-        transpose_opt = TransposeOpt()
-        batch_norm_opt = BatchNormOpt()
-        prelu_opt = PReLUOpt()
-        bias_opt.run(mapper.paddle_graph)
-        batch_norm_opt.run(mapper.paddle_graph)
-        prelu_opt.run(mapper.paddle_graph)
-        transpose_opt.run(mapper.paddle_graph)
+        from x2paddle.optimizer.optimizer import GraphOptimizer
+        graph_opt = GraphOptimizer(source_frame="tf", paddle_type=paddle_type)
+        graph_opt.optimize(mapper.paddle_graph)
     mapper.paddle_graph.gen_model(save_dir)
......
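
With this change the dygraph and static branches of tf2paddle funnel into the same GraphOptimizer entry point; only the pass list selected inside GraphOptimizer differs by paddle_type (see the optimizer.py hunk at the end of this commit). A minimal sketch of the static flow, assuming `mapper` and `save_dir` as in the surrounding function:

    from x2paddle.optimizer.optimizer import GraphOptimizer

    graph_opt = GraphOptimizer(source_frame="tf", paddle_type="static")
    graph_opt.optimize(mapper.paddle_graph)      # runs the static TF pass list
    mapper.paddle_graph.gen_model(save_dir)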
@@ -284,7 +284,6 @@ class TFOpMapper(OpMapper):
             inputs["shape"] = dims.name
         layer_attrs["dtype"] = string(input_value.dtype)
         layer_attrs["fill_value"] = input_value.value
-
         self.paddle_graph.add_layer(
             "paddle.full",
@@ -578,6 +577,9 @@ class TFOpMapper(OpMapper):
             inputs={"x": node.name},
             outputs=[node.name],
             perm=[0, 2, 3, 1])

+    def FusedBatchNormV3(self, node):
+        self.FusedBatchNorm(node)
+
     def Mean(self, node):
         input = self.graph.get_input_node(node, 0)
@@ -930,6 +932,23 @@ class TFOpMapper(OpMapper):
             outputs=[node.name],
             axis=axis)

+    def Concat(self, node):
+        inputs_list = list()
+        for i in range(1, len(node.inputs)):
+            inputs_list.append(self.graph.get_input_node(node, i))
+        axis = self.graph.get_input_node(node, 0)
+        assert axis.layer_type == "Const", "axis for Concat must be type Const"
+        axis = axis.value
+        if axis < 0:
+            axis += len(inputs_list[0].out_shapes[0])
+        input_names = [i.name for i in inputs_list]
+        self.paddle_graph.add_layer(
+            kernel="paddle.concat",
+            inputs={"x": input_names},
+            outputs=[node.name],
+            axis=axis)
+
     def AddN(self, node):
         inputs_list = list()
         for i in range(len(node.inputs) - 1):
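
A note on the new Concat handler: TF's Concat op carries the concatenation axis as its first input, while ConcatV2 carries it as the last one; both lower to the same Paddle kernel. A minimal sketch of the emitted call on two NHWC tensors (illustrative, not part of the commit):

    import paddle

    t0 = paddle.ones([1, 2, 2, 3])
    t1 = paddle.zeros([1, 2, 2, 3])
    out = paddle.concat(x=[t0, t1], axis=3)      # shape: [1, 2, 2, 6]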
@@ -1400,6 +1419,7 @@ class TFOpMapper(OpMapper):
         inputs = {"x": x.name, "y": y.name}
         x_shape = x.out_shapes[0]
         y_shape = y.out_shapes[0]
+        # TODO(syf)
         layer_id = self.paddle_graph.add_layer(
             "fluid.layers.elementwise_sub", inputs=inputs, outputs=[node.name])
         self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
......
@@ -89,6 +89,7 @@ class CaffeOpMapper(OpMapper):
             input_shape.append(last_node.output_shape[idx])
         node.input_shape = input_shape
         func_name = 'shape_' + node.layer_type.lower()
+
         if is_fluid_op:
             node.output_shape = getattr(caffe_shape, func_name)(node.layer,
@@ -974,4 +975,4 @@ class CaffeOpMapper(OpMapper):
             kernel=op_info,
             inputs={"x": self.get_input_name(input)},
             outputs=[node.layer_name])
\ No newline at end of file
@@ -201,6 +201,7 @@ class HierarchicalTree(Tree):
             code_str = gen_layer_code(self.pd_graph, sub_layers, module_name,
                                       different_attrs=diff_attrs_column)
+            # print(code_str)
             self.codes.append(code_str)
         for sub_layers in sub_layers_list:
             inputs, outputs = get_inputs_outputs(self.pd_graph, sub_layers)
@@ -358,7 +359,7 @@ class HierarchicalTree(Tree):
             run_func_list.append("    # {}: shape {}, dtype {}.".format(k, v[0], v[1]))
         run_func_list.extend(
             ["    paddle.disable_static()",
-             "    params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
+             "    params, _ = fluid.load_dygraph('{}/model')".format(save_dir),
              "    model = {}()".format(self.pd_graph.name),
              "    model.set_dict(params)",
              "    model.eval()",
@@ -370,12 +371,7 @@ class HierarchicalTree(Tree):
         self.update_parameters()
         import_list = ["import paddle",
                        "import paddle.fluid as fluid",
-                       "from paddle.fluid.initializer import Constant",
-                       "from paddle.fluid.param_attr import ParamAttr",
-                       "import math",
-                       "from x2paddle.op_mapper.dygraph.pytorch2paddle " + \
-                       "import pytorch_custom_layer as x2paddle_nn"
-                       "\n",]
+                       "",]
         import_str = "\n".join(import_list)
         if not osp.exists(save_dir):
             os.makedirs(save_dir)
......
@@ -29,9 +29,9 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
                   "paddle.nn.Tanh": "tanh",
                   "paddle.nn.AvgPool2D": "pool",
                   "paddle.nn.MaxPool2D": "pool",
-                  "paddle.nn.Pad1D": "pad",
-                  "paddle.nn.Pad2D": "pad",
-                  "paddle.nn.Pad3D": "pad",
+                  "paddle.nn.Pad1d": "pad",
+                  "paddle.nn.Pad2d": "pad",
+                  "paddle.nn.Pad3d": "pad",
                   "paddle.nn.Dropout": "dropout",
                   "paddle.nn.GELU": "gelu",
                   "paddle.nn.Hardtanh": "tanh",
@@ -175,11 +175,9 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
             if layer.kernel.startswith("paddle.nn") and index == 0:
                 continue
             if not output_name.startswith("x") or output_name in outputs \
-                    or layer.kernel == "prim.assert":
+                    or layer.kernel == "prim.assert" or \
+                    layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                 continue
-            elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
-                if index != 0:
-                    outputs.append(output_name)
             elif output_name not in outputs:
                 outputs.append(output_name)
                 continue
@@ -189,22 +187,15 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
         if layer.kernel.startswith("paddle.nn") and index == 0 and "functional" not in layer.kernel:
             continue
         if not output_name.startswith("x") or output_name in outputs \
-                or layer.kernel == "prim.assert":
+                or layer.kernel == "prim.assert" or \
+                layer.kernel == "prim.if" or layer.kernel == "prim.loop":
             continue
-        elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
-            if index != 0:
-                outputs.append(output_name)
         else:
             outputs.append(output_name)
     no_output_count = 0
     for i, (layer_id, layer) in enumerate(sub_layers.items()):
-        if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
-           layer.kernel.startswith("custom_layer"):
-            line = "self.{}".format(layer.outputs[0])
-            if layer.kernel.startswith("custom_layer"):
-                line += "= x2paddle_nn.{}(".format(layer.kernel.split(":")[-1])
-            else:
-                line += " = {}(".format(layer.kernel)
+        if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel):
+            line = "self.{} = {}(".format(layer.outputs[0], layer.kernel)
             for k, v in layer.attrs.items():
                 key_name = "{}_{}".format(layer.outputs[0], k)
                 if key_name in different_attrs:
@@ -298,10 +289,7 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
             else:
                 if v not in cur_outputs and v not in inputs:
                     inputs.append(v)
-                if k == "args":
-                    line += v
-                else:
-                    line += "{}={}, ".format(k, v)
+                line += "{}={}, ".format(k, v)
         for k, v in layer.attrs.items():
             key_name = "{}_{}".format(layer.outputs[0], k)
             if key_name in different_attrs:
......
@@ -50,25 +50,21 @@ def get_inputs_outputs(pd_graph, layers):
     for layer_id, layer in layers.items():
         # Get output node names
         if layer_id not in pd_graph.edges_out:
-            for index, output_name in enumerate(layer.outputs):
+            for output_name in layer.outputs:
                 if not output_name.startswith("x") or output_name in outputs \
-                        or layer.kernel == "prim.assert":
+                        or layer.kernel == "prim.assert" or \
+                        layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                     continue
-                elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
-                    if index != 0:
-                        outputs.append(output_name)
                 elif output_name not in outputs:
                     outputs.append(output_name)
         else:
             for out_layer_id in pd_graph.edges_out[layer_id]:
                 if out_layer_id not in layer_ids:
-                    for index, output_name in enumerate(layer.outputs):
+                    for output_name in layer.outputs:
                         if not output_name.startswith("x") or output_name in outputs \
-                                or layer.kernel == "prim.assert":
+                                or layer.kernel == "prim.assert" or \
+                                layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                             continue
-                        elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
-                            if index != 0:
-                                outputs.append(output_name)
                         else:
                             outputs.append(output_name)
     # Get input node names
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .transpose_elimination import StaticTransposeElimination
from .transpose_eliminate_pass import StaticTransposeEliminatePass
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.elimination.static import StaticTransposeElimination
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticTransposeEliminatePass(Pass):
name = "static_transpose_eliminate_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = StaticTransposeElimination()
fuser.operate(graph)
# For registration.
static_transpose_eliminate_pass = StaticTransposeEliminatePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import sys
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticTransposeElimination(FuseBase):
def __init__(self):
super(StaticTransposeElimination, self).__init__(graph_type="static")
self.direct_layers = [
'paddle.nn.functional.relu', 'paddle.nn.functional.relu6', 'paddle.abs',
'paddle.nn.functional.sigmoid', 'paddle.exp', 'paddle.rsqrt',
'paddle.nn.functional.swish', 'paddle.tanh',
'paddle.nn.functional.softplus', 'paddle.nn.functional.leaky_relu',
'paddle.floor', 'paddle.erf', 'paddle.square'
]
self.elementwise_layers = [
'paddle.add', 'fluid.layers.elementwise_sub',
'paddle.multiply', 'paddle.divide'
]
self.reduce_layers = [
'paddle.mean', 'paddle.all',
'paddle.max', 'paddle.any',
'paddle.sum', 'paddle.prod'
]
def get_transpose_num(self, graph):
count = 0
for layer_id, layer in graph.layers.items():
if layer.kernel == "paddle.transpose":
count += 1
return count
def operate(self, graph):
total_layer_num = len(graph.layers)
scanned_layers = set()
optimized_transpose_layers = list()
optimized_reduce_layers = list()
optimized_concat_layers = list()
optimized_elementwise_layers = list()
def get_index(layer):
if layer.kernel.startswith("paddle.nn") and "functional" not in layer.kernel:
return 1
else:
return 0
def strip_transpose(_graph):
layers = copy.deepcopy(_graph.layers)
for layer_id, layer in layers.items():
if layer_id in scanned_layers:
continue
scanned_layers.add(layer_id)
percent = round(len(scanned_layers) / total_layer_num * 100, 2)
sys.stderr.write("\rOptimize Transpose Layers...{}%".format(
percent))
if layer.kernel != "paddle.transpose":
continue
if layer.attrs["perm"] != [0, 2, 3, 1]:
continue
transpose_layers = list()
propagate_layers = list()
reduce_layers = list()
concat_layers = list()
            # This elementwise_layers list is reserved for layers of the shape(4) + shape(1) form
elementwise_layers = list()
can_be_optimized = True
for out in _graph.edges_out.get(layer_id, []):
if _graph.layers[out].kernel == "paddle.transpose":
if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
can_be_optimized = False
break
transpose_layers.append(out)
elif _graph.layers[out].kernel in self.elementwise_layers:
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.direct_layers:
                        output_index = get_index(_graph.layers[out])
                        if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.reduce_layers:
                        output_index = get_index(_graph.layers[out])
                        if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if not _graph.layers[out].attrs.get('keepdim', False):
can_be_optimized = False
break
propagate_layers.append(out)
reduce_layers.append(out)
elif _graph.layers[out].kernel == "paddle.concat":
                        output_index = get_index(_graph.layers[out])
                        if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
propagate_layers.append(out)
concat_layers.append(out)
else:
can_be_optimized = False
break
visited_layers = set()
while len(propagate_layers) > 0 and can_be_optimized:
current_id = propagate_layers.pop(0)
visited_layers.add(current_id)
for out in _graph.edges_out.get(current_id, []):
if _graph.layers[
out].kernel == "paddle.transpose":
if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
can_be_optimized = False
break
transpose_layers.append(out)
elif _graph.layers[
out].kernel in self.elementwise_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.direct_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
elif _graph.layers[out].kernel in self.reduce_layers:
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if not _graph.layers[out].attrs.get('keepdim',
False):
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
reduce_layers.append(out)
elif _graph.layers[out].kernel == "paddle.concat":
output_index = get_index(_graph.layers[out])
if _graph.layers[out].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if out not in visited_layers:
propagate_layers.append(out)
concat_layers.append(out)
else:
can_be_optimized = False
break
for ipt in _graph.edges_in.get(current_id, []):
if _graph.layers[
current_id].kernel in self.elementwise_layers:
try:
x_shape = _graph.layers[
current_id].input_shapes['x']
y_shape = _graph.layers[
current_id].input_shapes['y']
output_index = get_index(_graph.layers[ipt])
if _graph.layers[ipt].outputs[
output_index] == _graph.layers[current_id].inputs[
'x']:
if len(x_shape) <= 1:
elementwise_layers.append(current_id)
continue
elif _graph.layers[ipt].outputs[
output_index] == _graph.layers[current_id].inputs[
'y']:
if len(y_shape) <= 1:
elementwise_layers.append(current_id)
continue
else:
                                raise Exception(
                                    "Unexpected situation happened while optimizing transpose"
                                )
except Exception as e:
can_be_optimized = False
break
output_index = get_index(_graph.layers[ipt])
if _graph.layers[
ipt].kernel == "paddle.transpose":
if _graph.layers[ipt].attrs["perm"] != [0, 2, 3, 1]:
can_be_optimized = False
break
if ipt not in visited_layers:
transpose_layers.append(ipt)
elif _graph.layers[
ipt].kernel in self.elementwise_layers:
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
elif _graph.layers[ipt].kernel in self.direct_layers:
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
elif _graph.layers[ipt].kernel in self.reduce_layers:
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if not _graph.layers[ipt].attrs.get('keepdim',
False):
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
reduce_layers.append(ipt)
elif _graph.layers[ipt].kernel == "paddle.concat":
if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
can_be_optimized = False
break
if ipt not in visited_layers:
propagate_layers.append(ipt)
concat_layers.append(ipt)
else:
can_be_optimized = False
break
if not can_be_optimized:
break
if not can_be_optimized:
continue
transpose_layers.append(layer_id)
transpose_layers = list(set(transpose_layers))
for l in transpose_layers:
output_index = get_index(graph.layers[l])
if graph.layers[l].outputs[output_index] in graph.outputs:
can_be_optimized = False
break
if not can_be_optimized:
continue
for l in transpose_layers:
_graph.del_layer(l)
optimized_transpose_layers.extend(transpose_layers)
optimized_reduce_layers.extend(reduce_layers)
optimized_concat_layers.extend(concat_layers)
optimized_elementwise_layers.extend(elementwise_layers)
return True
return False
before_transpose_num = self.get_transpose_num(graph)
opt_graph = copy.deepcopy(graph)
total_layer_num = len(opt_graph.layers)
while strip_transpose(opt_graph):
pass
for layer_id in list(set(optimized_transpose_layers)):
graph.del_layer(layer_id)
for layer_id in list(set(optimized_reduce_layers)):
dim = graph.layers[layer_id].attrs.get('axis', None)
if dim is not None:
for i in range(len(dim)):
dim[i] = [0, 2, 3, 1][dim[i]]
graph.layers[layer_id].attrs['axis'] = dim
for layer_id in list(set(optimized_concat_layers)):
axis = graph.layers[layer_id].attrs.get('axis', 0)
graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]
for layer_id in list(set(optimized_elementwise_layers)):
axis = graph.layers[layer_id].attrs.get('axis', -1)
graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]
if graph.layers[layer_id].kernel == "paddle.add":
graph.layers[layer_id].kernel = "fluid.layers.elementwise_add"
current_transpose_num = self.get_transpose_num(graph)
print(
"\nTranspose layers optimized, before: transpose_num={}, after: transpose_num={}".
format(before_transpose_num, current_transpose_num))
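
The elimination above is sound because every kernel the pass propagates through is layout-agnostic: an NCHW->NHWC transpose, an element-wise op, and the inverse transpose collapse to the op alone, while reductions and concat only need their axis remapped through the same permutation (done at the end of operate()). A self-contained numpy check of the core identity (illustrative, not part of the commit):

    import numpy as np

    x = np.random.randn(1, 3, 4, 5)                        # NCHW input
    nhwc = np.transpose(x, (0, 2, 3, 1))                   # perm=[0, 2, 3, 1]
    y = np.transpose(np.maximum(nhwc, 0.0), (0, 3, 1, 2))  # relu, then perm=[0, 3, 1, 2]
    assert np.array_equal(y, np.maximum(x, 0.0))           # equals relu applied in NCHW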
@@ -105,10 +105,6 @@ class DygraphConv2DAddFuser(FuseBase):
             if layer.kernel == "paddle.nn.Conv2D":
                 conv_id = layer_id
         for layer_id, layer in matches.items():
-            if layer.kernel == "paddle.nn.functional.conv2d_transpose":
-                layer.bias = bias_name
-                if not is_transpose:
-                    layer.outputs[0] = output_name
             if layer.kernel == "paddle.nn.Conv2D":
                 layer.attrs["bias_attr"] = bias_name
                 if not is_transpose:
......
@@ -13,4 +13,11 @@
 # limitations under the License.
 from .bn_scale_fuser import Static_BNScaleFuser
-from .bn_scale_fuse_pass import Static_BNScaleFusePass
\ No newline at end of file
+from .bn_scale_fuse_pass import Static_BNScaleFusePass
+from .conv2d_add_fuser import StaticConv2DAddFuser
+from .conv2d_add_fuse_pass import StaticConv2DAddFusePass
+from .prelu_fuser import StaticPReLUFuser
+from .prelu_fuse_pass import StaticPReLUFusePass
+from .tf_batchnorm_fuser import StaticTFBatchNormFuser
+from .tf_batchnorm_fuse_pass import StaticTFBatchNormFusePass
@@ -79,7 +79,6 @@ class Static_BNScaleFuser(FuseBase):
         graph.layers[new_layer_id] = new_layer
         matches.pop(new_layer_id)
-
     def gen_new_layer(self, parameters, matches):
         layers_id = list(matches.keys())
         layer = matches[layers_id[0]]
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticConv2DAddFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticConv2DAddFusePass(Pass):
name = "static_conv2d_add_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = StaticConv2DAddFuser()
fuser.operate(graph, match_kind="edge")
# For registration.
static_conv2d_add_fuse_pass = StaticConv2DAddFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticConv2DAddFuser(FuseBase):
def __init__(self):
super(StaticConv2DAddFuser, self).__init__(graph_type="static")
self.patterns = list()
def build_pattern(self):
""" 描述需要替换的conv2d+add图结构。
conv2d+add层模式python实现代码示例:
模式一:
MobilenetV1_Logits_Conv2d_1c_1x1_biases = paddle.static.create_parameter(dtype='float32', shape=[1001], name='MobilenetV1_Logits_Conv2d_1c_1x1_biases', default_initializer=paddle.nn.initializer.Constant(value=0.0))
conv2d_transpose_14 = paddle.transpose(x=MobilenetV1_Logits_AvgPool_1a_AvgPool, perm=[0, 3, 1, 2])
MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.nn.functional.conv2d(x=conv2d_transpose_14, weight=MobilenetV1_Logits_Conv2d_1c_1x1_weights, bias=None, stride=[1, 1], dilation=[1, 1], padding='SAME')
MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.transpose(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, perm=[0, 2, 3, 1])
MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
        Pattern 2:
MobilenetV1_Logits_Conv2d_1c_1x1_biases = paddle.static.create_parameter(dtype='float32', shape=[1001], name='MobilenetV1_Logits_Conv2d_1c_1x1_biases', default_initializer=paddle.nn.initializer.Constant(value=0.0))
MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.nn.functional.conv2d(x=conv2d_transpose_14, weight=MobilenetV1_Logits_Conv2d_1c_1x1_weights, bias=None, stride=[1, 1], dilation=[1, 1], padding='SAME')
MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
"""
def gen_name(id):
return "x" + str(id)
pattern = PaddleGraph(graph_type="dygraph")
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(0)])
pattern.add_layer(
kernel="paddle.transpose",
inputs={"x": "conv-input-0"},
outputs=[gen_name(1)],
perm=[0, 3, 1, 2])
pattern.add_layer(
kernel="paddle.nn.functional.conv2d",
inputs={"input": gen_name(1),
"weight": "conv-input-1"},
outputs=[gen_name(2)])
pattern.add_layer(
kernel="paddle.transpose",
inputs={"x": gen_name(2)},
outputs=[gen_name(2)],
perm=[0, 2, 3, 1])
pattern.add_layer(
kernel="paddle.add",
inputs={"x": gen_name(2),
"y": gen_name(0)},
outputs=[gen_name(3)])
pattern.build(inputs={"input-0": "conv-input-0",
"input-1": "conv-input-1"})
self.patterns.append(pattern)
pattern = PaddleGraph(graph_type="dygraph")
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(0)])
pattern.add_layer(
kernel="paddle.nn.functional.conv2d",
inputs={"input": "conv-input-0",
"weight": "conv-input-1"},
outputs=[gen_name(1)])
pattern.add_layer(
kernel="paddle.add",
inputs={"x": gen_name(1),
"y": gen_name(0)},
outputs=[gen_name(2)])
pattern.build(inputs={"input-0": "conv-input-0",
"input-1": "conv-input-1"})
self.patterns.append(pattern)
def insert_new_layer(self, graph, parameters, matches):
self.gen_new_layer(matches, graph)
matches_copy = copy.deepcopy(matches)
for layer_id, layer in matches_copy.items():
if layer.kernel not in ["paddle.add"]:
matches.pop(layer_id)
def gen_new_layer(self, matches, graph):
is_transpose = False
for layer_id, layer in matches.items():
if layer.kernel == "paddle.static.create_parameter":
bias_name = layer.attrs["name"][1: -1]
if layer.kernel == "paddle.transpose":
is_transpose = True
if layer.kernel == "paddle.add":
output_name = layer.outputs[0]
if layer.kernel == "paddle.nn.functional.conv2d":
conv_id = layer_id
for layer_id, layer in matches.items():
if layer.kernel == "paddle.nn.functional.conv2d":
layer.inputs["bias"] = bias_name
layer.attrs.pop("bias")
if not is_transpose:
layer.outputs[0] = output_name
if layer.kernel == "paddle.transpose":
if conv_id in graph.edges_in[layer_id]:
layer.outputs[0] = output_name
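
After insert_new_layer runs, only the paddle.add layer stays in `matches` for removal; the conv2d layer absorbs the bias as an input and (in pattern 2) takes over the add's output name. Pattern 2 from the docstring therefore reduces to roughly this single call (hedged sketch; argument values taken from the docstring example):

    MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.nn.functional.conv2d(
        x=conv2d_transpose_14,
        weight=MobilenetV1_Logits_Conv2d_1c_1x1_weights,
        bias=MobilenetV1_Logits_Conv2d_1c_1x1_biases,
        stride=[1, 1], dilation=[1, 1], padding='SAME')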
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticPReLUFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticPReLUFusePass(Pass):
name = "static_prelu_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = StaticPReLUFuser()
fuser.operate(graph, match_kind="edge")
# For registration.
static_prelu_fuse_pass = StaticPReLUFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticPReLUFuser(FuseBase):
def __init__(self):
super(StaticPReLUFuser, self).__init__(graph_type="static")
def build_pattern(self):
""" 描述需要替换的prelu图结构。
prelu层模式python实现代码示例:
conv4_alphas = paddle.static.create_parameter(dtype='float32', shape=[128], name='conv4_alphas', default_initializer=paddle.nn.initializer.Constant(value=0.0))
conv4_mul_1_y = paddle.full(dtype='float32', shape=[1], fill_value=0.5)
conv4_Relu = paddle.nn.functional.relu(x=conv4_BiasAdd)
conv4_Abs = paddle.abs(x=conv4_BiasAdd)
conv4_sub = fluid.layers.elementwise_sub(x=conv4_BiasAdd, y=conv4_Abs)
conv4_mul = paddle.multiply(x=conv4_alphas, y=conv4_sub)
conv4_mul_1 = paddle.multiply(x=conv4_mul, y=conv4_mul_1_y)
conv4_add = paddle.add(x=conv4_Relu, y=conv4_mul_1)
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(0)])
self.pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(1)],
shape=[1],
fill_value=0.5)
self.pattern.add_layer(
"paddle.nn.functional.relu",
inputs={"x": "prelu-input-0"},
outputs=[gen_name(2)])
self.pattern.add_layer(
"paddle.abs",
inputs={"x": "prelu-input-0"},
outputs=[gen_name(3)])
self.pattern.add_layer(
"fluid.layers.elementwise_sub",
inputs={"x": "prelu-input-0",
"y": gen_name(3)},
outputs=[gen_name(4)])
self.pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(0),
"y": gen_name(4)},
outputs=[gen_name(5)])
self.pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(5),
"y": gen_name(1)},
outputs=[gen_name(6)])
self.pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(2),
"y": gen_name(6)},
outputs=[gen_name(7)])
self.pattern.build(inputs={"input-0": "prelu-input-0", })
def insert_new_layer(self, graph, parameters, matches):
new_layers, last_layer_id = self.gen_new_layer(matches, parameters, graph)
matches_copy = copy.deepcopy(matches)
for layer_id, layer in matches_copy.items():
for i in range(4):
if layer_id == new_layers[i].id:
matches.pop(new_layers[i].id)
prefix_layers = OrderedDict()
mid_layers = OrderedDict()
suffix_layers = OrderedDict()
is_need_id = False
for layer_id, layer in graph.layers.items():
if is_need_id:
suffix_layers[layer_id] = layer
else:
if layer_id == last_layer_id:
for i in range(4):
mid_layers[new_layers[i].id] = new_layers[i]
is_need_id = True
prefix_layers[layer_id] = layer
prefix_layers.update(mid_layers)
prefix_layers.update(suffix_layers)
graph.layers = prefix_layers
def gen_new_layer(self, matches, parameters, graph):
layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
for layer_id, layer in matches.items():
if layer.kernel == "paddle.nn.functional.relu":
input_name = layer.inputs["x"]
if layer.kernel == "paddle.static.create_parameter":
param_layer = layer
param_name = layer.outputs[0]
if layer.kernel == "paddle.add":
output_name = layer.outputs[0]
transpose0 = PaddleLayer(
id=layer_id_list[-1] + "_1",
kernel="paddle.transpose",
inputs={"x": input_name},
outputs=["{}_transpose_for_prelu".format(input_name)],
perm=[0, 3, 1, 2])
param = parameters[param_name]
c = param.shape[0]
prelu = PaddleLayer(id=layer_id_list[-1] + "_2",
kernel="paddle.nn.functional.prelu",
inputs={"x": "{}_transpose_for_prelu".format(input_name),
"weight": param_name},
outputs=["{}_prelu".format(input_name)])
transpose1 = PaddleLayer(
id=layer_id_list[-1] + "_3",
kernel="paddle.transpose",
inputs={"x": "{}_prelu".format(input_name)},
outputs=[output_name],
perm=[0, 2, 3, 1])
return [param_layer, transpose0, prelu, transpose1], layer_id_list[-1]
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticTFBatchNormFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticTFBatchNormFusePass(Pass):
name = "static_tf_batchnorm_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = StaticTFBatchNormFuser()
fuser.operate(graph, match_kind="edge")
# For registration.
static_tf_batchnorm_fuse_pass = StaticTFBatchNormFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticTFBatchNormFuser(FuseBase):
def __init__(self):
super(StaticTFBatchNormFuser, self).__init__(graph_type="static")
self.patterns = list()
def build_pattern(self):
""" 描述需要替换的batchnorm图结构。
batchnorm层模式python实现代码示例:
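        Illustrative example, inferred from pattern one below (variable names are hypothetical):
        conv1_var_eps = paddle.add(x=conv1_moving_variance, y=conv1_eps)
        conv1_rsqrt = paddle.rsqrt(x=conv1_var_eps)
        conv1_scale = paddle.multiply(x=conv1_rsqrt, y=conv1_gamma)
        conv1_mean_scale = paddle.multiply(x=conv1_moving_mean, y=conv1_scale)
        conv1_offset = fluid.layers.elementwise_sub(x=conv1_beta, y=conv1_mean_scale)
        conv1_scaled = paddle.multiply(x=conv1_input, y=conv1_scale)
        conv1_out = paddle.add(x=conv1_scaled, y=conv1_offset)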
"""
def gen_name(id):
return "x" + str(id)
pattern = PaddleGraph(graph_type="dygraph")
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(0)])
pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(1)],
shape=[1])
pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(0), "y": gen_name(1)},
outputs=[gen_name(2)])
pattern.add_layer(
"paddle.rsqrt",
inputs={"x": gen_name(2)},
outputs=[gen_name(3)])
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(4)])
pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(3), "y": gen_name(4)},
outputs=[gen_name(5)])
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(6)])
pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(6), "y": gen_name(5)},
outputs=[gen_name(7)])
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(8)])
pattern.add_layer(
"fluid.layers.elementwise_sub",
inputs={"x": gen_name(8), "y": gen_name(7)},
outputs=[gen_name(9)])
pattern.add_layer(
"paddle.multiply",
inputs={"x": "bn-input-0", "y": gen_name(5)},
outputs=[gen_name(10)])
pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(10), "y": gen_name(9)},
outputs=[gen_name(11)])
pattern.build(inputs={"input-0": "bn-input-0", })
self.patterns.append(pattern)
pattern = PaddleGraph(graph_type="dygraph")
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(0)])
pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(1)],
shape=[1])
pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(0), "y": gen_name(1)},
outputs=[gen_name(2)])
pattern.add_layer(
"paddle.rsqrt",
inputs={"x": gen_name(2)},
outputs=[gen_name(3)])
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(4)])
pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(3), "y": gen_name(4)},
outputs=[gen_name(5)])
pattern.add_layer(
"paddle.multiply",
inputs={"x": "bn-input-0", "y": gen_name(5)},
outputs=[gen_name(10)])
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(6)])
pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(6), "y": gen_name(5)},
outputs=[gen_name(7)])
pattern.add_layer(
"paddle.static.create_parameter",
inputs={},
outputs=[gen_name(8)])
pattern.add_layer(
"fluid.layers.elementwise_sub",
inputs={"x": gen_name(8), "y": gen_name(7)},
outputs=[gen_name(9)])
pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(10), "y": gen_name(9)},
outputs=[gen_name(11)])
pattern.build(inputs={"input-0": "bn-input-0", })
self.patterns.append(pattern)
def insert_new_layer(self, graph, parameters, matches):
new_layers, last_layer_id = self.gen_new_layer(matches, parameters, graph)
matches_copy = copy.deepcopy(matches)
for layer_id, layer in matches_copy.items():
for i in range(7):
if layer_id == new_layers[i].id:
matches.pop(new_layers[i].id)
prefix_layers = OrderedDict()
mid_layers = OrderedDict()
suffix_layers = OrderedDict()
is_need_id = False
for layer_id, layer in graph.layers.items():
if is_need_id:
suffix_layers[layer_id] = layer
else:
if layer_id == last_layer_id:
for i in range(7):
mid_layers[new_layers[i].id] = new_layers[i]
is_need_id = True
prefix_layers[layer_id] = layer
prefix_layers.update(mid_layers)
prefix_layers.update(suffix_layers)
graph.layers = prefix_layers
def gen_new_layer(self, matches, parameters, graph):
layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
for layer_id, layer in matches.items():
if layer.kernel == "paddle.full":
full_layer = layer
out_layer_id = graph.edges_out[layer_id][0]
if matches[out_layer_id].kernel == "paddle.add":
var_layer_id = graph.edges_in[out_layer_id][0]
var_layer = matches[var_layer_id]
if layer.kernel == "paddle.rsqrt":
out_layer_id = graph.edges_out[layer_id][0]
if matches[out_layer_id].kernel == "paddle.multiply":
gamma_layer_id = graph.edges_in[out_layer_id][1]
gamma_layer = matches[gamma_layer_id]
if layer.kernel == "fluid.layers.elementwise_sub":
in_layer_id = graph.edges_in[layer_id][0]
beta_layer = matches[in_layer_id]
in_layer_id = graph.edges_in[layer_id][1]
in_layer_id = graph.edges_in[in_layer_id][0]
mean_layer = matches[in_layer_id]
out_layer_id = graph.edges_out[layer_id][0]
add_layer = matches[out_layer_id]
if layer.kernel == "paddle.multiply":
in_layer_id = graph.edges_in[layer_id][1]
mul_layer = matches[in_layer_id]
if mul_layer.kernel == "paddle.multiply":
in_layer_id = graph.edges_in[layer_id][0]
if in_layer_id not in matches:
input_name = layer.inputs["x"]
transpose0 = PaddleLayer(
id=layer_id_list[-1] + "_1",
kernel="paddle.transpose",
inputs={"x": input_name},
outputs=["{}_transpose_for_bn".format(input_name)],
perm=[0, 3, 1, 2])
params = parameters[gamma_layer.outputs[0]]
c = params.shape[0]
bn = PaddleLayer(
id=layer_id_list[-1] + "_2",
kernel="paddle.nn.functional.batch_norm",
inputs={"x": "{}_transpose_for_bn".format(input_name),
"running_mean": mean_layer.outputs[0],
"running_var": var_layer.outputs[0],
"weight": gamma_layer.outputs[0],
"bias": beta_layer.outputs[0]},
outputs=["{}_bn".format(input_name)],
epsilon=full_layer.attrs["fill_value"])
transpose1 = PaddleLayer(
id=layer_id_list[-1] + "_3",
kernel="paddle.transpose",
inputs={"x": "{}_bn".format(input_name)},
outputs=add_layer.outputs,
perm=[0, 2, 3, 1])
mean_layer.id = layer_id_list[-1] + "_01"
var_layer.id = layer_id_list[-1] + "_02"
gamma_layer.id = layer_id_list[-1] + "_03"
beta_layer.id = layer_id_list[-1] + "_04"
return [mean_layer, var_layer, gamma_layer, beta_layer,
transpose0, bn, transpose1], layer_id_list[-1]
@@ -16,13 +16,13 @@ from x2paddle.optimizer.pass_manager import PassManager
 from x2paddle.optimizer.fusion.dygraph import *
 from x2paddle.optimizer.fusion.static import *
 from x2paddle.optimizer.elimination.dygraph import *
+from x2paddle.optimizer.elimination.static import *
 class GraphOptimizer(object):
     def __init__(self, source_frame, paddle_type="dygraph", jit_type="trace"):
         if source_frame == "pytorch":
             if jit_type == "trace":
-                self.passes = ["dygraph_constant_fuse_pass",
-                               "trace_fc_fuse_pass"]
+                self.passes = ["trace_fc_fuse_pass"]
             else:
                 self.passes = [
                     "dygraph_constant_fuse_pass",
@@ -39,12 +39,20 @@ class GraphOptimizer(object):
             else:
                 self.passes = ["static_bn_scale_fuse_pass"]
         elif source_frame == "tf":
-            self.passes = [
-                "dygraph_conv2d_add_fuse_pass",
-                "dygraph_tf_batchnorm_fuse_pass",
-                "dygraph_prelu_fuse_pass",
-                "transpose_eliminate_pass"
-            ]
+            if paddle_type == "dygraph":
+                self.passes = [
+                    "dygraph_conv2d_add_fuse_pass",
+                    "dygraph_tf_batchnorm_fuse_pass",
+                    "dygraph_prelu_fuse_pass",
+                    "transpose_eliminate_pass"
+                ]
+            else:
+                self.passes = [
+                    "static_conv2d_add_fuse_pass",
+                    "static_tf_batchnorm_fuse_pass",
+                    "static_prelu_fuse_pass",
+                    "static_transpose_eliminate_pass"
+                ]
         else:
             self.passes = []
......
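
Each name in these pass lists resolves to a module-level pass instance created at import time (e.g. static_transpose_eliminate_pass above) and registered through the @pass_register decorator. Applying a single pass by hand looks like this (mirrors the body of StaticTFBatchNormFusePass.apply; `graph` is assumed to be a populated static PaddleGraph):

    from x2paddle.optimizer.fusion.static import StaticTFBatchNormFuser

    fuser = StaticTFBatchNormFuser()
    fuser.operate(graph, match_kind="edge")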