Unverified commit 20a14d29, authored by mamingjie-China, committed by GitHub

Merge pull request #4 from PaddlePaddle/develop

Update data
......@@ -19,9 +19,11 @@ onnx : onnx == 1.5.0 onnxruntime == 0.4.0
## Installation
### Installation Method 1 (Recommended)
To use the latest version of the code, install as follows:
```
pip install git+https://github.com/PaddlePaddle/X2Paddle.git@develop
git clone https://github.com/PaddlePaddle/X2Paddle.git
cd X2Paddle
git checkout develop
python setup.py install
```
### Installation Method 2
......@@ -29,15 +31,6 @@ pip install git+https://github.com/PaddlePaddle/X2Paddle.git@develop
```
pip install x2paddle
```
### Installation Method 3
```
git clone https://github.com/PaddlePaddle/X2Paddle.git
cd X2Paddle
git checkout develop
python setup.py install
```
## Usage
### TensorFlow
```
......
......@@ -104,10 +104,14 @@ def tf2paddle(model_path,
# necessary optimization
optimizer.delete_redundance_code()
# optimizer below is experimental
optimizer.optimize_elementwise_op()
optimizer.merge_activation()
optimizer.merge_bias()
optimizer.merge_batch_norm()
optimizer.merge_prelu()
optimizer.optimize_sub_graph()
# optimizer.merge_batch_norm()
# optimizer.merge_prelu()
else:
mapper = TFOpMapperNHWC(model)
optimizer = TFOptimizer(mapper)
......
......@@ -60,6 +60,15 @@ class TFGraphNode(GraphNode):
raise Exception("Dtype[{}] not in dtype_map".format(dtype))
return self.dtype_map[dtype]
@property
def raw_dtype(self):
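# Return the raw TensorFlow dtype enum from the first dtype-bearing
# attribute ('dtype', 'Tidx', 'T', or 'DstT') present on the node.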
keys = ['dtype', 'Tidx', 'T', 'DstT']
for k in keys:
dtype = self.layer.attr[k].type
if dtype > 0:
break
return dtype
@property
def value(self):
assert self.layer_type == "Const", "Only Const node has value."
......@@ -120,6 +129,7 @@ class TFGraph(Graph):
# tensorflow graph optimize
self._remove_isolated_node()
self._remove_identity_node()
self._remove_cast_node()
def get_node(self, node_name, copy=False):
items = node_name.strip().split(':')
......@@ -190,6 +200,27 @@ class TFGraph(Graph):
idx = self.output_nodes.index(node_name)
self.output_nodes[idx] = input_node.layer_name
def _remove_cast_node(self):
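# Fold a Cast whose input is a Placeholder with no other consumers:
# retag the placeholder with the cast's target dtype, then drop the Cast.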
cast_node = list()
for node_name, node in self.node_map.items():
if node.layer_type == "Cast":
input = self.get_node(node.inputs[0])
if input.layer_type != "Placeholder" or len(input.outputs) != 1:
continue
cast_node.append(node_name)
for node_name in cast_node:
node = self.get_node(node_name)
input_node = self.get_node(node.inputs[0])
input_node.layer.attr["dtype"].type = node.raw_dtype
self.remove_node(node_name)
self.identity_map[node_name] = input_node.layer_name
if node_name in self.output_nodes:
idx = self.output_nodes.index(node_name)
self.output_nodes[idx] = input_node.layer_name
def data_format_propagation(self, node):
current_node = self.node_map[node.layer_name]
current_node.tf_data_format = node.tf_data_format
......
......@@ -170,7 +170,28 @@ class TFOpMapper(OpMapper):
x_shape = y.out_shapes[0]
y_shape = x.out_shapes[0]
else:
raise Exception("Unexpected situation happend")
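# New branch: broadcast a 1-D vector against a 4-D tensor whose last
# (channel) dim matches it: reshape the vector to [1, C, 1, 1], expand
# along the batch dim if needed, then apply the elementwise op.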
if len(x_shape) == 1 and len(y_shape) == 4 and x_shape[
0] == y_shape[-1] and y_shape.count(-1) < 1:
shape = [1, x_shape[0], 1, 1]
attr = {"shape": shape}
node.fluid_code.add_layer("reshape",
inputs=x_input,
output="reshape_x",
param_attr=attr)
if y_shape[0] != 1:
attr = {"expand_times": [y_shape[0], 1, 1, 1]}
node.fluid_code.add_layer("expand",
inputs="reshape_x",
output="reshape_x",
param_attr=attr)
inputs = {"x": "reshape_x", "y": y_input}
node.fluid_code.add_layer(op_type,
inputs=inputs,
output=node,
param_attr=None)
return
else:
raise Exception("Unexpected situation happened")
if len(x_shape) == 4 and len(y_shape) == 1:
if x_input.tf_data_format == "NHWC":
......
......@@ -16,10 +16,20 @@
from x2paddle.op_mapper.tf_op_mapper import TFOpMapper
from x2paddle.core.fluid_code import Layer
from x2paddle.core.util import *
import six
import numpy
import copy as cp
def exist_act(node):
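# Return True if any layer already generated for this node carries a
# fused activation in its param_attr.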
for layer in node.fluid_code.layers:
if layer.param_attr is not None:
act = layer.param_attr.get("act", None)
if act is not None:
return True
return False
class TFOptimizer(object):
activation_ops = {
'Relu': 'relu',
......@@ -95,6 +105,59 @@ class TFOptimizer(object):
del out_node.inputs[index]
del self.graph.node_map[node_name]
def optimize_elementwise_op(self):
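# Remove an explicit 'expand' emitted before an elementwise op when the
# op's implicit broadcasting already covers every expanded dimension.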
elementwise_ops = [
'Sub', 'Add', 'RealDiv', 'Maximum', 'Mul', 'FloorDiv',
'GreaterEqual'
]
revertable_ops = ['Add', 'Mul']
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
if node is None:
continue
if node.layer_type in elementwise_ops:
if len(node.fluid_code.layers) != 2:
continue
if node.fluid_code.layers[0].op != "expand":
continue
expand_out = node.fluid_code.layers[0].output
expand_in = node.fluid_code.layers[0].inputs
expand_times = node.fluid_code.layers[0].param_attr[
"expand_times"]
x = node.fluid_code.layers[1].inputs["x"]
y = node.fluid_code.layers[1].inputs["y"]
if isinstance(
x,
six.string_types) and node.layer_type in revertable_ops:
node.fluid_code.layers[1].inputs["y"] = x
node.fluid_code.layers[1].inputs["x"] = y
x = node.fluid_code.layers[1].inputs["x"]
y = expand_in
elif isinstance(y, six.string_types):
y = expand_in
else:
continue
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
if len(x_shape) != len(y_shape):
continue
if len(x_shape) == 4:
x_shape = [x_shape[i] for i in [0, 3, 1, 2]]
y_shape = [y_shape[i] for i in [0, 3, 1, 2]]
continue_flag = True
for i in range(len(x_shape)):
if y_shape[-1 * (i + 1)] == 1 and continue_flag:
expand_times[-1 * (i + 1)] = 1
else:
continue_flag = False
if expand_times.count(1) == len(expand_times):
node.fluid_code.layers[1].inputs["y"] = expand_in
del node.fluid_code.layers[0]
def merge_activation(self):
act_nodes = list()
for node_name in self.graph.topo_sort:
......@@ -353,6 +416,12 @@ class TFOptimizer(object):
node.fluid_code.layers[-2].output = name
del node.fluid_code.layers[-1]
def optimize_sub_graph(self):
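# Run the subgraph-fusion passes; each one pattern-matches a multi-op
# subgraph and collapses it into a single fluid layer.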
self.merge_batch_norm()
self.merge_prelu()
self.merge_scale()
self.merge_affine_channel()
def merge_batch_norm(self):
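# Pattern-match the multi-op subgraph TF emits for an unfused batch norm
# and collapse it into one FusedBatchNorm node; a fused activation on any
# matched intermediate disqualifies the match.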
for i, name in enumerate(self.graph.topo_sort):
node = self.graph.get_node(name)
......@@ -368,6 +437,10 @@ class TFOptimizer(object):
is_batch_norm = False
continue
if exist_act(in_nodes0[0]) or exist_act(in_nodes0[1]):
is_batch_norm = False
continue
in_nodes1 = [
self.graph.get_node(in_name)
for in_name in in_nodes0[0].inputs
......@@ -382,11 +455,17 @@ class TFOptimizer(object):
if in_nodes1[1].layer_type != "Mul":
is_batch_norm = False
continue
if exist_act(in_nodes1[1]):
is_batch_norm = False
continue
if in_nodes2[0].layer_type != "Const" or in_nodes2[
1].layer_type != "Mul":
is_batch_norm = False
continue
if exist_act(in_nodes2[1]):
is_batch_norm = False
continue
in_nodes3 = [
self.graph.get_node(in_name)
......@@ -410,6 +489,9 @@ class TFOptimizer(object):
if in_nodes5.layer_type != "Add":
is_batch_norm = False
continue
if exist_act(in_nodes5):
is_batch_norm = False
continue
in_nodes6 = [
self.graph.get_node(in_name) for in_name in in_nodes5.inputs
......@@ -485,10 +567,9 @@ class TFOptimizer(object):
if is_batch_norm:
index = in_nodes1[0].outputs.index(in_nodes0[0].layer_name)
del in_nodes1[0].outputs[index]
in_nodes1[0].outputs[index] = node.layer_name
node.layer_type = "FusedBatchNorm"
node.inputs = [in_nodes1[0].layer_name]
node.outputs = node.outputs
act = node.fluid_code.layers[-1].param_attr.get("act", None)
node.fluid_code.clear()
attr = {
......@@ -522,6 +603,9 @@ class TFOptimizer(object):
continue
is_prelu = True
if node.layer_type == "Add":
if exist_act(node):
is_prelu = False
continue
in_nodes0 = [
self.graph.get_node(in_name) for in_name in node.inputs
]
......@@ -529,6 +613,10 @@ class TFOptimizer(object):
1].layer_type != "Mul":
is_prelu = False
continue
if exist_act(in_nodes0[1]):
is_prelu = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
is_prelu = False
......@@ -546,6 +634,9 @@ class TFOptimizer(object):
if in_nodes2[0].layer_type != "Mul":
is_prelu = False
continue
if exist_act(in_nodes2[0]):
is_prelu = False
continue
if len(in_nodes2[1].outputs) != 1 or len(
in_nodes2[0].outputs) != 1:
is_prelu = False
......@@ -559,6 +650,9 @@ class TFOptimizer(object):
1].layer_type != "Sub":
is_prelu = False
continue
if exist_act(in_nodes3[1]):
is_prelu = False
continue
if len(in_nodes3[0].outputs) != 1 or len(
in_nodes3[1].outputs) != 1:
is_prelu = False
......@@ -638,10 +732,10 @@ class TFOptimizer(object):
del in_nodes1.outputs[index]
index = in_nodes1.outputs.index(in_nodes4[1].layer_name)
del in_nodes1.outputs[index]
in_nodes1.outputs.append(node.layer_name)
node.layer_type = "Prelu"
node.inputs = [in_nodes1.layer_name]
node.outputs = node.outputs
act = node.fluid_code.layers[-1].param_attr.get("act", None)
node.fluid_code.clear()
attr = {
......@@ -660,3 +754,181 @@ class TFOptimizer(object):
del self.graph.node_map[in_nodes2[1].layer_name]
del self.graph.node_map[in_nodes3[1].layer_name]
del self.graph.node_map[in_nodes4[1].layer_name]
def merge_scale(self):
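# Pattern-match Sub(Mul(Const, RealDiv(x, Const)), Const) with scalar
# constants and fold the expression into a single fluid 'scale' layer.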
for i, name in enumerate(self.graph.topo_sort):
node = self.graph.get_node(name)
if node is None:
continue
is_scale = True
if node.layer_type == "Sub":
in_nodes0 = [
self.graph.get_node(in_name) for in_name in node.inputs
]
if in_nodes0[0].layer_type != "Mul" or in_nodes0[
1].layer_type != "Const" or in_nodes0[1].value.size != 1:
is_scale = False
continue
if exist_act(in_nodes0[0]):
is_scale = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
is_scale = False
continue
in_nodes1 = [
self.graph.get_node(in_name)
for in_name in in_nodes0[0].inputs
]
if in_nodes1[0].layer_type != "Const" or in_nodes1[
1].layer_type != "RealDiv" or in_nodes1[
0].value.size != 1:
is_scale = False
continue
if exist_act(in_nodes1[1]):
is_scale = False
continue
if len(in_nodes1[0].outputs) != 1 or len(
in_nodes1[1].outputs) != 1:
is_scale = False
continue
in_nodes2 = [
self.graph.get_node(in_name)
for in_name in in_nodes1[1].inputs
]
if in_nodes2[1].layer_type != "Const" or in_nodes2[
1].value.size != 1:
is_scale = False
continue
if is_scale:
in_node = self.graph.get_node(in_nodes1[1].inputs[0])
index = in_node.outputs.index(in_nodes1[1].layer_name)
in_node.outputs[index] = node.layer_name
node.layer_type = "Scale"
node.inputs = [in_node.layer_name]
scale = 1.0 / in_nodes2[1].value * in_nodes1[0].value
act = None
if node.fluid_code.layers[0].param_attr is not None:
act = node.fluid_code.layers[0].param_attr.get(
"act", None)
node.fluid_code.clear()
attr = {
"scale": scale,
"bias": in_nodes0[1].value,
"bias_after_scale": True,
"act": act
}
node.fluid_code.add_layer("scale",
inputs=in_node,
output=node,
param_attr=attr)
del self.graph.node_map[in_nodes0[0].layer_name]
del self.graph.node_map[in_nodes0[1].layer_name]
del self.graph.node_map[in_nodes1[0].layer_name]
del self.graph.node_map[in_nodes1[1].layer_name]
del self.graph.node_map[in_nodes2[1].layer_name]
def merge_affine_channel(self):
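# Pattern-match RealDiv(Add/Sub(x, Const), Const) with per-channel
# constants and fold it into a single fluid 'affine_channel' layer,
# materializing the computed scale and bias as parameters.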
for i, name in enumerate(self.graph.topo_sort):
node = self.graph.get_node(name)
if node is None:
continue
is_affine_channel = True
if node.layer_type == "RealDiv":
in_nodes0 = [
self.graph.get_node(in_name) for in_name in node.inputs
]
bias_add = True
if (in_nodes0[0].layer_type != "Sub" and in_nodes0[0].layer_type
!= "Add") or in_nodes0[1].layer_type != "Const" or len(
in_nodes0[1].value.shape) != 3:
is_affine_channel = False
continue
if in_nodes0[0].layer_type == "Sub":
bias_add = False
if exist_act(in_nodes0[0]):
is_affine_channel = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
is_affine_channel = False
continue
in_nodes1 = [
self.graph.get_node(in_name)
for in_name in in_nodes0[0].inputs
]
if len(in_nodes1[0].out_shapes[0]
) != 4 or in_nodes1[1].layer_type != "Const" or len(
in_nodes1[1].value.shape) != 3:
is_affine_channel = False
continue
if len(in_nodes1[1].outputs) != 1:
is_affine_channel = False
continue
channel = in_nodes1[0].out_shapes[0][-1]
if channel < 0 or channel != in_nodes0[
1].value.size or channel != in_nodes1[1].value.size:
is_affine_channel = False
continue
if in_nodes0[1].out_shapes[0][-1] != in_nodes0[
1].value.size or in_nodes1[1].out_shapes[0][
-1] != in_nodes1[1].value.size:
is_affine_channel = False
continue
if is_affine_channel:
in_node = in_nodes1[0]
index = in_node.outputs.index(in_nodes0[0].layer_name)
in_node.outputs[index] = node.layer_name
node.layer_type = "AffineChannel"
node.inputs = [in_node.layer_name]
scale = 1.0 / in_nodes0[1].value.flatten()
bias = in_nodes1[1].value.flatten(
) / in_nodes0[1].value.flatten()
if not bias_add:
bias *= -1.0
self.op_mapper.weights[node.layer_name + "_scale"] = scale
self.op_mapper.weights[node.layer_name + "_bias"] = bias
act = None
if node.fluid_code.layers[0].param_attr is not None:
act = node.fluid_code.layers[0].param_attr.get(
"act", None)
node.fluid_code.clear()
attr = {
"dtype": string(scale.dtype),
"shape": [channel],
"name": string(node.layer_name + "_scale")
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node.layer_name + "_scale",
param_attr=attr)
attr = {
"dtype": string(scale.dtype),
"shape": [channel],
"name": string(node.layer_name + "_bias")
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node.layer_name + "_bias",
param_attr=attr)
inputs = {
"x": in_node,
"scale": node.layer_name + "_scale",
"bias": node.layer_name + "_bias"
}
attr = {"act": act}
node.fluid_code.add_layer("affine_channel",
inputs=inputs,
output=node,
param_attr=attr)
del self.graph.node_map[in_nodes0[0].layer_name]
del self.graph.node_map[in_nodes0[1].layer_name]
del self.graph.node_map[in_nodes1[1].layer_name]