Commit 1f79d43d authored by: J jiangjiajun

vgg16 support

Parent f3378f3b
......@@ -31,7 +31,7 @@ import numpy
with tf.Session() as sess:
inputs = tf.placeholder(dtype=tf.float32,
shape=[None, 224, 224, 3],
shape=[None, None, None, 3],
name="inputs")
logits, endpoint = vgg.vgg_16(inputs, num_classes=1000, is_training=False)
load_model = slim.assign_from_checkpoint_fn(
......@@ -45,4 +45,4 @@ with tf.Session() as sess:
result = sess.run([output_tensor], {input_tensor: data})
numpy.save("tensorflow.npy", numpy.array(result))
freeze_model(sess, ["vgg_16/fc8/squeezed"], "vgg16.pb")
freeze_model(sess, ["vgg_16/fc8/squeezed"], "vgg16_None.pb")
......@@ -15,13 +15,12 @@ from x2paddle.parser.tf_parser import TFParser
from x2paddle.optimizer.tf_optimizer import TFGraphOptimizer
from x2paddle.emitter.tf_emitter import TFEmitter
parser = TFParser('/ssd2/Jason/github/X2Paddle/tool/vgg16.pb',
parser = TFParser('/ssd2/Jason/github/X2Paddle/tool/vgg16_None.pb',
in_nodes=['inputs'],
out_nodes=['output_boxes'],
in_shapes=[[-1, 416, 416, 3]])
optimizer = TFGraphOptimizer()
optimizer.run(parser.tf_graph)
#parser.tf_graph.print()
emitter = TFEmitter(parser)
......
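The snippet is cut off here; since TFEmitter.run() (defined below in tf_emitter.py) both prints the generated fluid code and exports the weights, a plausible completion is simply:

emitter.run()  # prints each layer's code and writes params via export_paddle_param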
......@@ -42,7 +42,8 @@ class Layer(object):
", ")
else:
in_list += (input.layer_name + ", ")
inlist = in_list.strip(", ") + "], "
in_list = in_list.strip(", ") + "], "
layer_code += in_list
elif isinstance(self.inputs, dict):
for key, input in self.inputs.items():
assert isinstance(
......
......@@ -12,14 +12,50 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.proto import framework_pb2
import struct
import math
import os
def string(param):
return "\'{}\'".format(param)
def get_same_padding(input_size, kernel_size, stride):
new_size = int(math.ceil(input_size * 1.0 / stride))
pad_size = (new_size - 1) * stride + filter_size - in_size
def get_same_padding(in_size, kernel_size, stride):
new_size = int(math.ceil(in_size * 1.0 / stride))
pad_size = (new_size - 1) * stride + kernel_size - in_size
pad0 = int(pad_size / 2)
pad1 = pad_size - pad0
return [pad0, pad1]
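The renamed parameters fix a real bug: the old body referenced filter_size and in_size, neither of which existed in its signature. The helper reproduces TensorFlow's SAME padding rule, where the output size is ceil(in_size / stride) and the total padding needed to reach it is split as evenly as possible, with any odd pixel going to the second side. Two worked examples:

# 3x3 kernel, stride 1, input width 224:
# new_size = ceil(224 / 1) = 224
# pad_size = (224 - 1) * 1 + 3 - 224 = 2  ->  split as [1, 1]
print(get_same_padding(224, 3, 1))  # [1, 1]

# 2x2 max-pool, stride 2, input width 224:
# new_size = ceil(224 / 2) = 112
# pad_size = (112 - 1) * 2 + 2 - 224 = 0  ->  [0, 0]
print(get_same_padding(224, 2, 2))  # [0, 0]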
def export_paddle_param(param, param_name, dir):
dtype_map = {
"int16": [framework_pb2.VarType.INT16, 'h'],
"int32": [framework_pb2.VarType.INT32, 'i'],
"int64": [framework_pb2.VarType.INT64, 'q'],
"float16": [framework_pb2.VarType.FP16, 'e'],
"float32": [framework_pb2.VarType.FP32, 'f'],
"float64": [framework_pb2.VarType.FP64, 'd']
}
shape = param.shape
if len(shape) == 0:
assert param.size == 1, "Unexpected situation happened!"
shape = [1]
print("param dtype:", param.dtype)
assert str(param.dtype) in dtype_map, "Unknown dtype of params."
fp = open(os.path.join(dir, param_name), 'wb')
fp.write(struct.pack('i', 0))
fp.write(struct.pack('L', 0))
fp.write(struct.pack('i', 0))
tensor_desc = framework_pb2.VarType.TensorDesc()
tensor_desc.data_type = dtype_map[str(param.dtype)][0]
tensor_desc.dims.extend(shape)
desc_size = tensor_desc.ByteSize()
fp.write(struct.pack('i', desc_size))
fp.write(tensor_desc.SerializeToString())
param.tofile(fp)
fp.close()
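The writer lays each parameter file out as: an int32 version (0), a native unsigned-long LoD level (0), another int32 version (0), an int32 byte length of the serialized VarType.TensorDesc, the desc itself, and finally the raw tensor bytes. A minimal sketch of a matching reader, assuming the same platform's native struct sizes as the writer:

import struct
from paddle.fluid.proto import framework_pb2

def read_paddle_param_header(path):
    # Mirrors export_paddle_param: i32 version, native unsigned long
    # LoD level, i32 tensor version, i32 desc size, then the proto.
    with open(path, 'rb') as fp:
        version, = struct.unpack('i', fp.read(4))
        lod_level, = struct.unpack('L', fp.read(struct.calcsize('L')))
        tensor_version, = struct.unpack('i', fp.read(4))
        desc_size, = struct.unpack('i', fp.read(4))
        desc = framework_pb2.VarType.TensorDesc()
        desc.ParseFromString(fp.read(desc_size))
        return desc.data_type, list(desc.dims)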
......@@ -16,6 +16,7 @@ from x2paddle.parser.tf_parser import TFGraph
from x2paddle.core.emitter import Emitter
from x2paddle.core.fluid_code import FluidCode
from x2paddle.core.util import *
import numpy
class TFEmitter(Emitter):
......@@ -26,6 +27,7 @@ class TFEmitter(Emitter):
# attr_node records nodes that exist
# only to define attributes of an op
self.attr_node = list()
self.omit_nodes = list()
self.weights = dict()
def run(self):
......@@ -39,10 +41,17 @@ class TFEmitter(Emitter):
for i in range(len(self.graph.topo_sort)):
node_name = self.graph.topo_sort[i]
if node_name in self.omit_nodes:
continue
node = self.graph.get_node(node_name)
for layer in node.fluid_code.layers:
print(layer.get_code())
for name, param in self.weights.items():
node = self.graph.get_node(name)
export_paddle_param(param, node.layer_name.replace('/', '_'),
"params1")
def Placeholder(self, node):
shape = node.out_shapes[0]
dtype = node.dtype
......@@ -57,9 +66,6 @@ class TFEmitter(Emitter):
param_attr=attr)
def Const(self, node):
## TODO
return
shape = node.out_shapes[0]
dtype = node.dtype
value = node.value
......@@ -79,11 +85,13 @@ class TFEmitter(Emitter):
inputs=None,
output=node,
param_attr=attr)
self.weights[node.layer_name] = node.value
def Transpose(self, node):
return
input = self.graph.get_node(node.layer.input[0], copy=True)
perm = self.graph.get_node(node.layer.input[1], copy=True)
assert perm.layer_type == "Const", "Perm of transpose OP should be Const"
del self.weights[perm.layer_name]
perm.fluid_code.clear()
perm = perm.value.tolist()
......@@ -94,7 +102,6 @@ class TFEmitter(Emitter):
param_attr=attr)
def RealDiv(self, node):
return
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {'x': x, 'y': y}
......@@ -104,7 +111,6 @@ class TFEmitter(Emitter):
param_attr=None)
def Relu(self, node):
return
input = self.graph.get_node(node.layer.input[0], copy=True)
node.fluid_code.add_layer("relu",
inputs=input,
......@@ -112,28 +118,24 @@ class TFEmitter(Emitter):
param_attr=None)
def Squeeze(self, node):
return
input = self.graph.get_node(node.layer.input[0], copy=True)
squeeze_dims = node.get_attr('squeeze_dims')
print(squeeze_dims)
attr = {'squeeze_dims': squeeze_dims}
attr = {'axes': squeeze_dims}
node.fluid_code.add_layer("squeeze",
inputs=input,
output=node,
param_attr=attr)
def BiasAdd(self, node):
return
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {'x': x, 'y': y}
input = self.graph.get_node(node.layer.input[0], copy=True)
bias = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {'x': input, 'y': bias}
node.fluid_code.add_layer("elementwise_add",
inputs=inputs,
output=node,
param_attr=None)
def Identity(self, node):
return
input = self.graph.get_node(node.layer.input[0], copy=True)
node.fluid_code.add_layer("assign",
inputs=input,
......@@ -141,99 +143,104 @@ class TFEmitter(Emitter):
param_attr=None)
def MaxPool(self, node):
return
input = self.graph.get_node(node.layer.input[0], copy=True)
in_shape = input.out_shapes[0]
k_size = node.get_attr("ksize")
strides = node.get_attr("strides")
data_format = node.get_attr("data_format").decode()
pad_mode = node.get_attr("padding").decode()
channel_first = data_format == "NCHW"
if data_format == "NHWC":
if not channel_first:
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
k_size = [k_size[i] for i in [0, 3, 1, 2]]
strides = [strides[i] for i in [0, 3, 1, 2]]
if pad_mode == "SAME":
pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
pad_w = get_same_padding(in_shape[3], k_size[3], strides[3])
pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
pad_h = pad_h[0] + pad_h[1]
pad_w = pad_w[0] + pad_w[1]
attr = {"paddings": [0, pad_h, 0, pad_w], "pad_value": -10000.0}
node.fluid_code.add_layer("pad2d",
inputs=input,
output=node,
param_attr=attr)
if pad_h + pad_w != 0:
node.fluid_code.add_layer(
"pad2d",
inputs=input if channel_first else node,
output=node,
param_attr=attr)
attr = {
"pool_size": k_size[1:3],
"pool_type": string("max"),
"pool_stride": strides[1:3]
"pool_stride": strides[2:4]
}
node.fluid_code.add_layer("pool2d",
inputs=input,
inputs=input if channel_first else node,
output=node,
param_attr=attr)
if data_format == "NHWC":
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
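The permutation lists above rearrange per-dimension attributes from TensorFlow's NHWC order into Paddle's NCHW order; a quick standalone check of the index trick:

# NHWC -> NCHW reordering of a per-dimension attribute vector:
strides = [1, 2, 2, 1]                        # TF strides, NHWC
strides = [strides[i] for i in [0, 3, 1, 2]]  # [1, 1, 2, 2], NCHW
print(strides[2:4])                           # [2, 2], the spatial strides fed to pool2d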
def Conv2D(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
kernel = self.graph.get_node(node.layer.input[1], copy=True)
assert kernel.layer_type == "Const", "Kernel of Conv2D should be Const"
self.omit_nodes.append(kernel.layer_name)
in_shape = input.out_shapes[0]
k_size = kernel.out_shapes[0]
strides = node.get_attr("strides")
dilations = node.get_attr("dilations")
data_format = node.get_attr("data_format").decode()
pad_mode = node.get_attr("padding").decode()
channel_first = data_format == "NCHW"
if not channel_first:
self.weights[kernel.layer_name] = numpy.transpose(
kernel.value, (3, 2, 0, 1))
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
strides = [strides[i] for i in [0, 3, 1, 2]]
dilations = [dilations[i] for i in [0, 3, 1, 2]]
if pad_mode == "SAME":
pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
if pad_h[0] + pad_h[1] + pad_w[0] + pad_w[1] != 0:
node.fluid_code.add_layer(
"pad2d",
inputs=input if channel_first else node,
output=node,
param_attr=attr)
attr = {
"bias_attr": False,
"param_attr": string(kernel.layer_name),
"num_filters": k_size[3],
"filter_size": k_size[0:2],
"stride": strides[2:4],
"dilation": dilations[2:4]
}
node.fluid_code.add_layer("conv2d",
inputs=input if channel_first else node,
output=node,
param_attr=attr)
# def Conv2D(self, node):
# input = self.graph.get_node(node.layer.input[0], copy=True)
# in_shape = input.out_shapes[0]
# k_size = node.get_attr("ksize")
# strides = node.get_attr("strides")
# dilations = node.get_attr("dilations")
# data_format = node.get_attr("data_format").decode()
# pad_mode = node.get_attr("padding").decode()
#
# if data_format == "NHWC":
# attr = {"perm": [0, 3, 1, 2]}
# node.fluid_code.add_layer("transpose",
# inputs=input,
# output=node,
# param_attr=attr)
# in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
# k_size = [k_size[i] for i in [0, 3, 1, 2]]
# strides = [strides[i] for i in [0, 3, 1, 2]]
# dilations = [dilations[i] for i in [0, 3, 1, 2]]
#
# if pad_mode == "SAME":
# pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
# pad_w = get_same_padding(in_shape[3], k_size[3], strides[3])
# pad_h = pad_h[0] + pad_h[1]
# pad_w = pad_w[0] + pad_w[1]
# attr = {"paddings": pad_h+pad_w, "pad_value": 0.0}
# node.fluid_code.add_layer("pad2d",
# inputs=input,
# output=node,
# param_attr=attr)
# attr = {
# "pool_stride": strides[1:3],
# "bias_attr": False,
# "param_attr":,
# "num_filters":,
# "filter_size":,
# "stride":,
# "dilation":
# }
# node.fluid_code.add_layer("conv2d",
# inputs=input,
# output=node,
# param_attr=attr)
#
# if data_format == "NHWC":
# attr = {"perm": [0, 2, 3, 1]}
# node.fluid_code.add_layer("transpose",
# inputs=input,
# output=node,
# param_attr=attr)
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
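The (3, 2, 0, 1) transpose applied to the kernel above converts TensorFlow's HWIO filter layout (filter_height, filter_width, in_channels, out_channels) into the OIHW layout Paddle's conv2d expects. A quick shape check:

import numpy

kernel = numpy.zeros((3, 3, 64, 128))                  # TF layout: HWIO
paddle_kernel = numpy.transpose(kernel, (3, 2, 0, 1))
print(paddle_kernel.shape)                             # (128, 64, 3, 3): OIHW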
......@@ -18,7 +18,7 @@ from x2paddle.parser.tf_parser import TFGraph
class TFGraphOptimizer(object):
def __init__(self):
self.identity_ops = ['Identity']
print("Doint Nothing")
def remove_isolated_node(self, graph):
# delete isolated nodes
......@@ -30,8 +30,33 @@ class TFGraphOptimizer(object):
graph.remove_node(node_name)
def remove_identity_node(self, graph):
identity_node = list()
for node_name, node in graph.node_map.items():
if node.layer_type == "Identity":
identity_node.append(node_name)
for node_name in identity_node:
node = graph.get_node(node_name)
# Note: an Identity node has exactly one input
input_node = graph.get_node(node.inputs[0])
# remove identity node from graph
idx = input_node.outputs.index(node_name)
del input_node.outputs[idx]
output_names = node.outputs
for output_name in output_names:
output_node = graph.get_node(output_name)
idx = output_node.inputs.index(node_name)
output_node.inputs[idx] = input_node.layer_name
idx = graph.topo_sort.index(node_name)
del graph.topo_sort[idx]
def run(self, graph):
self.remove_isolated_node(graph)
self.remove_identity_node(graph)
# TODO identity node remove
......@@ -39,3 +64,7 @@ class TFGraphOptimizer(object):
# TODO subgraph optimize
# TODO compute optimize
# activation merge
# biasadd merge
......@@ -82,6 +82,7 @@ class TFGraphNode(GraphNode):
class TFGraph(Graph):
def __init__(self, model):
super(TFGraph, self).__init__(model)
self.identity_map = dict()
def build(self):
for layer in self.model.node:
......@@ -101,6 +102,52 @@ class TFGraph(Graph):
super(TFGraph, self).build()
# TensorFlow graph optimization passes
self._remove_isolated_node()
self._remove_identity_node()
def get_node(self, node_name, copy=False):
items = node_name.strip().split(':')
if items[0] in self.identity_map:
items[0] = self.identity_map[items[0]]
new_node_name = ":".join(items)
return super(TFGraph, self).get_node(new_node_name, copy)
def _remove_isolated_node(self):
# delete isolated nodes
isolated_nodes = list()
for node_name in self.node_map.keys():
if len(self.get_node(node_name).inputs) == 0 or len(
self.get_node(node_name).outputs) == 0:
isolated_nodes.append(node_name)
self.remove_node(node_name)
def _remove_identity_node(self):
identity_node = list()
for node_name, node in self.node_map.items():
if node.layer_type == "Identity":
identity_node.append(node_name)
for node_name in identity_node:
node = self.get_node(node_name)
# Note: an Identity node has exactly one input
input_node = self.get_node(node.inputs[0])
# remove identity node from graph
self.identity_map[node_name] = input_node.layer_name
idx = input_node.outputs.index(node_name)
del input_node.outputs[idx]
output_names = node.outputs
for output_name in output_names:
output_node = self.get_node(output_name)
idx = output_node.inputs.index(node_name)
output_node.inputs[idx] = input_node.layer_name
idx = self.topo_sort.index(node_name)
del self.topo_sort[idx]
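With identity_map populated, get_node above transparently redirects any later reference to a removed Identity node, including tensor-suffixed names like 'name:0', to the node that fed it. A standalone sketch of the same remapping, using a hypothetical map entry:

identity_map = {'vgg_16/fc8/Identity': 'vgg_16/fc8/BiasAdd'}  # hypothetical entry

def remap(node_name):
    # Same logic as TFGraph.get_node: split off a ':N' tensor suffix,
    # redirect the base name through identity_map, then rejoin.
    items = node_name.strip().split(':')
    if items[0] in identity_map:
        items[0] = identity_map[items[0]]
    return ':'.join(items)

print(remap('vgg_16/fc8/Identity:0'))  # vgg_16/fc8/BiasAdd:0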
class TFParser(object):
def __init__(self, pb_model, in_nodes=None, out_nodes=None, in_shapes=None):
......