diff --git a/tool/tf_export_model.py b/tool/tf_export_model.py
index a028662d70d6814266a52ccc6826e640c7671b7e..bd7edfe6c6a35bfc02fb9d86cfb3f18067f54ddf 100644
--- a/tool/tf_export_model.py
+++ b/tool/tf_export_model.py
@@ -11,3 +11,38 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from tensorflow.python.framework import graph_util
+import tensorflow as tf
+
+
+def freeze_model(sess, output_tensor_names, freeze_model_path):
+    out_graph = graph_util.convert_variables_to_constants(
+        sess, sess.graph.as_graph_def(), output_tensor_names)
+    with tf.gfile.GFile(freeze_model_path, 'wb') as f:
+        f.write(out_graph.SerializeToString())
+
+    print("freeze model saved in {}".format(freeze_model_path))
+
+
+import tensorflow.contrib.slim as slim
+from tensorflow.contrib.slim.nets import vgg
+import numpy
+
+with tf.Session() as sess:
+    inputs = tf.placeholder(dtype=tf.float32,
+                            shape=[None, 224, 224, 3],
+                            name="inputs")
+    logits, endpoint = vgg.vgg_16(inputs, num_classes=1000, is_training=False)
+    load_model = slim.assign_from_checkpoint_fn(
+        "vgg_16.ckpt", slim.get_model_variables("vgg_16"))
+    load_model(sess)
+
+    numpy.random.seed(13)
+    data = numpy.random.rand(5, 224, 224, 3)
+    input_tensor = sess.graph.get_tensor_by_name("inputs:0")
+    output_tensor = sess.graph.get_tensor_by_name("vgg_16/fc8/squeezed:0")
+    result = sess.run([output_tensor], {input_tensor: data})
+    numpy.save("tensorflow.npy", numpy.array(result))
+
+    freeze_model(sess, ["vgg_16/fc8/squeezed"], "vgg16.pb")
diff --git a/x2paddle/convert.py b/x2paddle/convert.py
index 03874a7e8c9cfde87dc4b73f9be5bf05e1ae85c9..7e02ae022713c119c446546247811e9f25dd91f4 100644
--- a/x2paddle/convert.py
+++ b/x2paddle/convert.py
@@ -15,7 +15,7 @@ from x2paddle.parser.tf_parser import TFParser
 from x2paddle.optimizer.tf_optimizer import TFGraphOptimizer
 from x2paddle.emitter.tf_emitter import TFEmitter
 
-parser = TFParser('/ssd3/dltpsz/frozen_darknet_yolov3_model.pb',
+parser = TFParser('/ssd2/Jason/github/X2Paddle/tool/vgg16.pb',
                   in_nodes=['inputs'],
                   out_nodes=['output_boxes'],
                   in_shapes=[[-1, 416, 416, 3]])
diff --git a/x2paddle/core/fluid_code.py b/x2paddle/core/fluid_code.py
index 65b4a1823de5fbd70c19345a31260e5b122491b8..484f6de0d89f8aabaa016d878e448576f3aa1c9b 100644
--- a/x2paddle/core/fluid_code.py
+++ b/x2paddle/core/fluid_code.py
@@ -59,6 +59,8 @@ class Layer(object):
                                    "[{}]".format(self.inputs.index) + ", ")
                 else:
                     layer_code += (self.inputs.layer_name + ", ")
+            elif isinstance(self.inputs, str):
+                layer_code += (self.inputs + ", ")
             else:
                 raise Exception("Unknown type of inputs.")
 
diff --git a/x2paddle/core/util.py b/x2paddle/core/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cf590e81182b787502363ac67048e567092e93e
--- /dev/null
+++ b/x2paddle/core/util.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+
+def string(param):
+    return "\'{}\'".format(param)
+
+
+def get_same_padding(input_size, kernel_size, stride):
+    new_size = int(math.ceil(input_size * 1.0 / stride))
+    pad_size = (new_size - 1) * stride + kernel_size - input_size
+    pad0 = int(pad_size / 2)
+    pad1 = pad_size - pad0
+    return [pad0, pad1]
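A quick standalone check of the SAME-padding arithmetic in x2paddle/core/util.py above (a minimal sketch; it restates the helper so the snippet runs on its own):

    import math

    def get_same_padding(input_size, kernel_size, stride):
        # same logic as get_same_padding in x2paddle/core/util.py above
        new_size = int(math.ceil(input_size * 1.0 / stride))
        pad_size = (new_size - 1) * stride + kernel_size - input_size
        pad0 = int(pad_size / 2)
        pad1 = pad_size - pad0
        return [pad0, pad1]

    # 2x2 kernel, stride 2 on a 224x224 map: output is 112x112, no padding needed
    print(get_same_padding(224, 2, 2))   # [0, 0]
    # 3x3 kernel, stride 2 on a 224x224 map: one extra row/column on the bottom/right
    print(get_same_padding(224, 3, 2))   # [0, 1]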
diff --git a/x2paddle/emitter/tf_emitter.py b/x2paddle/emitter/tf_emitter.py
index 74077af2bc83c958f0be620a894eccecd7eca963..f058319f781e803ffecb16d99cf0eb77c3d648cf 100644
--- a/x2paddle/emitter/tf_emitter.py
+++ b/x2paddle/emitter/tf_emitter.py
@@ -23,6 +23,9 @@ class TFEmitter(Emitter):
         super(TFEmitter, self).__init__()
         self.parser = parser
         self.graph = parser.tf_graph
+        # attr_node records nodes that are only used
+        # to define attributes of other ops
+        self.attr_node = list()
         self.weights = dict()
 
     def run(self):
@@ -54,6 +57,9 @@ class TFEmitter(Emitter):
                                   param_attr=attr)
 
     def Const(self, node):
+        ## TODO
+        return
+
         shape = node.out_shapes[0]
         dtype = node.dtype
         value = node.value
@@ -75,8 +81,9 @@ class TFEmitter(Emitter):
                                   param_attr=attr)
 
     def Transpose(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        perm = self.graph.get_node(node.layer.input[1])
+        return
+        input = self.graph.get_node(node.layer.input[0], copy=True)
+        perm = self.graph.get_node(node.layer.input[1], copy=True)
         perm.fluid_code.clear()
         perm = perm.value.tolist()
 
@@ -87,13 +94,146 @@ class TFEmitter(Emitter):
                                   param_attr=attr)
 
     def RealDiv(self, node):
-        x = self.graph.get_node(node.layer.input[0])
-        y = self.graph.get_node(node.layer.input[1])
+        return
+        x = self.graph.get_node(node.layer.input[0], copy=True)
+        y = self.graph.get_node(node.layer.input[1], copy=True)
         inputs = {'x': x, 'y': y}
         node.fluid_code.add_layer("elementwise_div",
                                   inputs=inputs,
                                   output=node,
                                   param_attr=None)
 
-    def Fc(self, node):
-        self.weight['asdf'] = np.tranpose(node.kerneln[1, 0])
+    def Relu(self, node):
+        return
+        input = self.graph.get_node(node.layer.input[0], copy=True)
+        node.fluid_code.add_layer("relu",
+                                  inputs=input,
+                                  output=node,
+                                  param_attr=None)
+
+    def Squeeze(self, node):
+        return
+        input = self.graph.get_node(node.layer.input[0], copy=True)
+        squeeze_dims = node.get_attr('squeeze_dims')
+        print(squeeze_dims)
+        attr = {'squeeze_dims': squeeze_dims}
+        node.fluid_code.add_layer("squeeze",
+                                  inputs=input,
+                                  output=node,
+                                  param_attr=attr)
+
+    def BiasAdd(self, node):
+        return
+        x = self.graph.get_node(node.layer.input[0], copy=True)
+        y = self.graph.get_node(node.layer.input[1], copy=True)
+        inputs = {'x': x, 'y': y}
+        node.fluid_code.add_layer("elementwise_add",
+                                  inputs=inputs,
+                                  output=node,
+                                  param_attr=None)
+
+    def Identity(self, node):
+        return
+        input = self.graph.get_node(node.layer.input[0], copy=True)
+        node.fluid_code.add_layer("assign",
+                                  inputs=input,
+                                  output=node,
+                                  param_attr=None)
+
+    def MaxPool(self, node):
+        return
+        input = self.graph.get_node(node.layer.input[0], copy=True)
+        in_shape = input.out_shapes[0]
+        k_size = node.get_attr("ksize")
+        strides = node.get_attr("strides")
+        data_format = node.get_attr("data_format").decode()
+        pad_mode = node.get_attr("padding").decode()
+
+        if data_format == "NHWC":
+            attr = {"perm": [0, 3, 1, 2]}
+            node.fluid_code.add_layer("transpose",
+                                      inputs=input,
+                                      output=node,
+                                      param_attr=attr)
+            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
+            k_size = [k_size[i] for i in [0, 3, 1, 2]]
+            strides = [strides[i] for i in [0, 3, 1, 2]]
+
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[3],
+                                     strides[3])
+            pad_h = pad_h[0] + pad_h[1]
+            pad_w = pad_w[0] + pad_w[1]
+            attr = {"paddings": [0, pad_h, 0, pad_w], "pad_value": -10000.0}
+            node.fluid_code.add_layer("pad2d",
+                                      inputs=input,
+                                      output=node,
+                                      param_attr=attr)
+        attr = {
+            "pool_size": k_size[1:3],
+            "pool_type": string("max"),
+            "pool_stride": strides[1:3]
+        }
+        node.fluid_code.add_layer("pool2d",
+                                  inputs=input,
+                                  output=node,
+                                  param_attr=attr)
+
+        if data_format == "NHWC":
+            attr = {"perm": [0, 2, 3, 1]}
+            node.fluid_code.add_layer("transpose",
+                                      inputs=input,
+                                      output=node,
+                                      param_attr=attr)
+
+
+# def Conv2D(self, node):
+#     input = self.graph.get_node(node.layer.input[0], copy=True)
+#     in_shape = input.out_shapes[0]
+#     k_size = node.get_attr("ksize")
+#     strides = node.get_attr("strides")
+#     dilations = node.get_attr("dilations")
+#     data_format = node.get_attr("data_format").decode()
+#     pad_mode = node.get_attr("padding").decode()
+#
+#     if data_format == "NHWC":
+#         attr = {"perm": [0, 3, 1, 2]}
+#         node.fluid_code.add_layer("transpose",
+#                                   inputs=input,
+#                                   output=node,
+#                                   param_attr=attr)
+#         in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
+#         k_size = [k_size[i] for i in [0, 3, 1, 2]]
+#         strides = [strides[i] for i in [0, 3, 1, 2]]
+#         dilations = [dilations[i] for i in [0, 3, 1, 2]]
+#
+#     if pad_mode == "SAME":
+#         pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
+#         pad_w = get_same_padding(in_shape[3], k_size[3], strides[3])
+#         pad_h = pad_h[0] + pad_h[1]
+#         pad_w = pad_w[0] + pad_w[1]
+#         attr = {"paddings": pad_h+pad_w, "pad_value": 0.0}
+#         node.fluid_code.add_layer("pad2d",
+#                                   inputs=input,
+#                                   output=node,
+#                                   param_attr=attr)
+#     attr = {
+#         "pool_stride": strides[1:3],
+#         "bias_attr": False,
+#         "param_attr":,
+#         "num_filters":,
+#         "filter_size":,
+#         "stride":,
+#         "dilation":
+#     }
+#     node.fluid_code.add_layer("conv2d",
+#                               inputs=input,
+#                               output=node,
+#                               param_attr=attr)
+#
+#     if data_format == "NHWC":
+#         attr = {"perm": [0, 2, 3, 1]}
+#         node.fluid_code.add_layer("transpose",
+#                                   inputs=input,
+#                                   output=node,
+#                                   param_attr=attr)
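For orientation, the MaxPool mapping above is intended to emit Fluid code roughly like the following for an NHWC max pool with SAME padding. This is a hand-written sketch against the Paddle Fluid 1.x layer API, not output captured from the emitter, and the tensor names are illustrative:

    import paddle.fluid as fluid

    # NHWC input, as it comes out of the TensorFlow graph
    x = fluid.layers.data(name='x', shape=[224, 224, 3], dtype='float32')
    # transpose to NCHW, which Paddle's pooling layers expect
    x = fluid.layers.transpose(x, perm=[0, 3, 1, 2])
    # explicit SAME padding; for a 2x2/stride-2 pool on 224x224 both pads are 0,
    # the call is kept only to mirror the pad2d + pool2d sequence above
    x = fluid.layers.pad2d(x, paddings=[0, 0, 0, 0], pad_value=-10000.0)
    x = fluid.layers.pool2d(x,
                            pool_size=[2, 2],
                            pool_type='max',
                            pool_stride=[2, 2])
    # back to NHWC so downstream TensorFlow-style ops keep their layout
    x = fluid.layers.transpose(x, perm=[0, 2, 3, 1])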
diff --git a/x2paddle/parser/tf_parser.py b/x2paddle/parser/tf_parser.py
index 791af375238dd6a70db6636706d799e15ba56c3c..732541a65f29845fd95b91cae265b08cbbb48af0 100644
--- a/x2paddle/parser/tf_parser.py
+++ b/x2paddle/parser/tf_parser.py
@@ -16,6 +16,7 @@ from x2paddle.core.graph import GraphNode, Graph
 from x2paddle.core.fluid_code import FluidCode
 from tensorflow.python.framework import tensor_util
 from tensorflow.python.platform import gfile
+from tensorflow.core.framework import attr_value_pb2
 import tensorflow as tf
 import copy
 
@@ -56,6 +57,27 @@ class TFGraphNode(GraphNode):
         field = getattr(attr, attr.WhichOneof('value'))
         return tensor_util.MakeNdarray(field)
 
+    def get_attr(self, name):
+        if name not in self.layer.attr:
+            return None
+        attr = self.layer.attr[name]
+        field = attr.WhichOneof('value')
+        value = getattr(attr, field) if field else None
+
+        if isinstance(value, attr_value_pb2.AttrValue.ListValue):
+            result = list(value.ListFields()[0][1])
+            for i in range(len(result)):
+                if isinstance(result[i], int):
+                    result[i] = int(result[i])
+                try:
+                    if isinstance(result[i], long):
+                        result[i] = int(result[i])
+                except:
+                    pass
+            return result
+        else:
+            return value
+
 
 class TFGraph(Graph):
     def __init__(self, model):
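A small, self-contained illustration of the AttrValue structure that TFGraphNode.get_attr unwraps; the NodeDef below is hand-built with illustrative values rather than taken from the VGG-16 graph:

    from tensorflow.core.framework import node_def_pb2

    # hand-built NodeDef with illustrative attribute values
    node = node_def_pb2.NodeDef()
    node.op = "MaxPool"
    node.attr["ksize"].list.i.extend([1, 2, 2, 1])
    node.attr["padding"].s = b"SAME"

    # a list-valued attr unwraps to a plain Python list of ints
    print(list(node.attr["ksize"].list.i))      # [1, 2, 2, 1]
    # a scalar bytes attr stays as bytes; the emitter calls .decode() on it
    print(node.attr["padding"].s.decode())      # SAME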