diff --git a/README.md b/README.md
index 4f739a89abc6a56359a28b803eb5833b92d8e139..c85db582014fc0b42678a65c21fb221c5f72ac78 100644
--- a/README.md
+++ b/README.md
@@ -14,64 +14,14 @@ pip install git+https://github.com/PaddlePaddle/X2Paddle.git@develop
 ```
 
 ## How To Use
+### TensorFlow
 ```
 x2paddle --framework=tensorflow --model=tf_model.pb --save_dir=pd_model
 ```
-
-## 转换tensorflow vgg_16模型
-
-### 步骤一 下载模型参数文件
-```
-wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz
-```
-
-### 步骤二 导出vgg_16的pb模型
-使用如下python脚本转换
-```
-import tensorflow.contrib.slim as slim
-from tensorflow.contrib.slim.nets import vgg
-from tensorflow.python.framework import graph_util
-import tensorflow as tf
-
-def freeze_model(sess, output_tensor_names, freeze_model_path):
-    out_graph = graph_util.convert_variables_to_constants(
-        sess, sess.graph.as_graph_def(), output_tensor_names)
-    with tf.gfile.GFile(freeze_model_path, 'wb') as f:
-        f.write(out_graph.SerializeToString())
-
-    print("freeze model saved in {}".format(freeze_model_path))
-
-with tf.Session() as sess:
-    inputs = tf.placeholder(dtype=tf.float32,
-                            shape=[None, 224, 224, 3],
-                            name="inputs")
-    logits, endpoint = vgg.vgg_16(inputs, num_classes=1000, is_training=False)
-    load_model = slim.assign_from_checkpoint_fn(
-        "vgg_16.ckpt", slim.get_model_variables("vgg_16"))
-    load_model(sess)
-
-    freeze_model(sess, ["vgg_16/fc8/squeezed"], "vgg16.pb")
+### Caffe
 ```
-
-### 步骤三 模型转换
-
+x2paddle --framework=caffe --proto=deploy.proto --weight=deploy.caffemodel --save_dir=pd_model
 ```
-x2paddle --framework=tensorflow \
-    --model=../vgg16.pb \
-    --save_dir=paddle_model
-```
-## 转换caffe SqueezeNet模型
-### 步骤一 下载模型参数文件和proto文件
-```
-wget https://github.com/DeepScale/SqueezeNet/blob/master/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel
-wget https://github.com/DeepScale/SqueezeNet/blob/master/SqueezeNet_v1.1/deploy.prototxt
-```
-
-### 步骤二 模型转换
-
-```
-x2paddle --framework=caffe \
-    --weight=../squeezenet_v1.1.caffemodel \
-    --proto=../deploy.prototxt \
-    --save_dir=paddle_model
+## Related Docs
+[1. 如何导出TensorFlow的pb模型](export_tf_model.md)
 
diff --git a/export_tf_model.md b/export_tf_model.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2f98b7446802e636841308fece28f394388c6c1
--- /dev/null
+++ b/export_tf_model.md
@@ -0,0 +1,44 @@
+## 如何导出TensorFlow模型
+
+本文档介绍如何将TensorFlow模型导出为X2Paddle支持的模型格式。
+
+TensorFlow提供了接口可将网络参数和网络结构同时保存到同一个文件中，并且只保存指定的前向计算子图，下面示例展示了如何导出tensorflow/models下的VGG16模型
+
+步骤一 下载模型参数文件
+```
+wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz
+tar xzvf vgg_16_2016_08_28.tar.gz
+```
+
+步骤二 加载和导出模型
+```
+#coding: utf-8
+import tensorflow.contrib.slim as slim
+from tensorflow.contrib.slim.nets import vgg
+from tensorflow.python.framework import graph_util
+import tensorflow as tf
+
+# 固化模型函数
+# output_tensor_names: list,指定模型的输出tensor的name
+# freeze_model_path: 模型导出的文件路径
+def freeze_model(sess, output_tensor_names, freeze_model_path):
+    out_graph = graph_util.convert_variables_to_constants(
+        sess, sess.graph.as_graph_def(), output_tensor_names)
+    with tf.gfile.GFile(freeze_model_path, 'wb') as f:
+        f.write(out_graph.SerializeToString())
+
+    print("freeze model saved in {}".format(freeze_model_path))
+
+# 加载模型参数
+sess = tf.Session()
+inputs = tf.placeholder(dtype=tf.float32,
+                        shape=[None, 224, 224, 3],
+                        name="inputs")
+logits, endpoint = vgg.vgg_16(inputs, num_classes=1000, is_training=False)
+load_model = slim.assign_from_checkpoint_fn(
+    "vgg_16.ckpt", slim.get_model_variables("vgg_16"))
+load_model(sess)
+
+# 导出模型
+freeze_model(sess, ["vgg_16/fc8/squeezed"], "vgg16.pb")
+```