Commit 319792d0 authored by J jiangjiajun

modify add demos

Parent bbd4e495
import sys
import math

# Compare two result files (one float per line) and report the maximum and
# average absolute difference. Wrapping map() in list() keeps this working
# under both Python 2 and Python 3.
val1 = list(map(float, open(sys.argv[1]).read().strip().split('\n')))
val2 = list(map(float, open(sys.argv[2]).read().strip().split('\n')))
if len(val1) != len(val2):
    raise Exception("Not Same Length")
max_diff = 0
avg_diff = 0
for i in range(len(val1)):
    diff = math.fabs(val1[i] - val2[i])
    if diff > max_diff:
        max_diff = diff
    avg_diff += diff
avg_diff /= len(val1)
print("max_diff: {}\tavg_diff: {}".format(max_diff, avg_diff))
# coding:utf-8
import sys
sys.path.append("..")
from paddle_resnet_v1_101.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_resnet_v1_101/save_var.list') as f:
        for line in f:
            try:
                # Skip variables that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_resnet_v1_101', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Test with random input data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)

    # TensorFlow inputs are NHWC while PaddlePaddle uses NCHW, so a transpose is needed
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and dtype must match the definitions in mymodel.py
    for i in range(0, 50):
        r, = exe.run(
            fluid.default_main_program(),
            feed={
                'input_0':
                numpy.array(img_data[i * 20:i * 20 + 20], dtype='float32')
            },
            fetch_list=[result])
        r = r.flatten()
        files = open('fluid_resnet_v1_101.result', 'a+')
        for j in range(0, r.shape[0]):
            files.write(str(r[j]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and its parameters
    # A model saved this way can be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
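The commented-out save_inference_model call above notes that a serialized model can be reloaded with load_inference_model. A minimal sketch of that round trip, assuming the "./paddle_model" directory from the commented example and the Fluid 1.x API (not part of this commit):

import numpy
import paddle.fluid as fluid

exe = fluid.Executor(fluid.CPUPlace())
# load_inference_model returns the program, the feed tensor names and the fetch targets
[infer_prog, feed_names, fetch_targets] = fluid.io.load_inference_model(
    "./paddle_model", exe)

# Random NCHW input, matching the 'input_0' placeholder shape used above
data = numpy.random.rand(1, 3, 224, 224).astype('float32')
outputs = exe.run(infer_prog,
                  feed={feed_names[0]: data},
                  fetch_list=fetch_targets)
print(outputs[0].shape)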
# coding:utf-8
import sys
sys.path.append("..")
from paddle_resnet_v1_50.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_resnet_v1_50/save_var.list') as f:
        for line in f:
            try:
                # Skip variables that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_resnet_v1_50', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Test with random input data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)

    # TensorFlow inputs are NHWC while PaddlePaddle uses NCHW, so a transpose is needed
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and dtype must match the definitions in mymodel.py
    for i in range(0, 50):
        r, = exe.run(
            fluid.default_main_program(),
            feed={
                'input_0':
                numpy.array(img_data[i * 20:i * 20 + 20], dtype='float32')
            },
            fetch_list=[result])
        r = r.flatten()
        files = open('fluid_resnet_v1_50.result', 'a+')
        for j in range(0, r.shape[0]):
            files.write(str(r[j]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and its parameters
    # A model saved this way can be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
rm -rf fluid_vgg_19.result
python vgg_19_infer.py
echo "paddle fluid vgg_19 model"
python diff.py fluid_vgg_19.result tf_vgg_19.result
rm -rf fluid_vgg_16.result
python vgg_16_infer.py
echo "paddle fluid vgg_16 model"
python diff.py fluid_vgg_16.result tf_vgg_16.result
rm -rf fluid_resnet_v1_50.result
python resnet_v1_50_infer.py
echo "paddle fluid resnet_v1_50 model"
python diff.py fluid_resnet_v1_50.result tf_resnet_v1_50.result
rm -rf fluid_resnet_v1_101.result
python resnet_v1_101_infer.py
echo "paddle fluid resnet_v1_101 model"
python diff.py fluid_resnet_v1_101.result tf_resnet_v1_101.result
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# coding:utf-8
import sys
sys.path.append("..")
from paddle_vgg_16.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_vgg_16/save_var.list') as f:
        for line in f:
            try:
                # Skip variables that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_vgg_16', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Test with random input data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)

    # TensorFlow inputs are NHWC while PaddlePaddle uses NCHW, so a transpose is needed
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and dtype must match the definitions in mymodel.py
    for i in range(0, 50):
        r, = exe.run(
            fluid.default_main_program(),
            feed={
                'input_0':
                numpy.array(img_data[i * 20:i * 20 + 20], dtype='float32')
            },
            fetch_list=[result])
        r = r.flatten()
        files = open('fluid_vgg_16.result', 'a+')
        for j in range(0, r.shape[0]):
            files.write(str(r[j]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and its parameters
    # A model saved this way can be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
# coding:utf-8
import sys
sys.path.append("..")
from paddle_vgg_19.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_vgg_19/save_var.list') as f:
        for line in f:
            try:
                # Skip variables that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_vgg_19', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Test with random input data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)

    # TensorFlow inputs are NHWC while PaddlePaddle uses NCHW, so a transpose is needed
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and dtype must match the definitions in mymodel.py
    for i in range(0, 50):
        r, = exe.run(
            fluid.default_main_program(),
            feed={
                'input_0':
                numpy.array(img_data[i * 20:i * 20 + 20], dtype='float32')
            },
            fetch_list=[result])
        r = r.flatten()
        files = open('fluid_vgg_19.result', 'a+')
        for j in range(0, r.shape[0]):
            files.write(str(r[j]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and its parameters
    # A model saved this way can be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.slim.nets import vgg as vgg
from tensorflow.contrib.slim.nets import resnet_v1 as resnet_v1
from tensorflow.contrib.framework.python.ops import arg_scope
import tensorflow.contrib.slim as slim
import tensorflow as tf
import numpy
from six import text_type as _text_type


def inception_v3(ckpt_file):
    def get_tuned_variables():
        CHECKPOINT_EXCLUDE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'
        exclusions = [
            scope.strip() for scope in CHECKPOINT_EXCLUDE_SCOPES.split(',')
        ]
        variables_to_restore = []
        for var in slim.get_model_variables():
            excluded = False
            for exclusion in exclusions:
@@ -21,78 +21,105 @@ def inception_v3(ckpt_file):
                    break
            if not excluded:
                variables_to_restore.append(var)
        return variables_to_restore

    img_size = inception.inception_v3.default_image_size
    img = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name='inputs')
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, _ = inception.inception_v3(
            img, num_classes=1000, is_training=False)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, get_tuned_variables(), ignore_missing_vars=True)
    load_model(sess)
    return sess


def resnet_v1_50(ckpt_file):
    img_size = resnet_v1.resnet_v1.default_image_size
    img = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name='inputs')
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        net, endpoint = resnet_v1.resnet_v1_50(
            img, num_classes=1000, is_training=False)
    sess = tf.Session()
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("resnet_v1_50"))
    load_model(sess)
    return sess


def resnet_v1_101(ckpt_file):
    img_size = resnet_v1.resnet_v1.default_image_size
    img = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name='inputs')
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        net, endpoint = resnet_v1.resnet_v1_101(
            img, num_classes=1000, is_training=False)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("resnet_v1_101"))
    load_model(sess)
    return sess


def vgg_16(ckpt_file):
    img_size = vgg.vgg_16.default_image_size
    inputs = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name="inputs")
    logits, endpoint = vgg.vgg_16(inputs, num_classes=1000, is_training=False)
    sess = tf.Session()
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("vgg_16"))
    load_model(sess)
    return sess


def vgg_19(ckpt_file):
    img_size = vgg.vgg_19.default_image_size
    inputs = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name="inputs")
    logits, endpoint = vgg.vgg_19(inputs, num_classes=1000, is_training=False)
    sess = tf.Session()
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("vgg_19"))
    load_model(sess)
    return sess


def save_checkpoint(sess, save_dir):
    saver = tf.train.Saver()
    saver.save(sess, save_dir + "/model")


def get_parser():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        "-m",
        type=_text_type,
        default=None,
        help="inception_v3/resnet_v1_50/resnet_v1_101/vgg_16/vgg_19")
    parser.add_argument(
        "--ckpt_file",
        "-c",
        type=_text_type,
        default=None,
        help="parameters ckpt file")
    parser.add_argument(
        "--save_dir", "-s", type=_text_type, default=None, help="model path")
    return parser


if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
@@ -110,5 +137,7 @@ if __name__ == "__main__":
    elif args.model == "vgg_19":
        sess = vgg_19(args.ckpt_file)
    else:
        raise Exception(
            "Only support inception_v3/resnet_v1_50/resnet_v1_101/vgg_16/vgg_19"
        )
    save_checkpoint(sess, args.save_dir)
export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz
tar xzvf resnet_v1_101_2016_08_28.tar.gz
python export_to_checkpoint.py --model resnet_v1_101 --ckpt_file resnet_v1_101.ckpt --save_dir resnet_v1_101_checkpoint
rm resnet_v1_101_2016_08_28.tar.gz resnet_v1_101.ckpt
tf2fluid --meta_file resnet_v1_101_checkpoint/model.meta \
--ckpt_dir resnet_v1_101_checkpoint \
--in_nodes inputs \
--input_shape None,224,224,3 \
--output_nodes resnet_v1_101/predictions/Softmax \
--save_dir paddle_resnet_v1_101
export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz
tar xzvf resnet_v1_50_2016_08_28.tar.gz
python export_to_checkpoint.py --model resnet_v1_50 --ckpt_file resnet_v1_50.ckpt --save_dir resnet_v1_50_checkpoint
rm resnet_v1_50_2016_08_28.tar.gz resnet_v1_50.ckpt
tf2fluid --meta_file resnet_v1_50_checkpoint/model.meta \
--ckpt_dir resnet_v1_50_checkpoint \
--in_nodes inputs \
--input_shape None,224,224,3 \
--output_nodes resnet_v1_50/predictions/Softmax \
--save_dir paddle_resnet_v1_50
export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz
tar xzvf vgg_16_2016_08_28.tar.gz
python export_to_checkpoint.py --model vgg_16 --ckpt_file vgg_16.ckpt --save_dir vgg_16_checkpoint
rm vgg_16_2016_08_28.tar.gz vgg_16.ckpt
tf2fluid --meta_file vgg_16_checkpoint/model.meta \
--ckpt_dir vgg_16_checkpoint \
--in_nodes inputs \
--input_shape None,224,224,3 \
--output_nodes vgg_16/fc8/squeezed \
--save_dir paddle_vgg_16
export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz
tar xzvf vgg_19_2016_08_28.tar.gz
python export_to_checkpoint.py --model vgg_19 --ckpt_file vgg_19.ckpt --save_dir vgg_19_checkpoint
rm vgg_19_2016_08_28.tar.gz vgg_19.ckpt
tf2fluid --meta_file vgg_19_checkpoint/model.meta \
--ckpt_dir vgg_19_checkpoint \
--in_nodes inputs \
--input_shape None,224,224,3 \
--output_nodes vgg_19/fc8/squeezed \
--save_dir paddle_vgg_19
@@ -413,11 +413,7 @@ class PaddleEmitter(object):
         data1 = node.inputs[0]
         data2 = node.inputs[1]
         axis = self.get_axis(data1, data2)
-        code = list()
-        code.append("# {}, {}, {}".format(node.layer_name, data1.layer_name,
-                                          data2.layer_name))
-        code.append("{} = layers.elementwise_add({}, {}, axis={})".format(
-            node.output_name, data1.ref_name, data2.ref_name, axis))
+        code = "{} = layers.elementwise_add({}, {}, axis={})".format(node.output_name, data1.ref_name, data2.ref_name, axis)
         return code

     def emit_mean(self, node):
......
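For illustration only (all tensor names below are invented), the reworked emitter now returns one formatted string rather than a list of lines; a generated Paddle line would look like this:

# Hypothetical values standing in for node.output_name, the ref_names and axis
output_name, ref1, ref2, axis = "add_1", "conv_1", "bias_1", 1
code = "{} = layers.elementwise_add({}, {}, axis={})".format(output_name, ref1, ref2, axis)
print(code)  # add_1 = layers.elementwise_add(conv_1, bias_1, axis=1)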
@@ -41,6 +41,7 @@ class TensorflowCkptParser(object):
         graph_def, ver = tensorflow.get_default_graph()._as_graph_def(
             add_shapes=True)
+        # self.sess = sess
         if in_nodes is not None and input_shape is not None:
             graph_def = strip_unused_lib.strip_unused(
@@ -62,6 +63,11 @@ class TensorflowPbParser(object):
         tensorflow.reset_default_graph()
         original_graph_def = tensorflow.GraphDef()
         original_graph_def.ParseFromString(serialized)
+        # tensorflow.import_graph_def(origin_graph_def, name="")
+        # self.sess = tensorflow.Session(graph=tf.get_default_graph())
+        # self.sess.run(tensorflow.global_variables_initializer())
         original_graph_def = strip_unused_lib.strip_unused(
             input_graph_def=original_graph_def,
             input_node_names=in_nodes,
@@ -84,7 +90,6 @@ class TensorflowPbParser(object):
             if in_type_list[in_nodes[i]] == 1 or in_type_list[
                     in_nodes[i]] == 0:
                 dtype = tensorflow.float32
-                print(input_shape[i])
                 x = tensorflow.placeholder(dtype, shape=input_shape[i])
             elif in_type_list[in_nodes[i]] == 3:
                 dtype = tensorflow.int32
......