PaddlePaddle / X2Paddle
Commit 319792d0
Authored on Feb 12, 2019 by jiangjiajun
Parent: bbd4e495

modify add demos

Showing 17 changed files with 4,000,419 additions and 33 deletions (+4000419 / -33)
tensorflow2fluid/demo/diff_test/diff.py                     +17       -0
tensorflow2fluid/demo/diff_test/resnet_v1_101_infer.py      +67       -0
tensorflow2fluid/demo/diff_test/resnet_v1_50_infer.py       +67       -0
tensorflow2fluid/demo/diff_test/run.sh                      +19       -0
tensorflow2fluid/demo/diff_test/tf_resnet_v1_101.result     +1000000  -0
tensorflow2fluid/demo/diff_test/tf_resnet_v1_50.result      +1000000  -0
tensorflow2fluid/demo/diff_test/tf_vgg_16.result            +1000000  -0
tensorflow2fluid/demo/diff_test/tf_vgg_19.result            +1000000  -0
tensorflow2fluid/demo/diff_test/vgg_16_infer.py             +67       -0
tensorflow2fluid/demo/diff_test/vgg_19_infer.py             +67       -0
tensorflow2fluid/demo/export_to_checkpoint.py               +56       -27
tensorflow2fluid/demo/resnet_v1_101.sh                      +13       -0
tensorflow2fluid/demo/resnet_v1_50.sh                       +13       -0
tensorflow2fluid/demo/vgg_16.sh                             +13       -0
tensorflow2fluid/demo/vgg_19.sh                             +13       -0
tensorflow2fluid/src/paddle_emitter.py                      +1        -5
tensorflow2fluid/src/tensorflow_parser.py                   +6        -1
tensorflow2fluid/demo/diff_test/diff.py (new file, mode 100644)

import sys
import math

# Wrap map() in list() so the script also runs under Python 3.
val1 = list(map(float, open(sys.argv[1]).read().strip().split('\n')))
val2 = list(map(float, open(sys.argv[2]).read().strip().split('\n')))
if len(val1) != len(val2):
    raise Exception("Not Same Length")
max_diff = 0
avg_diff = 0
for i in range(len(val1)):
    diff = math.fabs(val1[i] - val2[i])
    if diff > max_diff:
        max_diff = diff
    avg_diff += diff
avg_diff /= len(val1)
print("max_diff: {}\tavg_diff: {}".format(max_diff, avg_diff))
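For reference only (not part of this commit): the same max/average absolute-difference check that diff.py performs can be sketched in a few vectorized numpy lines, assuming each result file holds one float per line.

import sys
import numpy as np

# Load the two result files produced by the TensorFlow and Paddle demos (one value per line).
a = np.loadtxt(sys.argv[1])
b = np.loadtxt(sys.argv[2])
assert a.shape == b.shape, "Not Same Length"
d = np.abs(a - b)
print("max_diff: {}\tavg_diff: {}".format(d.max(), d.mean()))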
tensorflow2fluid/demo/diff_test/resnet_v1_101_infer.py (new file, mode 100644)

# coding:utf-8
import sys
sys.path.append("..")
from paddle_resnet_v1_101.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_resnet_v1_101/save_var.list') as f:
        for line in f:
            try:
                # Skip parameters that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_resnet_v1_101', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Feed randomly generated test data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)
    # TensorFlow inputs are NHWC while PaddlePaddle expects NCHW, so transpose
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and data type must match the definitions in my_model.py
    for i in range(0, 50):
        r, = exe.run(fluid.default_main_program(),
                     feed={
                         'input_0': numpy.array(
                             img_data[i * 20:i * 20 + 20], dtype='float32')
                     },
                     fetch_list=[result])
        r = r.flatten()
        files = open('fluid_resnet_v1_101.result', 'a+')
        for i in range(0, r.shape[0]):
            files.write(str(r[i]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and the parameters
    # The saved model can then be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
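The commented-out fluid.io.save_inference_model call above serializes both the program and the parameters. A minimal sketch of loading such a saved model back, assuming it had been written to ./paddle_model as in the commented line:

import numpy
import paddle.fluid as fluid

exe = fluid.Executor(fluid.CPUPlace())
# load_inference_model returns the inference program, the feed variable names and the fetch targets
program, feed_names, fetch_targets = fluid.io.load_inference_model("./paddle_model", exe)
data = numpy.random.rand(1, 3, 224, 224).astype('float32')  # one NCHW image
outs = exe.run(program, feed={feed_names[0]: data}, fetch_list=fetch_targets)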
tensorflow2fluid/demo/diff_test/resnet_v1_50_infer.py (new file, mode 100644)

# coding:utf-8
import sys
sys.path.append("..")
from paddle_resnet_v1_50.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_resnet_v1_50/save_var.list') as f:
        for line in f:
            try:
                # Skip parameters that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_resnet_v1_50', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Feed randomly generated test data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)
    # TensorFlow inputs are NHWC while PaddlePaddle expects NCHW, so transpose
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and data type must match the definitions in my_model.py
    for i in range(0, 50):
        r, = exe.run(fluid.default_main_program(),
                     feed={
                         'input_0': numpy.array(
                             img_data[i * 20:i * 20 + 20], dtype='float32')
                     },
                     fetch_list=[result])
        r = r.flatten()
        files = open('fluid_resnet_v1_50.result', 'a+')
        for i in range(0, r.shape[0]):
            files.write(str(r[i]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and the parameters
    # The saved model can then be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
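All four demo scripts apply the NHWC-to-NCHW conversion mentioned in the comments before feeding PaddlePaddle. As a standalone illustration (not part of the commit):

import numpy

batch = numpy.random.rand(20, 224, 224, 3)         # NHWC layout, as generated for TensorFlow
batch_nchw = numpy.transpose(batch, (0, 3, 1, 2))  # NCHW layout, as expected by PaddlePaddle
assert batch_nchw.shape == (20, 3, 224, 224)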
tensorflow2fluid/demo/diff_test/run.sh (new file, mode 100644)

rm -rf fluid_vgg_19.result
python vgg_19_infer.py
echo "paddle fluid vgg_19 model"
python diff.py fluid_vgg_19.result tf_vgg_19.result

rm -rf fluid_vgg_16.result
python vgg_16_infer.py
echo "paddle fluid vgg_16 model"
python diff.py fluid_vgg_16.result tf_vgg_16.result

rm -rf fluid_resnet_v1_50.result
python resnet_v1_50_infer.py
echo "paddle fluid resnet_v1_50 model"
python diff.py fluid_resnet_v1_50.result tf_resnet_v1_50.result

rm -rf fluid_resnet_v1_101.result
python resnet_v1_101_infer.py
echo "paddle fluid resnet_v1_101 model"
python diff.py fluid_resnet_v1_101.result tf_resnet_v1_101.result
tensorflow2fluid/demo/diff_test/tf_resnet_v1_101.result (new file, mode 100644)
Diff collapsed (1,000,000 added lines).

tensorflow2fluid/demo/diff_test/tf_resnet_v1_50.result (new file, mode 100644)
Diff collapsed (1,000,000 added lines).

tensorflow2fluid/demo/diff_test/tf_vgg_16.result (new file, mode 100644)
Diff collapsed (1,000,000 added lines).

tensorflow2fluid/demo/diff_test/tf_vgg_19.result (new file, mode 100644)
Diff collapsed (1,000,000 added lines).
tensorflow2fluid/demo/diff_test/vgg_16_infer.py (new file, mode 100644)

# coding:utf-8
import sys
sys.path.append("..")
from paddle_vgg_16.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_vgg_16/save_var.list') as f:
        for line in f:
            try:
                # Skip parameters that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_vgg_16', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Feed randomly generated test data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)
    # TensorFlow inputs are NHWC while PaddlePaddle expects NCHW, so transpose
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and data type must match the definitions in my_model.py
    for i in range(0, 50):
        r, = exe.run(fluid.default_main_program(),
                     feed={
                         'input_0': numpy.array(
                             img_data[i * 20:i * 20 + 20], dtype='float32')
                     },
                     fetch_list=[result])
        r = r.flatten()
        files = open('fluid_vgg_16.result', 'a+')
        for i in range(0, r.shape[0]):
            files.write(str(r[i]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and the parameters
    # The saved model can then be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
tensorflow2fluid/demo/diff_test/vgg_19_infer.py (new file, mode 100644)

# coding:utf-8
import sys
sys.path.append("..")
from paddle_vgg_19.mymodel import KitModel
import paddle.fluid as fluid
import numpy

use_cuda = True


def model_initialize():
    # Build the model structure and initialize its parameters
    result = KitModel()
    if use_cuda:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    else:
        exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Load the model parameters listed in save_var.list
    var_list = list()
    global_block = fluid.default_main_program().global_block()
    with open('../paddle_vgg_19/save_var.list') as f:
        for line in f:
            try:
                # Skip parameters that do not need to be loaded (OP configuration parameters)
                var = global_block.var(line.strip())
                var_list.append(var)
            except:
                pass
    fluid.io.load_vars(exe, '../paddle_vgg_19', vars=var_list)
    prog = fluid.default_main_program()
    return exe, prog, result


def test_case(exe, prog, result):
    # Feed randomly generated test data
    numpy.random.seed(13)
    img_data = numpy.random.rand(1000, 224, 224, 3)
    # TensorFlow inputs are NHWC while PaddlePaddle expects NCHW, so transpose
    img_data = numpy.transpose(img_data, (0, 3, 1, 2))

    # 'input_0' is the input tensor name; the name and data type must match the definitions in my_model.py
    for i in range(0, 50):
        r, = exe.run(fluid.default_main_program(),
                     feed={
                         'input_0': numpy.array(
                             img_data[i * 20:i * 20 + 20], dtype='float32')
                     },
                     fetch_list=[result])
        r = r.flatten()
        files = open('fluid_vgg_19.result', 'a+')
        for i in range(0, r.shape[0]):
            files.write(str(r[i]) + '\n')
        files.close()

    # Calling save_inference_model serializes both the model structure (currently kept as code) and the parameters
    # The saved model can then be loaded with load_inference_model
    # http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/api_guides/low_level/inference.html#api-guide-inference
    # fluid.io.save_inference_model("./paddle_model", ["input_0"], [result], exe)


if __name__ == "__main__":
    exe, prog, result = model_initialize()
    test_case(exe, prog, result)
tensorflow2fluid/demo/export_to_checkpoint.py (modified, +56 / -27; new version of the displayed hunks)

from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.slim.nets import vgg as vgg
from tensorflow.contrib.slim.nets import resnet_v1 as resnet_v1
from tensorflow.contrib.framework.python.ops import arg_scope
import tensorflow.contrib.slim as slim
import tensorflow as tf
import numpy
from six import text_type as _text_type


def inception_v3(ckpt_file):
    def get_tuned_variables():
        CHECKPOINT_EXCLUDE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'
        exclusions = [
            scope.strip() for scope in CHECKPOINT_EXCLUDE_SCOPES.split(',')
        ]
        variables_to_restore = []
        for var in slim.get_model_variables():
            excluded = False
            for exclusion in exclusions:
...
@@ -21,78 +21,105 @@ def inception_v3(ckpt_file):
                break
            if not excluded:
                variables_to_restore.append(var)
        return variables_to_restore

    img_size = inception.inception_v3.default_image_size
    img = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name='inputs')
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, _ = inception.inception_v3(
            img, num_classes=1000, is_training=False)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, get_tuned_variables(), ignore_missing_vars=True)
    load_model(sess)
    return sess


def resnet_v1_50(ckpt_file):
    img_size = resnet_v1.resnet_v1.default_image_size
    img = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name='inputs')
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        net, endpoint = resnet_v1.resnet_v1_50(
            img, num_classes=1000, is_training=False)
    sess = tf.Session()
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("resnet_v1_50"))
    load_model(sess)
    return sess


def resnet_v1_101(ckpt_file):
    img_size = resnet_v1.resnet_v1.default_image_size
    img = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name='inputs')
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        net, endpoint = resnet_v1.resnet_v1_101(
            img, num_classes=1000, is_training=False)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("resnet_v1_101"))
    load_model(sess)
    return sess


def vgg_16(ckpt_file):
    img_size = vgg.vgg_16.default_image_size
    inputs = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name="inputs")
    logits, endpoint = vgg.vgg_16(inputs, num_classes=1000, is_training=False)
    sess = tf.Session()
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("vgg_16"))
    load_model(sess)
    return sess


def vgg_19(ckpt_file):
    img_size = vgg.vgg_19.default_image_size
    inputs = tf.placeholder(
        tf.float32, shape=[None, img_size, img_size, 3], name="inputs")
    logits, endpoint = vgg.vgg_19(inputs, num_classes=1000, is_training=False)
    sess = tf.Session()
    load_model = tf.contrib.slim.assign_from_checkpoint_fn(
        ckpt_file, tf.contrib.slim.get_model_variables("vgg_19"))
    load_model(sess)
    return sess


def save_checkpoint(sess, save_dir):
    saver = tf.train.Saver()
    saver.save(sess, save_dir + "/model")


def get_parser():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        "-m",
        type=_text_type,
        default=None,
        help="inception_v3/resnet_v1_50/resnet_v1_101/vgg_16/vgg_19")
    parser.add_argument(
        "--ckpt_file",
        "-c",
        type=_text_type,
        default=None,
        help="parameters ckpt file")
    parser.add_argument(
        "--save_dir", "-s", type=_text_type, default=None, help="model path")
    return parser


if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
...
@@ -110,5 +137,7 @@ if __name__ == "__main__":
    elif args.model == "vgg_19":
        sess = vgg_19(args.ckpt_file)
    else:
        raise Exception(
            "Only support inception_v3/resnet_v1_50/resnet_v1_101/vgg_16/vgg_19")
    save_checkpoint(sess, args.save_dir)
tensorflow2fluid/demo/resnet_v1_101.sh (new file, mode 100644)

export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz
tar xzvf resnet_v1_101_2016_08_28.tar.gz
python export_to_checkpoint.py --model resnet_v1_101 --ckpt_file resnet_v1_101.ckpt --save_dir resnet_v1_101_checkpoint
rm resnet_v1_101_2016_08_28.tar.gz resnet_v1_101.ckpt
tf2fluid --meta_file resnet_v1_101_checkpoint/model.meta \
         --ckpt_dir resnet_v1_101_checkpoint \
         --in_nodes inputs \
         --input_shape None,224,224,3 \
         --output_nodes resnet_v1_101/predictions/Softmax \
         --save_dir paddle_resnet_v1_101
tensorflow2fluid/demo/resnet_v1_50.sh (new file, mode 100644)

export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz
tar xzvf resnet_v1_50_2016_08_28.tar.gz
python export_to_checkpoint.py --model resnet_v1_50 --ckpt_file resnet_v1_50.ckpt --save_dir resnet_v1_50_checkpoint
rm resnet_v1_50_2016_08_28.tar.gz resnet_v1_50.ckpt
tf2fluid --meta_file resnet_v1_50_checkpoint/model.meta \
         --ckpt_dir resnet_v1_50_checkpoint \
         --in_nodes inputs \
         --input_shape None,224,224,3 \
         --output_nodes resnet_v1_50/predictions/Softmax \
         --save_dir paddle_resnet_v1_50
tensorflow2fluid/demo/vgg_16.sh (new file, mode 100644)

export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz
tar xzvf vgg_16_2016_08_28.tar.gz
python export_to_checkpoint.py --model vgg_16 --ckpt_file vgg_16.ckpt --save_dir vgg_16_checkpoint
rm vgg_16_2016_08_28.tar.gz vgg_16.ckpt
tf2fluid --meta_file vgg_16_checkpoint/model.meta \
         --ckpt_dir vgg_16_checkpoint \
         --in_nodes inputs \
         --input_shape None,224,224,3 \
         --output_nodes vgg_16/fc8/squeezed \
         --save_dir paddle_vgg_16
tensorflow2fluid/demo/vgg_19.sh (new file, mode 100644)

export CUDA_VISIBLE_DEVICES=-1
wget http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz
tar xzvf vgg_19_2016_08_28.tar.gz
python export_to_checkpoint.py --model vgg_19 --ckpt_file vgg_19.ckpt --save_dir vgg_19_checkpoint
rm vgg_19_2016_08_28.tar.gz vgg_19.ckpt
tf2fluid --meta_file vgg_19_checkpoint/model.meta \
         --ckpt_dir vgg_19_checkpoint \
         --in_nodes inputs \
         --input_shape None,224,224,3 \
         --output_nodes vgg_19/fc8/squeezed \
         --save_dir paddle_vgg_19
tensorflow2fluid/src/paddle_emitter.py (modified, +1 / -5)

...
@@ -413,11 +413,7 @@ class PaddleEmitter(object):
         data1 = node.inputs[0]
         data2 = node.inputs[1]
         axis = self.get_axis(data1, data2)
-        code = list()
-        code.append("# {}, {}, {}".format(node.layer_name, data1.layer_name,
-                                          data2.layer_name))
-        code.append("{} = layers.elementwise_add({}, {}, axis={})".format(
-            node.output_name, data1.ref_name, data2.ref_name, axis))
+        code = "{} = layers.elementwise_add({}, {}, axis={})".format(node.output_name, data1.ref_name, data2.ref_name, axis)
         return code

     def emit_mean(self, node):
...
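The change above makes the emitter return the generated elementwise_add line as a single string rather than building a list of strings. With hypothetical node names, the formatted result looks like this (illustrative sketch only, the names are made up):

# Hypothetical stand-ins for node.output_name, data1.ref_name, data2.ref_name and axis
output_name, ref1, ref2, axis = "add_1", "conv_1", "bias_1", 1
code = "{} = layers.elementwise_add({}, {}, axis={})".format(output_name, ref1, ref2, axis)
print(code)  # prints: add_1 = layers.elementwise_add(conv_1, bias_1, axis=1)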
tensorflow2fluid/src/tensorflow_parser.py (modified, +6 / -1)

...
@@ -41,6 +41,7 @@ class TensorflowCkptParser(object):
         graph_def, ver = tensorflow.get_default_graph()._as_graph_def(
             add_shapes=True)
+        # self.sess = sess
         if in_nodes is not None and input_shape is not None:
             graph_def = strip_unused_lib.strip_unused(
...
@@ -62,6 +63,11 @@ class TensorflowPbParser(object):
         tensorflow.reset_default_graph()
         original_graph_def = tensorflow.GraphDef()
         original_graph_def.ParseFromString(serialized)
+        # tensorflow.import_graph_def(origin_graph_def, name="")
+        # self.sess = tensorflow.Session(graph=tf.get_default_graph())
+        # self.sess.run(tensorflow.global_variables_initializer())
         original_graph_def = strip_unused_lib.strip_unused(
             input_graph_def=original_graph_def,
             input_node_names=in_nodes,
...
@@ -84,7 +90,6 @@ class TensorflowPbParser(object):
             if in_type_list[in_nodes[i]] == 1 or in_type_list[
                     in_nodes[i]] == 0:
                 dtype = tensorflow.float32
-                print(input_shape[i])
                 x = tensorflow.placeholder(dtype, shape=input_shape[i])
             elif in_type_list[in_nodes[i]] == 3:
                 dtype = tensorflow.int32
...