Unverified commit 146db990 authored by Jason, committed by GitHub

Merge pull request #7 from SunAhong1993/master

Make the code compatible with both Python 2 and Python 3
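The hunks below fall into a handful of recurring Python 2→3 patterns: dictionary views that can no longer be indexed, print becoming a function, reduce and map changing location or return type, and strings needing explicit encoding for binary writes. As a quick orientation, here is a minimal sketch of the most common pattern in this diff (wrapping a dict.keys() view in list() before taking the first element); the 'data' key is a made-up placeholder:

```python
# In Python 3, dict.keys() returns a view object, not a list, so keys()[0]
# raises TypeError.  Wrapping the view in list() works on both Python 2 and 3.
shapes = {'data': [3, 224, 224]}   # placeholder input-shape dict for illustration

# Python 2 only:
#     input_name = shapes.keys()[0]
# Portable form used throughout this PR:
input_name = list(shapes.keys())[0]
print(input_name)  # data
```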
......@@ -42,7 +42,7 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path,
if code_output_path:
print_stderr('Saving source...')
with open(code_output_path, 'wb') as src_out:
src_out.write(transformer.transform_source())
src_out.write(str.encode(transformer.transform_source()))
print_stderr('set env variable before using converted model '\
'if used custom_layers:')
custom_pk_path = os.path.dirname(os.path.abspath(__file__))
......
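In the hunk above the output file is opened in binary mode ('wb'), and under Python 3 a binary file object only accepts bytes, so the generated source string is encoded before writing. A minimal sketch, with a placeholder string and file name standing in for the real transformer output and code_output_path:

```python
# Hypothetical stand-in for transformer.transform_source(); the real call
# returns the generated Python source as a str.
generated_source = "import paddle.fluid as fluid\n"

with open('mynet.py', 'wb') as src_out:
    # A plain str cannot be written to a binary file object in Python 3,
    # so encode it first (str.encode(s) is equivalent to s.encode()).
    src_out.write(str.encode(generated_source))
```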
......@@ -6,6 +6,7 @@
import sys
import os
import functools
def walk_dir(rootdir):
......@@ -28,11 +29,11 @@ def calc_diff(f1, f2):
d1 = d1.flatten()
d2 = d2.flatten()
d1_num = reduce(lambda x, y: x * y, d1.shape)
d2_num = reduce(lambda x, y: x * y, d2.shape)
d1_num = functools.reduce(lambda x, y: x * y, d1.shape)
d2_num = functools.reduce(lambda x, y: x * y, d2.shape)
if d1_num != d2_num:
print d1.shape
print d2.shape
print(d1.shape)
print(d2.shape)
assert (d1_num == d2_num), "their shape is not consistent"
try:
......
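Two Python 3 changes show up in this compare.py hunk: reduce() moved into functools, and print became a function. A small self-contained illustration (assumes numpy, which compare.py already imports):

```python
# In Python 3, reduce() is no longer a builtin; it lives in functools,
# and print is a function rather than a statement.
import functools
import numpy as np

d1 = np.zeros((2, 3, 4))
d1_num = functools.reduce(lambda x, y: x * y, d1.shape)
print(d1.shape)  # (2, 3, 4)
print(d1_num)    # 24
```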
......@@ -57,7 +57,7 @@ def build_model(net_file, net_name):
fluid = import_fluid()
inputs_dict = MyNet.input_shapes()
input_name = inputs_dict.keys()[0]
input_name = list(inputs_dict.keys())[0]
input_shape = inputs_dict[input_name]
images = fluid.layers.data(
name=input_name, shape=input_shape, dtype='float32')
......@@ -222,8 +222,8 @@ def infer(model_path, imgfile, net_file=None, net_name=None, debug=True):
feed_shapes = ret['feed_shapes']
net = ret['net']
input_name = feed_names[0]
input_shape = feed_shapes[0]
input_name = list(feed_names)[0]
input_shape = list(feed_shapes)[0]
np_images = load_data(imgfile, input_shape)
results = exe.run(program=program,
......@@ -249,7 +249,7 @@ def caffe_infer(prototxt, caffemodel, datafile):
import caffe
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
input_layer = net.blobs.keys()[0]
input_layer = list(net.blobs.keys())[0]
print('got name of input layer is:%s' % (input_layer))
input_shape = list(net.blobs[input_layer].data.shape[1:])
......@@ -266,7 +266,7 @@ def caffe_infer(prototxt, caffemodel, datafile):
for k, v in net.blobs.items():
k = k.replace('/', '_')
names.append(k)
results.append(v.data.copy())
results.append(v.data[0].copy())
dump_path = 'results.caffe'
dump_results(results, names, dump_path)
......
......@@ -29,14 +29,14 @@ fi
mkdir -p $results_root
prototxt="models.caffe/$model_name/${model_name}.prototxt"
caffemodel="models.caffe/${model_name}/${model_name}.caffemodel"
prototxt="$2/${model_name}.prototxt"
caffemodel="$2/${model_name}.caffemodel"
#1, dump layers' results from paddle
paddle_results="$results_root/${model_name}.paddle"
rm -rf $paddle_results
rm -rf "results.paddle"
bash ./tools/run.sh $model_name ./models.caffe/$model_name ./models/$model_name
bash ./tools/run.sh $model_name $2 $3
if [[ $? -ne 0 ]] || [[ ! -e "results.paddle" ]];then
echo "not found paddle's results, maybe failed to convert"
exit 1
......@@ -47,7 +47,7 @@ mv results.paddle $paddle_results
caffe_results="$results_root/${model_name}.caffe"
rm -rf $caffe_results
rm -rf "results.caffe"
PYTHON=`which cfpython`
PYTHON=`which python`
if [[ -z $PYTHON ]];then
PYTHON=`which python`
fi
......
......@@ -10,6 +10,8 @@
#
#set -x
if [[ $# -lt 3 ]];then
echo "usage:"
echo " bash $0 [model_name] [cf_model_path] [pd_model_path] [only_convert]"
......@@ -21,7 +23,6 @@ else
pd_model_path=$3
only_convert=$4
fi
proto_file=$cf_model_path/${model_name}.prototxt
caffemodel_file=$cf_model_path/${model_name}.caffemodel
weight_file=$pd_model_path/${model_name}.npy
......@@ -41,7 +42,7 @@ if [[ ! -e $pd_model_path ]];then
mkdir $pd_model_path
fi
PYTHON=`which cfpython`
PYTHON=`which python`
if [[ -z $PYTHON ]];then
PYTHON=`which python`
fi
......@@ -60,7 +61,7 @@ else
fi
if [[ -z $only_convert ]];then
PYTHON=`which pdpython`
PYTHON=`which python`
if [[ -z $PYTHON ]];then
PYTHON=`which python`
fi
......
......@@ -4,19 +4,20 @@
from .register import get_registered_layers
#custom layer import begins
import axpy
import flatten
import argmax
import reshape
import roipooling
import priorbox
import permute
import detection_out
import normalize
import select
import crop
import power
import reduction
from . import axpy
from . import flatten
from . import argmax
from . import argmax
from . import reshape
from . import roipooling
from . import priorbox
from . import permute
from . import detection_out
from . import normalize
from . import select
from . import crop
from . import power
from . import reduction
#custom layer import ends
......
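Background for the import rewrite above: Python 3 dropped implicit relative imports, so a bare `import axpy` inside the `custom_layers` package only resolves under Python 2. The explicit relative form works under both interpreters. A sketch of the package's `__init__.py` (not runnable outside the package; the `__future__` line is an optional extra, not part of this commit):

```python
# custom_layers/__init__.py (sketch) -- explicit relative imports resolve the
# sibling modules under both Python 2 and Python 3, whereas the bare
# `import axpy` form relied on Python 2's implicit relative import rules.
from __future__ import absolute_import  # optional: makes Python 2 behave like Python 3

from . import axpy
from . import flatten
```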
......@@ -22,7 +22,7 @@ def crop_shape(input_shape, shape=None):
input_shape.shape), "input_shape is diff with output_shape"
return shape
else:
raise Exception, "crop_shape input error"
raise Exception("crop_shape input error")
return None
......@@ -54,7 +54,7 @@ def crop_layer(input, name, shape=None, axis=2, offset=None):
output_shape = shape
input_tensor = input
else:
raise Exception, "crop_layer input error"
raise Exception("crop_layer input error")
assert len(output_shape) == len(
input_shape), "input_shape is diff with output_shape"
......
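The `raise Exception, "..."` comma form is Python 2-only syntax and a SyntaxError under Python 3; calling the exception class works under both. For illustration:

```python
# Python 2 only (SyntaxError in Python 3):
#     raise Exception, "crop_shape input error"
# Valid in both Python 2 and Python 3:
try:
    raise Exception("crop_shape input error")
except Exception as e:
    print(e)
```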
......@@ -152,14 +152,14 @@ class Graph(object):
data_shape = '--'
out_shape = node.output_shape or '--'
s.append('{:<20} {:<30} {:>20} {:>20}'.format(
node.kind, node.name, data_shape, tuple(out_shape)))
node.kind, node.name, data_shape, str(tuple(out_shape))))
else:
for d in node.data:
#data_shape = node.data[0].shape if node.data else '--'
data_shape = d.shape
out_shape = node.output_shape or '--'
s.append('{:<20} {:<30} {:>20} {:>20}'.format(
node.kind, node.name, data_shape, tuple(out_shape)))
node.kind, node.name, str(data_shape), str(tuple(out_shape))))
return '\n'.join(s)
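The str() wrapping above is needed because Python 3 raises TypeError when an alignment format spec such as {:>20} is applied to a non-string object like a tuple, while Python 2 quietly formatted the object's str(). A small illustration:

```python
out_shape = (1, 3, 224, 224)

# Raises TypeError on Python 3
# ("unsupported format string passed to tuple.__format__"):
#     '{:>20}'.format(out_shape)

# Works on both Python 2 and Python 3:
print('{:>20}'.format(str(out_shape)))
```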
......@@ -368,4 +368,4 @@ class NodeMapper(NodeDispatch):
return mapped_node
def commit(self, mapped_chains):
raise NotImplementedError('Must be implemented by subclass.')
raise NotImplementedError('Must be implemented by subclass.')
\ No newline at end of file
import re
import numbers
from collections import namedtuple
import custom_layers
import sys
from . import custom_layers
from .shapes import *
LAYER_DESCRIPTORS = {
......
......@@ -42,7 +42,7 @@ class MyNet(object):
def convert(cls, npy_model, fluid_path, outputs=None):
fluid = import_fluid()
shapes = cls.input_shapes()
input_name = shapes.keys()[0]
input_name = list(shapes.keys())[0]
feed_data = {}
for name, shape in shapes.items():
data_layer = fluid.layers.data(
......@@ -157,5 +157,5 @@ def generate_main_code(net_name):
if __name__ == "__main__":
""" just for testing
"""
print generate_net_code('Attribute', "{'data': [3, 277, 277]}")
print generate_main_code('Attribute')
print(generate_net_code('Attribute', "{'data': [3, 277, 277]}"))
print(generate_main_code('Attribute'))
......@@ -2,6 +2,8 @@ import sys
import os
import math
import numpy as np
from past.builtins import basestring
def import_fluid():
......@@ -108,7 +110,7 @@ class Network(object):
continue
layer = self.layers[op_name]
for param_name, data in data_dict[op_name].iteritems():
for param_name, data in data_dict[op_name].items():
try:
name = '%s_%s' % (op_name, param_name)
v = fluid.global_scope().find_var(name)
......
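dict.iteritems() was removed in Python 3; items() exists under both (a list in Python 2, a view in Python 3) and is sufficient here since the pairs are only iterated once. A quick check:

```python
params = {'weights': [1.0, 2.0], 'bias': [0.1]}

# Python 2 only: params.iteritems()
# Works on both:
for param_name, data in params.items():
    print(param_name, data)
```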
import numpy as np
from past.builtins import basestring
from ..errors import KaffeError, print_stderr
from ..graph import GraphBuilder, NodeMapper
from ..layers import NodeKind
......@@ -34,6 +34,8 @@ class PaddleNode(object):
'''Emits the Python source for this node.'''
# Format positional arguments
args = map(self.format, self.args)
args = list(args)
# Format any keyword arguments
if self.kwargs:
args += [self.pair(k, v) for k, v in self.kwargs]
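The added `args = list(args)` is needed because map() returns a lazy iterator in Python 3 rather than a list, so the result cannot be extended with `+=` or indexed; the same pattern appears in the later DataInjector hunk, where `map(data, v)` is wrapped in list(). A minimal illustration:

```python
values = map(str, [1, 2, 3])

# On Python 3 this is an iterator; it cannot be concatenated with a list
# or consumed twice.  Materialise it first:
values = list(values)
values += ['4']
print(values)  # ['1', '2', '3', '4']
```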
......@@ -335,7 +337,9 @@ class Transformer(object):
]
self.graph = graph.transformed(transformers)
#for the purpose of recording name mapping because of fused nodes
trace = SubNodeFuser.traced_names()
chg2real = {}
......@@ -372,11 +376,14 @@ class Transformer(object):
# Convert parameters to dictionaries
ParameterNamer(),
]
self.graph = self.graph.transformed(transformers)
self.params = {
node.name: node.data
for node in self.graph.nodes if node.data
}
self.params['caffe2fluid_name_trace'] = self.graph.get_name_trace()
return self.params
......
......@@ -38,7 +38,7 @@ class DataInjector(object):
caffe = get_caffe_resolver().caffe
net = caffe.Net(self.def_path, self.data_path, caffe.TEST)
data = lambda blob: blob.data
self.params = [(k, map(data, v)) for k, v in net.params.items()]
self.params = [(k, list(map(data, v))) for k, v in net.params.items()]
def load_using_pb(self):
data = get_caffe_resolver().NetParameter()
......