提交 1b472393 编写于 作者: J jiangjiajun

Add a mode to convert TensorFlow models without data format optimization

上级 b4beabb7
......@@ -57,11 +57,25 @@ def arg_parser():
action="store_true",
default=False,
help="get version of x2paddle")
parser.add_argument(
"--without_data_format_optimization",
"-wo",
action="store_true",
default=False,
help="tf model conversion without data format optimization")
parser.add_argument("--define_input_shape",
"-d",
action="store_true",
default=False,
help="define input shape for tf model")
return parser
def tf2paddle(model_path, save_dir):
def tf2paddle(model_path,
save_dir,
without_data_format_optimization=False,
define_input_shape=False):
# check tensorflow installation and version
try:
import tensorflow as tf
......@@ -77,17 +91,23 @@ def tf2paddle(model_path, save_dir):
from x2paddle.decoder.tf_decoder import TFDecoder
from x2paddle.op_mapper.tf_op_mapper import TFOpMapper
from x2paddle.op_mapper.tf_op_mapper_nhwc import TFOpMapperNHWC
from x2paddle.optimizer.tf_optimizer import TFOptimizer
print("Now translating model from tensorflow to paddle.")
model = TFDecoder(model_path)
mapper = TFOpMapper(model)
optimizer = TFOptimizer(mapper)
# neccesary optimization
optimizer.delete_redundance_code()
# optimizer below is experimental
optimizer.merge_activation()
optimizer.merge_bias()
model = TFDecoder(model_path, define_input_shape=define_input_shape)
if not without_data_format_optimization:
mapper = TFOpMapper(model)
optimizer = TFOptimizer(mapper)
# neccesary optimization
optimizer.delete_redundance_code()
# optimizer below is experimental
optimizer.merge_activation()
optimizer.merge_bias()
else:
mapper = TFOpMapperNHWC(model)
optimizer = TFOptimizer(mapper)
optimizer.delete_redundance_code()
mapper.save_inference_model(save_dir)
......@@ -155,7 +175,14 @@ def main():
if args.framework == "tensorflow":
assert args.model is not None, "--model should be defined while translating tensorflow model"
tf2paddle(args.model, args.save_dir)
without_data_format_optimization = False
define_input_shape = False
if args.without_data_format_optimization:
without_data_format_optimization = True
if args.define_input_shape:
define_input_shape = True
tf2paddle(args.model, args.save_dir, without_data_format_optimization,
define_input_shape)
elif args.framework == "caffe":
assert args.prototxt is not None and args.weight is not None, "--prototxt and --weight should be defined while translating caffe model"
......
......@@ -64,11 +64,8 @@ class Layer(object):
else:
layer_code = layer_code + key + "={}, ".format(
input.layer_name)
elif isinstance(input, str):
layer_code = layer_code + key + "={}, ".format(input)
else:
raise Exception(
"Element of inputs should GraphNode or String")
layer_code = layer_code + key + "={}, ".format(input)
elif isinstance(self.inputs, GraphNode):
if hasattr(self.inputs, "index"):
layer_code += (self.inputs.layer_name +
......
......@@ -39,7 +39,7 @@ class TFGraphNode(GraphNode):
self.pd_data_format = "NCHW"
self.fluid_code = FluidCode()
self.dtype_map = {1: "float32", 3: "int32", 4: "int8", 9: "int64"}
self.dtype_map = {1: "float32", 3: "int32", 4: "uint8", 9: "int64"}
@property
def out_shapes(self):
......@@ -52,7 +52,11 @@ class TFGraphNode(GraphNode):
@property
def dtype(self):
dtype = self.layer.attr["dtype"].type
keys = ['dtype', 'Tidx', 'T']
for k in keys:
dtype = self.layer.attr[k].type
if dtype > 0:
break
if dtype not in self.dtype_map:
raise Exception("Dtype[{}] not in dtype_map".format(dtype))
return self.dtype_map[dtype]
......@@ -198,9 +202,10 @@ class TFGraph(Graph):
class TFDecoder(object):
def __init__(self, pb_model, data_format="NHWC"):
def __init__(self, pb_model, data_format="NHWC", define_input_shape=False):
self.sess = tf.Session()
self.input_info = dict()
self.define_input_shape = define_input_shape
with gfile.FastGFile(pb_model, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
......@@ -229,10 +234,15 @@ class TFDecoder(object):
if layer.op != "Placeholder":
continue
graph_node = TFGraphNode(layer)
dtype = graph_node.dtype
dtype = graph_node.layer.attr['dtype'].type
print("========dtype", dtype)
need_define_shape = 0
if not graph_node.get_attr("shape"):
if self.define_input_shape:
need_define_shape = 3
elif graph_node.layer.attr[
'shape'].shape.unknown_rank or not graph_node.get_attr(
"shape"):
need_define_shape = 1
else:
value = graph_node.layer.attr["shape"].shape
......@@ -241,13 +251,21 @@ class TFDecoder(object):
need_define_shape = 2
if need_define_shape > 0:
shape = None
if graph_node.get_attr("shape"):
value = value = graph_node.layer.attr["shape"].shape
shape = [dim.size for dim in value.dim]
if need_define_shape == 1:
print("Unknown shape for input tensor[tensor name: \"{}\"]".
format(layer.name))
else:
elif need_define_shape == 2:
print(
"\nShape[now is {}] for input tensor[tensor name: \"{}\"] not support yet"
.format(shape, layer.name))
else:
print(
"Define shape[now is {}] for input tensor[tensor name: \"{}\']"
.format(shape, layer.name))
print(
"Use your keyboard type the shape of input tensor below :)")
......@@ -264,12 +282,14 @@ class TFDecoder(object):
for dim in shape.strip().split(',')
]
assert shape.count(None) <= 1, "Only one dimension can be None"
print("]]]]]]]]]dtype", dtype)
x2paddle_input = tf.placeholder(dtype=dtype,
shape=shape,
name="x2paddle_{}".format(
layer.name))
input_map["{}:0".format(layer.name)] = x2paddle_input
shape[shape.index(None)] = -1
if shape.count(None) > 0:
shape[shape.index(None)] = -1
self.input_info["x2paddle_{}".format(layer.name)] = (shape,
dtype)
else:
......
......@@ -57,7 +57,10 @@ class TFOpMapper(OpMapper):
'Sigmoid': ['sigmoid'],
'Exp': ['exp'],
'Rsqrt': ['rsqrt'],
'swish_f32': ['swish']
'swish_f32': ['swish'],
'LeakyRelu': ['leaky_relu', {
'alpha': 'alpha'
}]
}
elementwise_ops = {
'Add': 'elementwise_add',
......@@ -639,14 +642,20 @@ class TFOpMapper(OpMapper):
def Tile(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
expand_times = self.graph.get_node(node.layer.input[1], copy=True)
assert expand_times.layer_type == "Const"
self.omit_nodes.append(expand_times.layer_name)
expand_times = expand_times.value.tolist()
if expand_times.layer_type == "Const":
expand_times = expand_times.value.tolist()
else:
expand_times = self.decoder.infer_shape_tensor(expand_times)
if input.tf_data_format == "NHWC":
if len(input.out_shapes[0]) == 4:
expand_times = [expand_times[i] for i in [0, 3, 1, 2]]
elif len(input.out_shape[0]) == 3:
expand_times = [expand_times[i] for i in [2, 0, 1]]
for i in range(len(expand_times)):
if expand_times[i] < 0:
expand_times[i] = 1
attr = {"expand_times": expand_times}
node.fluid_code.add_layer("expand",
inputs=input,
......@@ -699,20 +708,27 @@ class TFOpMapper(OpMapper):
limit = self.graph.get_node(node.layer.input[1], copy=True)
delta = self.graph.get_node(node.layer.input[2], copy=True)
if start.layer_type == "Const":
self.omit_nodes.append(start.layer_name)
start = start.value
else:
start = self.decoder.infer_tensor(start)
if limit.layer_type == "Const":
self.omit_nodes.append(limit.layer_name)
limit = limit.value
else:
limit = self.decoder.infer_tensor(limit)
if delta.layer_type == "Const":
self.omit_nodes.append(delta.layer_name)
delta = delta.value
else:
delta = self.decoder.infer_tensor(delta)
self.omit_nodes.append(start.layer_name)
self.omit_nodes.append(limit.layer_name)
limit = self.decoder.infer_tensor(limit)
inputs = {"start": start, "end": limit, "step": delta}
attr = {"dtype": string(node.dtype)}
node.fluid_code.append("range",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer("range",
inputs=inputs,
output=node,
param_attr=None)
def Mean(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1011,3 +1027,39 @@ class TFOpMapper(OpMapper):
inputs=input,
output=node,
param_attr=attr)
def ResizeNearestNeighbor(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
resize_shape = self.graph.get_node(node.layer.input[1], copy=True)
self.omit_nodes.append(resize_shape.layer_name)
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(
resize_shape, node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {"align_corners": align_corners, "out_shape": resize_shape}
node.fluid_code.add_layer("resize_nearest",
inputs=input,
output=node,
param_attr=attr)
def ResizeBilinear(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
resize_shape = self.graph.get_node(node.layer.input[1], copy=True)
self.omit_nodes.append(resize_shape.layer_name)
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(
resize_shape, node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {
"align_corners": align_corners,
"out_shape": resize_shape,
"align_mode": 1
}
node.fluid_code.add_layer("resize_bilinear",
inputs=input,
output=node,
param_attr=attr)
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册