Unverified commit 41cc9f14, authored by Jason, committed by GitHub

Merge pull request #182 from jiangjiajun/develop-1.6

Develop 1.6
@@ -98,29 +98,12 @@ def tf2paddle(model_path,
     print("Now translating model from tensorflow to paddle.")
     model = TFDecoder(model_path, define_input_shape=define_input_shape)
-    if not without_data_format_optimization:
-        mapper = TFOpMapper(model)
-        optimizer = TFOptimizer(mapper)
-        # neccesary optimization
-        optimizer.delete_redundance_code()
-        # optimizer below is experimental
-        optimizer.optimize_elementwise_op()
-        optimizer.merge_activation()
-        optimizer.merge_bias()
-        optimizer.optimize_sub_graph()
-        # optimizer.merge_batch_norm()
-        # optimizer.merge_prelu()
-    else:
-        mapper = TFOpMapperNHWC(model)
-        optimizer = TFOptimizer(mapper)
-        optimizer.delete_redundance_code()
-        optimizer.strip_graph()
-        optimizer.merge_activation()
-        optimizer.merge_bias()
-        optimizer.make_nchw_input_output()
-        optimizer.remove_transpose()
+    mapper = TFOpMapperNHWC(model)
+    optimizer = TFOptimizer(mapper)
+    optimizer.delete_redundance_code()
+    optimizer.strip_graph()
+    # optimizer.merge_activation()
+    # optimizer.merge_bias()
     mapper.save_inference_model(save_dir)
@@ -182,21 +165,29 @@ def main():
     if args.version:
         import x2paddle
-        print("x2paddle-{} with python>=3.5, paddlepaddle>=1.5.0\n".format(
+        print("x2paddle-{} with python>=3.5, paddlepaddle>=1.6.1\n".format(
             x2paddle.__version__))
         return

-    assert args.framework is not None, "--framework is not defined(support tensorflow/caffe/onnx)"
-    assert args.save_dir is not None, "--save_dir is not defined"
-
     try:
         import paddle
         v0, v1, v2 = paddle.__version__.split('.')
-        if int(v0) != 1 or int(v1) < 5:
-            print("paddlepaddle>=1.5.0 is required")
+        if int(v0) == 0 and int(v1) == 0 and int(v2) == 0:
+            print(
+                "You have installed paddlepaddle-dev? We're not sure it's working for x2paddle!"
+            )
+            print(
+                "==================paddlepaddle>=1.6.1 is strongly recommended================="
+            )
+        elif int(v0) != 1 or int(v1) < 6:
+            print("paddlepaddle>=1.6.1 is required")
             return
     except:
         print("paddlepaddle not installed, use \"pip install paddlepaddle\"")
+        return
+
+    assert args.framework is not None, "--framework is not defined(support tensorflow/caffe/onnx)"
+    assert args.save_dir is not None, "--save_dir is not defined"

     if args.framework == "tensorflow":
         assert args.model is not None, "--model should be defined while translating tensorflow model"
...
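Note: the new gate treats dev builds (which report version `0.0.0`) as a warning rather than an error, while stable releases older than 1.6 still abort. A minimal standalone sketch of that rule, using a hypothetical `check_paddle_version` helper that is not part of x2paddle:

```python
def check_paddle_version(version):
    # dev builds report "0.0.0": warn, but let the conversion proceed
    v0, v1, v2 = version.split('.')
    if int(v0) == 0 and int(v1) == 0 and int(v2) == 0:
        print("paddlepaddle-dev detected, paddlepaddle>=1.6.1 is recommended")
        return True
    # stable releases older than 1.6 are rejected
    if int(v0) != 1 or int(v1) < 6:
        print("paddlepaddle>=1.6.1 is required")
        return False
    return True

assert check_paddle_version("1.6.1")
assert not check_paddle_version("1.5.2")
assert check_paddle_version("0.0.0")
```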
@@ -80,6 +80,11 @@ class Layer(object):
         param_attr = collections.OrderedDict(self.param_attr)
         for key, value in param_attr.items():
+            if isinstance(value, GraphNode):
+                value_name = value.layer_name
+                if hasattr(value, "index"):
+                    value_name += "[{}]".format(value.index)
+                value = value_name
             if '\n' in str(value):
                 value = string(str(value).replace('\n', ','))
             layer_code = layer_code + key + "={}, ".format(value)
...
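Note: `Layer` can now take a `GraphNode` as an attribute value and emits a reference to its output variable instead of its repr. A small sketch of the resulting string, with a stand-in node class (the real `GraphNode` lives in x2paddle's core graph module):

```python
class StubGraphNode(object):
    """Stand-in for x2paddle's GraphNode, for illustration only."""

    def __init__(self, layer_name, index=None):
        self.layer_name = layer_name
        if index is not None:
            self.index = index


def attr_value_to_code(value):
    # mirror of the new branch above: emit "layer_name" or "layer_name[i]"
    value_name = value.layer_name
    if hasattr(value, "index"):
        value_name += "[{}]".format(value.index)
    return value_name


print(attr_value_to_code(StubGraphNode("conv1_shape")))       # conv1_shape
print(attr_value_to_code(StubGraphNode("split_0", index=1)))  # split_0[1]
```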
@@ -389,26 +389,10 @@ class TFDecoder(object):
         compare01 = (results[0] == results[1])
         compare12 = (results[1] == results[2])

-        if compare01.all() and compare12.all():
-            return results[0].tolist()
-
-        if (compare01 == compare12).all():
-            index = numpy.argwhere(compare01 == False).flatten()
-            if index.shape[0] != 1:
-                raise Exception("There's not only one unstable dimension")
-            results[0][index[0]] = -1
-
-            index = numpy.argwhere(results[0] < 0).flatten()
-            if index.shape[0] > 2:
-                print("Warning: More than two dimension less than zero")
-            if index.shape[0] == 2 and out_shape is not None:
-                if out_shape[index[1]] > 0:
-                    results[0][index[1]] = out_shape[index[1]]
-                else:
-                    results[0][index[0]] = out_shape[index[0]]
-            return results[0].tolist()
-        else:
-            raise Exception("Couldn't infer a stable shape shape tensor value")
+        compare = compare01 & compare12
+        index = numpy.argwhere(compare == False).flatten()
+        results[0][index] = -1
+        return results[0].tolist()

     def infer_tensor_shape(self, graph_node):
         if hasattr(graph_node, "index"):
@@ -436,11 +420,7 @@ class TFDecoder(object):
         if compare01.all() and compare12.all():
             return shape[0].tolist()

-        if (compare01 == compare12).all():
-            index = numpy.argwhere(compare01 == False).flatten()
-            if index.shape[0] != 1:
-                raise Exception("There's not only one unstable dimension")
-            if index[0] != 0:
-                raise Exception("Batch size not in the first dimension")
-            shapes[0][0] = -1
-            return shapes[0].tolist()
+        compare = compare01 & compare12
+        index = numpy.argwhere(compare == False).flatten()
+        shapes[0][index] = -1
+        return shapes[0].tolist()
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from x2paddle.decoder.tf_decoder import TFGraph
+from x2paddle.decoder.tf_decoder import TFGraph, TFGraphNode
 from x2paddle.core.op_mapper import OpMapper
 from x2paddle.core.util import *
 import inspect
@@ -31,6 +31,33 @@ def get_same_padding(in_size, kernel_size, stride):
     return [pad0, pad1]


+def process_pack_shape(graph, param, shape_value):
+    pack_inputs = [
+        graph.get_node(name, copy=True) for name in param.layer.input
+    ]
+    all_const_value = 0
+    for i in range(len(pack_inputs)):
+        if pack_inputs[i].layer_type == "Const":
+            pack_inputs[i] = pack_inputs[i].value
+            all_const_value += 1
+        elif shape_value[i] > 0:
+            pack_inputs[i] = shape_value[i]
+            all_const_value += 1
+        else:
+            if hasattr(pack_inputs[i], "index"):
+                index = pack_inputs[i].index
+                pack_inputs[i] = pack_inputs[i].layer_name + "[{}]".format(
+                    index)
+            else:
+                pack_inputs[i] = pack_inputs[i].layer_name
+    string_params = "["
+    for i in range(len(pack_inputs)):
+        string_params += "{}, ".format(pack_inputs[i])
+    string_params = string_params.strip(", ") + "]"
+    return string_params
+
+
 class TFOpMapperNHWC(OpMapper):
     directly_map_ops = {
         'Relu': ['relu'],
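Note: `process_pack_shape` turns a `Pack` node's inputs into one bracketed string that is pasted verbatim into the generated fluid code; constant entries become literals and unknown entries become variable references. A hypothetical illustration of the string it builds (the input values here are made up):

```python
# each Pack input is either a resolved literal or a "layer_name[index]"
# reference to a tensor produced earlier in the generated program
pack_inputs = [1, "Shape_1[1]", 192]
string_params = "["
for i in range(len(pack_inputs)):
    string_params += "{}, ".format(pack_inputs[i])
string_params = string_params.strip(", ") + "]"
print(string_params)  # [1, Shape_1[1], 192]
```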
@@ -122,28 +149,10 @@ class TFOpMapperNHWC(OpMapper):
             tf_param = node.get_attr(tf_param_name)
             attr[pd_param_name] = tf_param
-        if len(input.out_shapes[0]) == 4 and op_info[0] != 'shape':
-            attr1 = {"perm": [0, 3, 1, 2]}
-            node.fluid_code.add_layer('transpose',
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr1)
-            input = node
-            node.fluid_code.add_layer(op_info[0],
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-            input = node
-            attr2 = {"perm": [0, 2, 3, 1]}
-            node.fluid_code.add_layer('transpose',
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr2)
-        else:
-            node.fluid_code.add_layer(op_info[0],
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
+        node.fluid_code.add_layer(op_info[0],
+                                  inputs=input,
+                                  output=node,
+                                  param_attr=attr)

     def elementwise_map(self, node):
         assert node.layer_type in self.elementwise_ops
@@ -216,30 +225,11 @@ class TFOpMapperNHWC(OpMapper):
                                           output="y_tmp",
                                           param_attr=attr)
                 y_input = "y_tmp"
-            if len(x_shape) == 4 and len(y_shape) == 4:
-                node.fluid_code.add_layer("transpose",
-                                          inputs=x_input,
-                                          output=x_input,
-                                          param_attr={'perm': [0, 3, 1, 2]})
-                node.fluid_code.add_layer("transpose",
-                                          inputs=y_input,
-                                          output=y_input,
-                                          param_attr={'perm': [0, 3, 1, 2]})
-                inputs = {"x": x_input, "y": y_input}
-                node.fluid_code.add_layer(op_type,
-                                          inputs=inputs,
-                                          output=node,
-                                          param_attr=None)
-                node.fluid_code.add_layer("transpose",
-                                          inputs=node,
-                                          output=node,
-                                          param_attr={'perm': [0, 2, 3, 1]})
-            else:
-                inputs = {"x": x_input, "y": y_input}
-                node.fluid_code.add_layer(op_type,
-                                          inputs=inputs,
-                                          output=node,
-                                          param_attr=None)
+            inputs = {"x": x_input, "y": y_input}
+            node.fluid_code.add_layer(op_type,
+                                      inputs=inputs,
+                                      output=node,
+                                      param_attr=None)

     def Placeholder(self, node):
         shape = node.out_shapes[0]
@@ -265,6 +255,7 @@ class TFOpMapperNHWC(OpMapper):
         dtype = node.dtype
         value = node.value
         initializer = "Constant(0.0)"
+
         if len(shape) == 0:
             assert value.size == 1, "Unexpected situation happend"
             shape = [1]
@@ -300,44 +291,24 @@ class TFOpMapperNHWC(OpMapper):
     def MaxPool(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)

-        in_shape = input.out_shapes[0]
-        if in_shape.count(-1) > 2:
-            in_shape = self.decoder.infer_tensor(input).shape
-
         k_size = node.get_attr("ksize")
         strides = node.get_attr("strides")
         data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
         channel_first = data_format == "NCHW"

-        if not channel_first:
-            attr = {"perm": [0, 3, 1, 2]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
-            strides = [strides[i] for i in [0, 3, 1, 2]]
-            k_size = [k_size[i] for i in [0, 3, 1, 2]]
-            input = node
-
         attr = {
-            "pool_size": k_size[2:4],
+            "pool_size": k_size[1:3],
             "pool_type": string("max"),
-            "pool_stride": strides[2:4],
-            "pool_padding": string(pad_mode)
+            "pool_stride": strides[1:3],
+            "pool_padding": string(pad_mode),
+            "data_format": string("NHWC")
         }
         node.fluid_code.add_layer("pool2d",
                                   inputs=input,
                                   output=node,
                                   param_attr=attr)

-        if not channel_first:
-            attr = {"perm": [0, 2, 3, 1]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=node,
-                                      output=node,
-                                      param_attr=attr)
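Note: the transpose sandwich (NHWC -> NCHW -> pool2d -> NHWC) is gone; pooling now runs natively on NHWC data, so kernel and stride come from positions 1:3 of the NHWC attribute vectors. A sketch of the fluid code the mapper now emits, assuming paddlepaddle>=1.6.1 as required above (`x` is a hypothetical input):

```python
import paddle.fluid as fluid

# fluid.layers.data prepends the batch dimension, giving [-1, 224, 224, 3]
x = fluid.layers.data(name="x", shape=[224, 224, 3], dtype="float32")
pool = fluid.layers.pool2d(x,
                           pool_size=[2, 2],    # ksize[1:3] in NHWC order
                           pool_type="max",
                           pool_stride=[2, 2],  # strides[1:3] in NHWC order
                           pool_padding="SAME",
                           data_format="NHWC")
```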
     def Conv2D(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         kernel = self.graph.get_node(node.layer.input[1], copy=True)
@@ -345,51 +316,33 @@ class TFOpMapperNHWC(OpMapper):
         self.add_omit_nodes(kernel.layer_name, node.layer_name)

         in_shape = input.out_shapes[0]
-        if in_shape.count(-1) > 2:
+        if in_shape[3] < 0:
             in_shape = self.decoder.infer_tensor(input).shape
         k_size = kernel.out_shapes[0]
-        if k_size.count(-1) > 2:
+        if k_size.count(-1) > 0:
             k_size = self.decoder.infer_tensor(kernel).shape

         strides = node.get_attr("strides")
         dilations = node.get_attr("dilations")
-        data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
-        channel_first = data_format == "NCHW"

         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
             kernel.value, (3, 2, 0, 1))

-        if not channel_first:
-            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
-            strides = [strides[i] for i in [0, 3, 1, 2]]
-            dilations = [dilations[i] for i in [0, 3, 1, 2]]
-            attr = {"perm": [0, 3, 1, 2]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-            input = node
-
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
             "num_filters": k_size[3],
             "filter_size": k_size[0:2],
-            "stride": strides[2:4],
-            "dilation": dilations[2:4],
-            "padding": string(pad_mode)
+            "stride": strides[1:3],
+            "dilation": dilations[1:3],
+            "padding": string(pad_mode),
+            "data_format": string("NHWC")
         }
         node.fluid_code.add_layer("conv2d",
                                   inputs=input,
                                   output=node,
                                   param_attr=attr)

-        if not channel_first:
-            attr = {"perm": [0, 2, 3, 1]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=node,
-                                      output=node,
-                                      param_attr=attr)
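Note: TensorFlow stores conv kernels as [filter_H, filter_W, in_C, out_C] while Paddle expects [out_C, in_C, filter_H, filter_W]; that is all the (3, 2, 0, 1) transpose above does. A quick numpy check:

```python
import numpy

tf_kernel = numpy.zeros((3, 3, 16, 32))  # [H, W, in_C, out_C] in TensorFlow
pd_kernel = numpy.transpose(tf_kernel, (3, 2, 0, 1))
print(pd_kernel.shape)  # (32, 16, 3, 3), i.e. [out_C, in_C, H, W]
```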
     def BiasAdd(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
@@ -406,8 +359,6 @@ class TFOpMapperNHWC(OpMapper):
         beta = self.graph.get_node(node.layer.input[2], copy=True)
         moving_mean = self.graph.get_node(node.layer.input[3], copy=True)
         moving_var = self.graph.get_node(node.layer.input[4], copy=True)
-        data_format = node.get_attr("data_format").decode()
-        channel_first = data_format == "NCHW"

         assert gamma.layer_type == "Const"
         assert beta.layer_type == "Const"
@@ -418,21 +369,14 @@ class TFOpMapperNHWC(OpMapper):
         self.add_omit_nodes(moving_mean.layer_name, node.layer_name)
         self.add_omit_nodes(moving_var.layer_name, node.layer_name)

-        if not channel_first:
-            attr = {"perm": [0, 3, 1, 2]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-            input = node
-
         attr = {
             "epsilon": node.get_attr("epsilon"),
             "param_attr": string(gamma.layer_name),
             "bias_attr": string(beta.layer_name),
             "moving_mean_name": string(moving_mean.layer_name),
             "moving_variance_name": string(moving_var.layer_name),
-            "is_test": True
+            "is_test": True,
+            "data_layout": string("NHWC")
         }

         node.fluid_code.add_layer("batch_norm",
@@ -440,13 +384,6 @@ class TFOpMapperNHWC(OpMapper):
                                   output=node,
                                   param_attr=attr)

-        if not channel_first:
-            attr = {"perm": [0, 2, 3, 1]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=node,
-                                      output=node,
-                                      param_attr=attr)
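Note: batch_norm follows the same pattern: the NHWC input is no longer transposed; the layout is passed through `data_layout` instead. A sketch of the emitted call, assuming paddlepaddle>=1.6.1 (`x` and the default parameter attrs are hypothetical):

```python
import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[112, 112, 32], dtype="float32")
bn = fluid.layers.batch_norm(x,
                             epsilon=1e-5,
                             is_test=True,
                             data_layout="NHWC")
```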
     def DepthwiseConv2dNative(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         kernel = self.graph.get_node(node.layer.input[1], copy=True)
@@ -454,10 +391,10 @@ class TFOpMapperNHWC(OpMapper):
         self.add_omit_nodes(kernel.layer_name, node.layer_name)

         in_shape = input.out_shapes[0]
-        if in_shape.count(-1) > 2:
+        if in_shape[3] < 0:
             in_shape = self.decoder.infer_tensor(input).shape
         k_size = kernel.out_shapes[0]
-        if k_size.count(-1) > 2:
+        if k_size.count(-1) > 0:
             k_size = self.decoder.infer_tensor(kernel).shape

         strides = node.get_attr("strides")
@@ -469,136 +406,103 @@ class TFOpMapperNHWC(OpMapper):
         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
             kernel.value, (2, 3, 0, 1))

-        if not channel_first:
-            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
-            strides = [strides[i] for i in [0, 3, 1, 2]]
-            dilations = [dilations[i] for i in [0, 3, 1, 2]]
-            attr = {"perm": [0, 3, 1, 2]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-            input = node
-
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
-            "num_filters": in_shape[1],
+            "num_filters": in_shape[3],
             "filter_size": k_size[0:2],
-            "stride": strides[2:4],
-            "dilation": dilations[2:4],
-            "groups": k_size[3] * in_shape[1],
+            "stride": strides[1:3],
+            "dilation": dilations[1:3],
+            "groups": k_size[3] * in_shape[3],
             "use_cudnn": False,
-            "padding": string(pad_mode)
+            "padding": string(pad_mode),
+            "data_format": string("NHWC")
         }
         node.fluid_code.add_layer("conv2d",
                                   inputs=input,
                                   output=node,
                                   param_attr=attr)

-        if not channel_first:
-            attr = {"perm": [0, 2, 3, 1]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=node,
-                                      output=node,
-                                      param_attr=attr)
     def Reshape(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         param = self.graph.get_node(node.layer.input[1], copy=True)
-        is_variable = False
+        attr = None
         if param.layer_type == "Const":
             attr = {"shape": param.value.tolist()}
+            inputs = {"x": input}
             self.add_omit_nodes(param.layer_name, node.layer_name)
         else:
-            # Here is a trick method to solove tensor parameter in tensorflow
-            shape = self.decoder.infer_shape_tensor(param, node.out_shapes[0])
-            if shape.count(-1) <= 1:
-                attr = {"shape": shape}
-                self.add_omit_nodes(param.layer_name, node.layer_name)
-            else:
-                assert len(param.out_shapes[0]
-                           ) == 1, "Unexpected situation of shape parameter"
-                attr = {"shape": [-1]}
-                node.fluid_code.add_layer("reshape",
-                                          inputs=param,
-                                          output="shape_param",
-                                          param_attr=attr)
-                attr = {"num_or_sections": param.out_shapes[0][0], "dim": 0}
-                node.fluid_code.add_layer("split",
-                                          inputs="shape_param",
-                                          output=node,
-                                          param_attr=attr)
-                new_param = "["
-                for i in range(param.out_shapes[0][0]):
-                    new_param += (node.layer_name + "[{}]".format(i) + ", ")
-                new_param = new_param.strip(", ") + "]"
-                attr = {"shape": new_param}
-                is_variable = True
-
-        # to change [192, -1]->[-1, 192], allways put -1 in the first dimension
-        # optimization for Paddle-Lite
-        in_shape = input.out_shapes[0]
-        if not is_variable and in_shape.count(-1) < 1:
-            total_size = 1
-            for i in range(len(in_shape)):
-                total_size *= in_shape[i]
-            for i in range(len(attr["shape"])):
-                if attr["shape"][i] == 0:
-                    attr["shape"][i] = in_shape[i]
-                if attr["shape"][i] != -1:
-                    total_size /= attr["shape"][i]
-            if attr["shape"].count(-1) > 0:
-                index = attr["shape"].index(-1)
-                attr["shape"][index] = int(total_size)
-            attr["shape"][0] = -1
-
+            inputs = {"x": input, "shape": param}
+            shape_value = self.decoder.infer_shape_tensor(param)
+            if param.layer_type == "Pack":
+                pack_inputs = [
+                    self.graph.get_node(name, copy=True)
+                    for name in param.layer.input
+                ]
+                all_const_value = 0
+                for i in range(len(pack_inputs)):
+                    if pack_inputs[i].layer_type == "Const":
+                        pack_inputs[i] = pack_inputs[i].value
+                        all_const_value += 1
+                    elif shape_value[i] > 0:
+                        pack_inputs[i] = shape_value[i]
+                        all_const_value += 1
+                    else:
+                        if hasattr(pack_inputs[i], "index"):
+                            index = pack_inputs[i].index
+                            pack_inputs[i] = pack_inputs[
+                                i].layer_name + "[{}]".format(index)
+                        else:
+                            pack_inputs[i] = pack_inputs[i].layer_name
+
+                ### special optimize for paddle-lite
+                in_size = 1
+                in_shape = input.out_shapes[0]
+                for i in range(len(in_shape)):
+                    in_size *= in_shape[i]
+                if all_const_value == len(pack_inputs) and in_size > 0:
+                    if pack_inputs[0] > 0 and pack_inputs.count(-1) == 1:
+                        for i in range(len(pack_inputs)):
+                            in_size /= pack_inputs[i]
+                        index = pack_inputs.index(-1)
+                        pack_inputs[index] = in_size * -1
+                        pack_inputs[0] = -1
+                if all_const_value == len(pack_inputs) and pack_inputs.count(
+                        -1) == 0:
+                    pack_inputs[0] = -1
+                ###################################
+                string_params = "["
+                for i in range(len(pack_inputs)):
+                    string_params += "{}, ".format(pack_inputs[i])
+                string_params = string_params.strip(", ") + "]"
+                inputs["shape"] = string_params
         node.fluid_code.add_layer("reshape",
-                                  inputs=input,
+                                  inputs=inputs,
                                   output=node,
                                   param_attr=attr)
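Note: the Paddle-Lite-friendly rewrite above resolves a fully-known target shape and then moves the batch dimension to the front as -1 (the old comment's [192, -1] -> [-1, ...] trick). A condensed sketch of that branch with illustrative numbers:

```python
def lite_friendly_shape(pack_inputs, in_size):
    # in_size is the product of the input's static dimensions
    if pack_inputs[0] > 0 and pack_inputs.count(-1) == 1:
        for v in pack_inputs:
            in_size /= v  # dividing by the -1 entry flips the sign
        pack_inputs[pack_inputs.index(-1)] = int(in_size * -1)
        pack_inputs[0] = -1
    elif pack_inputs.count(-1) == 0:
        pack_inputs[0] = -1
    return pack_inputs

print(lite_friendly_shape([192, -1], 768))  # [-1, 4]
print(lite_friendly_shape([2, 384], 768))   # [-1, 384]
```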
     def AvgPool(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)

-        in_shape = input.out_shapes[0]
-        if in_shape.count(-1) > 2:
-            in_shape = self.decoder.infer_tensor(input).shape
-
         k_size = node.get_attr("ksize")
         strides = node.get_attr("strides")
-        data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
-        channel_first = data_format == "NCHW"
-
-        if not channel_first:
-            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
-            strides = [strides[i] for i in [0, 3, 1, 2]]
-            k_size = [k_size[i] for i in [0, 3, 1, 2]]
-            attr = {"perm": [0, 3, 1, 2]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-            input = node

         attr = {
-            "pool_size": k_size[2:4],
+            "pool_size": k_size[1:3],
             "pool_type": string("avg"),
-            "pool_stride": strides[2:4],
-            "pool_padding": string(pad_mode)
+            "pool_stride": strides[1:3],
+            "pool_padding": string(pad_mode),
+            "data_format": string("NHWC")
         }
         node.fluid_code.add_layer("pool2d",
                                   inputs=input,
                                   output=node,
                                   param_attr=attr)

-        if not channel_first:
-            attr = {"perm": [0, 2, 3, 1]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=node,
-                                      output=node,
-                                      param_attr=attr)

     def SplitV(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         num_sections = self.graph.get_node(node.layer.input[1], copy=True)
@@ -638,14 +542,9 @@ class TFOpMapperNHWC(OpMapper):
     def Tile(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         expand_times = self.graph.get_node(node.layer.input[1], copy=True)
-        self.add_omit_nodes(expand_times.layer_name, node.layer_name)
         if expand_times.layer_type == "Const":
             expand_times = expand_times.value.tolist()
-        else:
-            expand_times = self.decoder.infer_shape_tensor(expand_times)
-            for i in range(len(expand_times)):
-                if expand_times[i] < 0:
-                    expand_times[i] = 1
+            self.add_omit_nodes(expand_times.layer_name, node.layer_name)
         attr = {"expand_times": expand_times}
         node.fluid_code.add_layer("expand",
                                   inputs=input,
@@ -656,12 +555,26 @@ class TFOpMapperNHWC(OpMapper):
         inputs = [
             self.graph.get_node(name, copy=True) for name in node.layer.input
         ]
+        if len(inputs) == 1 and len(inputs[0].out_shapes[0]) == 0:
+            input_name = inputs[0].layer_name
+            if hasattr(inputs[0], "index"):
+                input_name += "[{}]".format(inputs[0].index)
+            node.fluid_code.add_note("{} = {}".format(node.layer_name,
+                                                      input_name))
+            return
         axis = node.get_attr("axis")
         attr = {"axis": axis}
         node.fluid_code.add_layer("stack",
                                   inputs=inputs,
                                   output=node,
                                   param_attr=attr)
+        input_shape_sample = inputs[0].out_shapes[0]
+        if len(input_shape_sample) == 0:
+            attr = {"shape": [-1]}
+            node.fluid_code.add_layer("reshape",
+                                      inputs=node,
+                                      output=node,
+                                      param_attr=attr)
     def Pad(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
@@ -669,36 +582,17 @@ class TFOpMapperNHWC(OpMapper):
         assert paddings.layer_type == "Const", "Padding should be Const"
         self.add_omit_nodes(paddings.layer_name, node.layer_name)
         paddings = paddings.value.flatten().tolist()
-        data_format = input.tf_data_format

         if len(input.out_shapes[0]) == 4:
             new_padding = None
-            if input.tf_data_format == "NHWC":
-                if paddings[0] + paddings[1] + paddings[6] + paddings[7] == 0:
-                    new_padding = paddings[2:6]
-            else:
-                if paddings[0] + paddings[1] + paddings[2] + paddings[3] == 0:
-                    new_padding = paddings[4:]
+            if paddings[0] + paddings[1] + paddings[6] + paddings[7] == 0:
+                new_padding = paddings[2:6]
             if new_padding is not None:
-                if input.tf_data_format == "NHWC":
-                    attr = {"perm": [0, 3, 1, 2]}
-                    node.fluid_code.add_layer("transpose",
-                                              inputs=input,
-                                              output=node,
-                                              param_attr=attr)
-                    input = node
-                attr = {"paddings": new_padding}
+                attr = {"paddings": new_padding, "data_format": string("NHWC")}
                 node.fluid_code.add_layer("pad2d",
                                           inputs=input,
                                           output=node,
                                           param_attr=attr)
-                if input.tf_data_format == "NHWC":
-                    attr = {"perm": [0, 2, 3, 1]}
-                    node.fluid_code.add_layer("transpose",
-                                              inputs=node,
-                                              output=node,
-                                              param_attr=attr)
                 return

         attr = {"paddings": paddings}
@@ -711,21 +605,19 @@ class TFOpMapperNHWC(OpMapper):
         start = self.graph.get_node(node.layer.input[0], copy=True)
         limit = self.graph.get_node(node.layer.input[1], copy=True)
         delta = self.graph.get_node(node.layer.input[2], copy=True)
-        self.add_omit_nodes(start.layer_name, node.layer_name)
-        self.add_omit_nodes(limit.layer_name, node.layer_name)
-        self.add_omit_nodes(delta.layer_name, node.layer_name)
+        all_param_const = -2
         if start.layer_type == "Const":
+            self.add_omit_nodes(start.layer_name, node.layer_name)
             start = start.value
-        else:
-            start = self.decoder.infer_tensor(start)
+            all_param_const += 1
         if limit.layer_type == "Const":
+            self.add_omit_nodes(limit.layer_name, node.layer_name)
             limit = limit.value
-        else:
-            limit = self.decoder.infer_tensor(limit)
+            all_param_const += 1
         if delta.layer_type == "Const":
+            self.add_omit_nodes(delta.layer_name, node.layer_name)
             delta = delta.value
-        else:
-            delta = self.decoder.infer_tensor(delta)
+            all_param_const += 1

         dtype = node.dtype
         inputs = {
             "start": start,
@@ -760,14 +652,24 @@ class TFOpMapperNHWC(OpMapper):
         inputs = {"x": x, "y": y}
         # fix paddle shape infer problem
         # should be removed after paddle 1.6
-        if x.out_shapes[0][-1] < 0 and y.out_shapes[0][0] > 0:
-            shape = x.out_shapes[0]
-            shape[-1] = y.out_shapes[0][0]
-            attr = {"shape": shape}
-            node.fluid_code.add_layer("reshape",
-                                      inputs=x,
-                                      output=x,
-                                      param_attr=attr)
+        x_last_dim = x.out_shapes[0][-1]
+        y_last_dim = y.out_shapes[0][0]
+        certain_dim = x_last_dim if x_last_dim > y_last_dim else y_last_dim
+        shape = x.out_shapes[0]
+        shape[-1] = certain_dim
+        attr = {"shape": shape}
+        node.fluid_code.add_layer("reshape",
+                                  inputs=x,
+                                  output=x,
+                                  param_attr=attr)
+        shape = y.out_shapes[0]
+        shape[0] = certain_dim
+        attr = {"shape": shape}
+        node.fluid_code.add_layer("reshape",
+                                  inputs=y,
+                                  output=y,
+                                  param_attr=attr)

         attr = {"transpose_x": transpose_a, "transpose_y": transpose_b}
         node.fluid_code.add_layer("matmul",
                                   inputs=inputs,
@@ -874,31 +776,32 @@ class TFOpMapperNHWC(OpMapper):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         begin = self.graph.get_node(node.layer.input[1], copy=True)
         size = self.graph.get_node(node.layer.input[2], copy=True)
-        self.add_omit_nodes(begin.layer_name, node.layer_name)
-        self.add_omit_nodes(size.layer_name, node.layer_name)
+        attr = dict()
+        inputs = {"x": input}
         if begin.layer_type == "Const":
+            self.add_omit_nodes(begin.layer_name, node.layer_name)
             begin = begin.value.tolist()
+            attr["offsets"] = begin
         else:
-            begin = self.decoder.infer_tensor(begin).tolist()
-        if size.layer_type == "const":
+            inputs["offsets"] = begin
+        if size.layer_type == "Const":
+            self.add_omit_nodes(size.layer_name, node.layer_name)
             size = size.value.tolist()
+            attr["shape"] = size
         else:
-            size = self.decoder.infer_tensor(size).tolist()
-        for i in range(len(size)):
-            if size[i] < 0:
-                size[i] = 99999999
-            else:
-                size[i] = size[i] + begin[i]
-
-        attr = {
-            "axes": [i for i in range(len(size))],
-            "starts": begin,
-            "ends": size
-        }
-        node.fluid_code.add_layer("slice",
-                                  inputs=input,
+            inputs["shape"] = size
+        if isinstance(begin, TFGraphNode) and begin.layer_type == "Pack":
+            begin = process_pack_shape(self.graph, begin,
+                                       self.decoder.infer_shape_tensor(begin))
+            inputs["offsets"] = begin
+        if isinstance(size, TFGraphNode) and size.layer_type == "Pack":
+            size = process_pack_shape(self.graph, size,
+                                      self.decoder.infer_shape_tensor(size))
+            inputs["shape"] = size
+
+        node.fluid_code.add_layer("crop_tensor",
+                                  inputs=inputs,
                                   output=node,
                                   param_attr=attr)
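Note: `Slice` now maps to `crop_tensor`, which, like TF's slice, takes per-dimension offsets and an output shape, and in paddlepaddle>=1.6 accepts tensors as well as constant lists for both. A minimal sketch of the constant case (shapes are made up):

```python
import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[5, 5], dtype="float32",
                      append_batch_size=False)
# offsets/shape mirror TF slice's begin/size
out = fluid.layers.crop_tensor(x, shape=[3, 3], offsets=[1, 1])
```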
@@ -919,10 +822,10 @@ class TFOpMapperNHWC(OpMapper):
                                                 node.out_shapes[0])

         in_shape = input.out_shapes[0]
-        if in_shape.count(-1) > 2:
+        if in_shape[3] < 0:
             in_shape = self.decoder.infer_tensor(input).shape
         k_size = kernel.out_shapes[0]
-        if k_size.count(-1) > 2:
+        if k_size.count(-1) > 0:
             k_size = self.decoder.infer_tensor(kernel).shape

         pad_mode = node.get_attr("padding").decode()
@@ -933,41 +836,23 @@ class TFOpMapperNHWC(OpMapper):
         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
             kernel.value, (3, 2, 0, 1))

-        if not channel_first:
-            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
-            strides = [strides[i] for i in [0, 3, 1, 2]]
-            dilations = [dilations[i] for i in [0, 3, 1, 2]]
-            attr = {"perm": [0, 3, 1, 2]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-            input = node
-        else:
-            self.data_format_propagation(node)
-
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
             "num_filters": k_size[2],
             "filter_size": k_size[0:2],
-            "stride": strides[2:4],
-            "dilation": dilations[2:4],
+            "stride": strides[1:3],
+            "dilation": dilations[1:3],
             "output_size": out_shape[1:3],
-            "padding": string(pad_mode)
+            "padding": string(pad_mode),
+            "data_format": string("NHWC")
         }
         node.fluid_code.add_layer("conv2d_transpose",
                                   inputs=input,
                                   output=node,
                                   param_attr=attr)

-        if not channel_first:
-            attr = {"perm": [0, 2, 3, 1]}
-            node.fluid_code.add_layer("transpose",
-                                      inputs=node,
-                                      output=node,
-                                      param_attr=attr)
     def Max(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         reduce_idx = self.graph.get_node(node.layer.input[1], copy=True)
@@ -1038,56 +923,35 @@ class TFOpMapperNHWC(OpMapper):
     def ResizeNearestNeighbor(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         resize_shape = self.graph.get_node(node.layer.input[1], copy=True)
-        self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
         if resize_shape.layer_type == "Const":
+            self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
             resize_shape = resize_shape.value.tolist()
-        else:
-            resize_shape = self.decoder.infer_shape_tensor(
-                resize_shape, node.out_shapes[0])
         align_corners = node.get_attr("align_corners")
-        attr = {"perm": [0, 3, 1, 2]}
-        node.fluid_code.add_layer("transpose",
-                                  inputs=input,
-                                  output=node,
-                                  param_attr=attr)
-        attr = {"align_corners": align_corners, "out_shape": resize_shape}
+        attr = {
+            "align_corners": align_corners,
+            "out_shape": resize_shape,
+            "data_format": string("NHWC")
+        }
         node.fluid_code.add_layer("resize_nearest",
-                                  inputs=node,
-                                  output=node,
-                                  param_attr=attr)
-        attr = {"perm": [0, 2, 3, 1]}
-        node.fluid_code.add_layer("transpose",
-                                  inputs=node,
+                                  inputs=input,
                                   output=node,
                                   param_attr=attr)
     def ResizeBilinear(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         resize_shape = self.graph.get_node(node.layer.input[1], copy=True)
-        self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
         if resize_shape.layer_type == "Const":
+            self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
             resize_shape = resize_shape.value.tolist()
-        else:
-            resize_shape = self.decoder.infer_shape_tensor(
-                resize_shape, node.out_shapes[0])
         align_corners = node.get_attr("align_corners")
-        attr = {"perm": [0, 3, 1, 2]}
-        node.fluid_code.add_layer("transpose",
-                                  inputs=input,
-                                  output=node,
-                                  param_attr=attr)
         attr = {
             "align_corners": align_corners,
             "out_shape": resize_shape,
-            "align_mode": 1
+            "align_mode": 1,
+            "data_format": string("NHWC")
         }
         node.fluid_code.add_layer("resize_bilinear",
-                                  inputs=node,
-                                  output=node,
-                                  param_attr=attr)
-        attr = {"perm": [0, 2, 3, 1]}
-        node.fluid_code.add_layer("transpose",
-                                  inputs=node,
+                                  inputs=input,
                                   output=node,
                                   param_attr=attr)
@@ -1102,23 +966,21 @@ class TFOpMapperNHWC(OpMapper):
     def RandomUniform(self, node):
         shape = self.graph.get_node(node.layer.input[0], copy=True)
-        self.add_omit_nodes(shape.layer_name, node.layer_name)
         if shape.layer_type == "Const":
+            self.add_omit_nodes(shape.layer_name, node.layer_name)
             shape = shape.value.tolist()
-        else:
-            shape = self.decoder.infer_shape_tensor(shape)
-        attr = {"shape": shape, "min": 0.0, "max": 0.9999}
-        if shape[0] < 0:
-            input = self.batch_node
-            node.fluid_code.add_layer("uniform_random_batch_size_like",
-                                      inputs=input,
-                                      output=node,
-                                      param_attr=attr)
-        else:
-            node.fluid_code.add_layer("uniform_random",
-                                      inputs=None,
-                                      output=node,
-                                      param_attr=attr)
+        if not isinstance(shape, list):
+            attr = {"dtype": string("int64")}
+            node.fluid_code.add_layer("cast",
+                                      inputs=shape,
+                                      output=shape,
+                                      param_attr=attr)
+        attr = {"min": 0.0, "max": 0.9999}
+        inputs = {"shape": shape}
+        node.fluid_code.add_layer("uniform_random",
+                                  inputs=inputs,
+                                  output=node,
+                                  param_attr=attr)
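Note: `RandomUniform` now always lowers to `uniform_random`; a non-constant shape tensor is first cast to int64, and the old `uniform_random_batch_size_like` fallback is gone. A sketch of the constant-shape case, assuming paddlepaddle>=1.6.1:

```python
import paddle.fluid as fluid

out = fluid.layers.uniform_random([2, 4], min=0.0, max=0.9999)
```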
     def SquaredDifference(self, node):
         x = self.graph.get_node(node.layer.input[0], copy=True)
...