Commit bbc964e8 authored by Channingss

Merge branch 'develop' into paddle_onnx

# X2Paddle Supported OP List
> X2Paddle currently supports 50+ TensorFlow OPs and 30+ Caffe Layers, covering most operations commonly used in CV classification models. The complete list of OPs currently supported by X2Paddle is given below.
**Note:** Some OPs are not yet supported. If you encounter an unsupported OP during conversion, you can add it yourself or report it to us. You are welcome to let us know by [filing an ISSUE](https://github.com/PaddlePaddle/X2Paddle/issues/new) (model name, code implementation, or a way to obtain the model), and we will follow up promptly :)
@@ -20,7 +20,7 @@
| 41 | Cast | 42 | Split | 43 | Squeeze | 44 | ResizeNearestNeighbor |
| 45 | Softmax | 46 | Range | 47 | ConcatV2 | 48 | MirrorPad |
| 49 | Identity | 50 | GreaterEqual | 51 | StopGradient | 52 | Minimum |
| 53 | RandomUniform | 54 | Fill | 55 | Floor | 56 | DepthToSpace |
## Caffe
......
@@ -42,7 +42,6 @@ class PaddleOpMapper(object):
op_nodes = list()
input_nodes = list()
output_nodes = list()
unsupported_ops = set()
print("Translating PaddlePaddle to ONNX...\n")
@@ -185,6 +184,41 @@ class PaddleOpMapper(object):
alpha=op.attr('alpha'))
return node
def swish(self, op, block):
"""
The activation swish, y = x / (1 + exp(-beta * x))
"""
beta = op.attr('beta')
beta_name = self.get_name(op.type, 'beta')
beta_node = onnx.helper.make_node(
'Constant',
name=beta_name,
inputs=[],
outputs=[beta_name],
value=onnx.helper.make_tensor(
name=beta_name,
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[beta]))
beta_x_name = self.get_name(op.type, 'beta_x')
beta_x_node = onnx.helper.make_node(
'Mul',
name=beta_x_name,
inputs=[op.input('X')[0], beta_name],
outputs=[beta_x_name])
sigmoid_name = self.get_name(op.type, 'sigmoid')
sigmoid_node = onnx.helper.make_node(
'Sigmoid',
name=sigmoid_name,
inputs=[beta_x_name],
outputs=[sigmoid_name])
swish_node = onnx.helper.make_node(
'Mul',
inputs=[op.input('X')[0], sigmoid_name],
outputs=op.output('Out'))
return [beta_node, beta_x_node, sigmoid_node, swish_node]
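A quick standalone check of the decomposition emitted above (a minimal NumPy sketch, not part of the patch): the Mul -> Sigmoid -> Mul chain computes x * sigmoid(beta * x), which equals the docstring form y = x / (1 + exp(-beta * x)).

```python
import numpy as np

x = np.linspace(-3.0, 3.0, 7)
beta = 1.5
sigmoid = 1.0 / (1.0 + np.exp(-(beta * x)))  # Sigmoid(Mul(beta, x))
# x * sigmoid(beta * x) == x / (1 + exp(-beta * x))
assert np.allclose(x * sigmoid, x / (1.0 + np.exp(-beta * x)))
```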
def elementwise_add(self, op, block):
axis = op.attr('axis')
x_shape = block.var(op.input('X')[0]).shape
......
@@ -46,7 +46,8 @@ class TFOpMapperNHWC(OpMapper):
'Softplus': ['softplus'],
'LeakyRelu': ['leaky_relu', {
'alpha': 'alpha'
}]
}],
'Floor': ['floor']
}
elementwise_ops = {
'Add': 'elementwise_add',
@@ -54,6 +55,7 @@ class TFOpMapperNHWC(OpMapper):
'RealDiv': 'elementwise_div',
'Sub': 'elementwise_sub',
'Maximum': 'elementwise_max',
'Minimum': 'elementwise_min',
'Mul': 'elementwise_mul',
'FloorDiv': 'elementwise_floordiv'
}
@@ -95,8 +97,9 @@ class TFOpMapperNHWC(OpMapper):
func = getattr(self, op)
try:
func(node)
except:
except Exception as e:
unsupported_ops.add(op)
print(e)
else:
unsupported_ops.add(op)
if len(unsupported_ops) > 0:
@@ -147,91 +150,9 @@ class TFOpMapperNHWC(OpMapper):
op_type = self.elementwise_ops[node.layer_type]
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
if len(x_shape) == 0:
x_shape = [1]
if len(y_shape) == 0:
y_shape = [1]
# incomplete broadcasting support in paddle
x_input = x
y_input = y
if len(x_shape) < len(y_shape):
unrevertable_ops = [
"elementwise_sub", "elementwise_div", "elementwise_floordiv",
"elementwise_mod", "elementwise_pow"
]
if op_type not in unrevertable_ops:
x_input = y
y_input = x
x_shape = y.out_shapes[0]
if len(x_shape) == 0:
x_shape = [1]
y_shape = x.out_shapes[0]
if len(y_shape) == 0:
y_shape = [1]
else:
raise Exception("Unexpected situation happened")
if len(x_shape) == 4 and len(y_shape) == 1:
inputs = {"x": x_input, "y": y_input}
node.fluid_code.add_layer(op_type, inputs=inputs, output=node)
return
is_sub_seq = True
for i in range(len(y_shape)):
index = -1 * i - 1
if y_shape[index] != x_shape[index]:
is_sub_seq = False
if not is_sub_seq:
x_expand_times = [1] * len(x_shape)
y_expand_times = [1] * len(y_shape)
x_need_expand = False
y_need_expand = False
for i in range(len(y_shape)):
index = -1 * i - 1
if y_shape[index] != x_shape[index]:
if y_shape[index] == 1:
y_expand_times[index] = x_shape[index]
y_need_expand = True
elif x_shape[index] == 1:
x_expand_times[index] = y_shape[index]
x_need_expand = True
else:
raise Exception("Unexpected situation happened")
if x_need_expand:
attr = {"expand_times": x_expand_times}
node.fluid_code.add_layer(
"expand", inputs=x_input, output="x_tmp", param_attr=attr)
x_input = "x_tmp"
if y_need_expand:
attr = {"expand_times": y_expand_times}
node.fluid_code.add_layer(
"expand", inputs=y_input, output="y_tmp", param_attr=attr)
y_input = "y_tmp"
if len(x_shape) == 4 and len(y_shape) == 4:
node.fluid_code.add_layer(
"transpose",
inputs=x_input,
output=x_input,
param_attr={'perm': [0, 3, 1, 2]})
node.fluid_code.add_layer(
"transpose",
inputs=y_input,
output=y_input,
param_attr={'perm': [0, 3, 1, 2]})
inputs = {"x": x_input, "y": y_input}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=None)
node.fluid_code.add_layer(
"transpose",
inputs=node,
output=node,
param_attr={'perm': [0, 2, 3, 1]})
else:
inputs = {"x": x_input, "y": y_input}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=None)
inputs = {"x": x, "y": y}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=None)
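The rewritten mapping above leans on the target framework's built-in elementwise broadcasting instead of emitting explicit `expand` layers. A rough illustration of the rule the removed code emulated, in NumPy semantics (fluid's elementwise ops broadcast trailing dimensions in a similar way):

```python
import numpy as np

x = np.ones((2, 3, 4, 5))
y = np.ones((4, 5))      # trailing dims match x exactly
z = np.ones((3, 1, 5))   # a size-1 dim is tiled, as the old expand path did
assert (x + y).shape == (2, 3, 4, 5)
assert (x + z).shape == (2, 3, 4, 5)
```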
def Placeholder(self, node):
shape = node.out_shapes[0]
@@ -283,12 +204,54 @@ class TFOpMapperNHWC(OpMapper):
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
def MaxPool(self, node):
def Fill(self, node):
dims = self.graph.get_node(node.layer.input[0], copy=True)
input_value = self.graph.get_node(node.layer.input[1], copy=True)
assert input_value.layer_type == "Const", "Value of fill OP should be Const"
self.add_omit_nodes(input_value.layer_name, node.layer_name)
input_value = input_value.value
input_dtype = string(input_value.dtype)
attr = {'value': input_value, 'dtype': input_dtype}
node.fluid_code.add_layer(
"fill_constant", inputs=dims, output=node, param_attr=attr)
def DepthToSpace(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
in_shape = input.out_shapes[0]
if in_shape.count(-1) > 2:
in_shape = self.decoder.infer_tensor(input).shape
block_size = node.get_attr("block_size")
data_format = node.get_attr("data_format").decode()
if data_format == "NHWC":
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
"transpose", inputs=input, output=input, param_attr=attr)
n, h, w, c = input.out_shapes[0]
attr = {'shape': [0, block_size * block_size, -1, h, w]}
node.fluid_code.add_layer(
"reshape", inputs=input, output=input, param_attr=attr)
attr = {'perm': [0, 2, 1, 3, 4]}
node.fluid_code.add_layer(
"transpose", inputs=input, output=input, param_attr=attr)
attr = {'shape': [0, c, h, w]}
node.fluid_code.add_layer(
"reshape", inputs=input, output=input, param_attr=attr)
attr = {'upscale_factor': block_size}
node.fluid_code.add_layer(
"pixel_shuffle", inputs=input, output=node, param_attr=attr)
if data_format == "NHWC":
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def MaxPool(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
k_size = node.get_attr("ksize")
strides = node.get_attr("strides")
@@ -300,7 +263,6 @@ class TFOpMapperNHWC(OpMapper):
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
strides = [strides[i] for i in [0, 3, 1, 2]]
k_size = [k_size[i] for i in [0, 3, 1, 2]]
input = node
@@ -324,13 +286,7 @@ class TFOpMapperNHWC(OpMapper):
kernel = self.graph.get_node(node.layer.input[1], copy=True)
self.add_omit_nodes(kernel.layer_name, node.layer_name)
in_shape = input.out_shapes[0]
if in_shape.count(-1) > 2:
in_shape = self.decoder.infer_tensor(input).shape
k_size = kernel.out_shapes[0]
if k_size.count(-1) > 2:
k_size = self.decoder.infer_tensor(kernel).shape
strides = node.get_attr("strides")
dilations = node.get_attr("dilations")
data_format = node.get_attr("data_format").decode()
@@ -339,23 +295,27 @@ class TFOpMapperNHWC(OpMapper):
if kernel.layer_type == 'Const':
kernel_value = kernel.value
kernel_weight_name = kernel.layer_name.replace('/', '_')
else:
kernel_value = self.decoder.infer_tensor(kernel)
self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
kernel_value, (3, 2, 0, 1))
if kernel.layer_type == 'Split':
kernel_weight_name = "{}_{}_kernel".format(node.layer_name,
kernel.layer_name)
else:
kernel_weight_name = kernel.layer_name.replace('/', '_')
self.weights[kernel_weight_name] = numpy.transpose(kernel_value,
(3, 2, 0, 1))
if not channel_first:
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
strides = [strides[i] for i in [0, 3, 1, 2]]
dilations = [dilations[i] for i in [0, 3, 1, 2]]
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
input = node
attr = {
"bias_attr": False,
"param_attr": string(kernel.layer_name),
"param_attr": string(kernel_weight_name),
"num_filters": k_size[3],
"filter_size": k_size[0:2],
"stride": strides[2:4],
@@ -366,7 +326,6 @@ class TFOpMapperNHWC(OpMapper):
if hasattr(node, 'dilation') and attr['dilation'] == [1, 1]:
if len(node.dilation) == 1:
attr['dilation'] = [1, node.dilation[0]]
node.fluid_code.add_layer(
"conv2d", inputs=input, output=node, param_attr=attr)
if not channel_first:
@@ -429,12 +388,7 @@ class TFOpMapperNHWC(OpMapper):
self.add_omit_nodes(kernel.layer_name, node.layer_name)
in_shape = input.out_shapes[0]
if in_shape.count(-1) > 2:
in_shape = self.decoder.infer_tensor(input).shape
k_size = kernel.out_shapes[0]
if k_size.count(-1) > 2:
k_size = self.decoder.infer_tensor(kernel).shape
strides = node.get_attr("strides")
dilations = node.get_attr("dilations")
data_format = node.get_attr("data_format").decode()
@@ -475,61 +429,25 @@ class TFOpMapperNHWC(OpMapper):
def Reshape(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
param = self.graph.get_node(node.layer.input[1], copy=True)
is_variable = False
if param.layer_type == "Const":
attr = {"shape": param.value.tolist()}
self.add_omit_nodes(param.layer_name, node.layer_name)
shape = param.value.tolist()
else:
# Here is a trick to handle a shape parameter passed as a tensor in tensorflow
shape = self.decoder.infer_shape_tensor(param, node.out_shapes[0])
if shape.count(-1) <= 1:
attr = {"shape": shape}
self.add_omit_nodes(param.layer_name, node.layer_name)
else:
assert len(param.out_shapes[
0]) == 1, "Unexpected situation of shape parameter"
attr = {"shape": [-1]}
shape = param
inputs = {"x": input, "shape": shape}
node.fluid_code.add_layer(
"reshape", inputs=inputs, output=node, param_attr=None)
if param.layer_type != "Const":
out_shape = numpy.array(node.out_shapes[0])
if (out_shape > 0).any():
out_shape[out_shape < 0] = 0
attr = {'shape': out_shape.tolist()}
node.fluid_code.add_layer(
"reshape",
inputs=param,
output="shape_param",
param_attr=attr)
attr = {"num_or_sections": param.out_shapes[0][0], "dim": 0}
node.fluid_code.add_layer(
"split", inputs="shape_param", output=node, param_attr=attr)
new_param = "["
for i in range(param.out_shapes[0][0]):
new_param += (node.layer_name + "[{}]".format(i) + ", ")
new_param = new_param.strip(", ") + "]"
attr = {"shape": new_param}
is_variable = True
# to change [192, -1] -> [-1, 192], always put -1 in the first dimension
# optimization for Paddle-Lite
in_shape = input.out_shapes[0]
if not is_variable and in_shape.count(-1) < 1:
total_size = 1
for i in range(len(in_shape)):
total_size *= in_shape[i]
for i in range(len(attr["shape"])):
if attr["shape"][i] == 0:
attr["shape"][i] = in_shape[i]
if attr["shape"][i] != -1:
total_size /= attr["shape"][i]
if attr["shape"].count(-1) > 0:
index = attr["shape"].index(-1)
attr["shape"][index] = int(total_size)
attr["shape"][0] = -1
node.fluid_code.add_layer(
"reshape", inputs=input, output=node, param_attr=attr)
"reshape", inputs=node, output=node, param_attr=attr)
def AvgPool(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
in_shape = input.out_shapes[0]
if in_shape.count(-1) > 2:
in_shape = self.decoder.infer_tensor(input).shape
k_size = node.get_attr("ksize")
strides = node.get_attr("strides")
data_format = node.get_attr("data_format").decode()
@@ -537,7 +455,6 @@ class TFOpMapperNHWC(OpMapper):
channel_first = data_format == "NCHW"
if not channel_first:
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
strides = [strides[i] for i in [0, 3, 1, 2]]
k_size = [k_size[i] for i in [0, 3, 1, 2]]
attr = {"perm": [0, 3, 1, 2]}
@@ -586,7 +503,6 @@ class TFOpMapperNHWC(OpMapper):
axis = axis.value
if axis < 0:
axis += len(inputs[0].out_shapes[0])
attr = {"axis": axis}
node.fluid_code.add_layer(
"concat", inputs=inputs, output=node, param_attr=attr)
@@ -594,25 +510,38 @@ class TFOpMapperNHWC(OpMapper):
def Tile(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
expand_times = self.graph.get_node(node.layer.input[1], copy=True)
self.add_omit_nodes(expand_times.layer_name, node.layer_name)
if expand_times.layer_type == "Const":
self.add_omit_nodes(expand_times.layer_name, node.layer_name)
expand_times = expand_times.value.tolist()
else:
expand_times = self.decoder.infer_shape_tensor(expand_times)
for i in range(len(expand_times)):
if expand_times[i] < 0:
expand_times[i] = 1
attr = {"expand_times": expand_times}
expand_times = expand_times
inputs = {"x": input, "expand_times": expand_times}
node.fluid_code.add_layer(
"expand", inputs=input, output=node, param_attr=attr)
"expand", inputs=inputs, output=node, param_attr=None)
def Pack(self, node):
inputs = [
self.graph.get_node(
name, copy=True) for name in node.layer.input
]
reshape_shape = list()
for input_node in inputs:
k_size = input_node.out_shapes[0]
if len(k_size) and k_size[-1] != -1:
reshape_shape = [0] * len(k_size)
reshape_shape[-1] = k_size[-1]
break
if len(reshape_shape):
for i, input_node in enumerate(inputs):
node.fluid_code.add_layer(
"reshape",
inputs=input_node,
output='tmp_{}'.format(i),
param_attr={"shape": reshape_shape})
axis = node.get_attr("axis")
attr = {"axis": axis}
if len(reshape_shape):
inputs = ['tmp_{}'.format(i) for i in range(len(inputs))]
node.fluid_code.add_layer(
"stack", inputs=inputs, output=node, param_attr=attr)
@@ -656,21 +585,17 @@ class TFOpMapperNHWC(OpMapper):
start = self.graph.get_node(node.layer.input[0], copy=True)
limit = self.graph.get_node(node.layer.input[1], copy=True)
delta = self.graph.get_node(node.layer.input[2], copy=True)
self.add_omit_nodes(start.layer_name, node.layer_name)
self.add_omit_nodes(limit.layer_name, node.layer_name)
self.add_omit_nodes(delta.layer_name, node.layer_name)
if start.layer_type == "Const":
self.add_omit_nodes(start.layer_name, node.layer_name)
start = start.value
else:
start = self.decoder.infer_tensor(start)
if limit.layer_type == "Const":
self.add_omit_nodes(limit.layer_name, node.layer_name)
limit = limit.value
else:
limit = self.decoder.infer_tensor(limit)
if delta.layer_type == "Const":
self.add_omit_nodes(delta.layer_name, node.layer_name)
delta = delta.value
else:
delta = self.decoder.infer_tensor(delta)
dtype = node.dtype
inputs = {
"start": start,
@@ -802,31 +727,27 @@ class TFOpMapperNHWC(OpMapper):
input = self.graph.get_node(node.layer.input[0], copy=True)
begin = self.graph.get_node(node.layer.input[1], copy=True)
size = self.graph.get_node(node.layer.input[2], copy=True)
self.add_omit_nodes(begin.layer_name, node.layer_name)
self.add_omit_nodes(size.layer_name, node.layer_name)
if begin.layer_type == "Const":
self.add_omit_nodes(begin.layer_name, node.layer_name)
begin = begin.value.tolist()
else:
begin = self.decoder.infer_tensor(begin).tolist()
if size.layer_type == "const":
begin = begin
shape = begin.out_shapes[0]
attr = {"shape": shape}
node.fluid_code.add_layer(
"reshape", inputs=begin, output=begin, param_attr=attr)
if size.layer_type == "Const":
self.add_omit_nodes(size.layer_name, node.layer_name)
size = size.value.tolist()
else:
size = self.decoder.infer_tensor(size).tolist()
for i in range(len(size)):
if size[i] < 0:
size[i] = 99999999
else:
size[i] = size[i] + begin[i]
attr = {
"axes": [i for i in range(len(size))],
"starts": begin,
"ends": size
}
size = size
shape = size.out_shapes[0]
attr = {"shape": shape}
node.fluid_code.add_layer(
"reshape", inputs=size, output=size, param_attr=attr)
inputs = {"x": input, "offsets": begin, "shape": size}
node.fluid_code.add_layer(
"slice", inputs=input, output=node, param_attr=attr)
"crop_tensor", inputs=inputs, output=node, param_attr=None)
def Conv2DBackpropInput(self, node):
out_shape = self.graph.get_node(node.layer.input[0], copy=True)
@@ -946,19 +867,27 @@ class TFOpMapperNHWC(OpMapper):
def ResizeNearestNeighbor(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
resize_shape = self.graph.get_node(node.layer.input[1], copy=True)
self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
if resize_shape.layer_type == "Const":
self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
resize_shape = resize_shape
shape = resize_shape.out_shapes[0]
attr = {"shape": shape}
node.fluid_code.add_layer(
"reshape",
inputs=resize_shape,
output=resize_shape,
param_attr=attr)
align_corners = node.get_attr("align_corners")
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
attr = {"align_corners": align_corners, "out_shape": resize_shape}
inputs = {"input": node, "out_shape": resize_shape}
attr = {"align_corners": align_corners}
node.fluid_code.add_layer(
"resize_nearest", inputs=node, output=node, param_attr=attr)
"resize_nearest", inputs=inputs, output=node, param_attr=attr)
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
@@ -966,23 +895,29 @@ class TFOpMapperNHWC(OpMapper):
def ResizeBilinear(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
resize_shape = self.graph.get_node(node.layer.input[1], copy=True)
self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
if resize_shape.layer_type == "Const":
self.add_omit_nodes(resize_shape.layer_name, node.layer_name)
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
shape = resize_shape.out_shapes[0]
attr = {"shape": shape}
node.fluid_code.add_layer(
"reshape",
inputs=resize_shape,
output=resize_shape,
param_attr=attr)
align_corners = node.get_attr("align_corners")
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
inputs = {"input": node, "out_shape": resize_shape}
attr = {
#"out_shape": resize_shape,
"align_corners": align_corners,
"out_shape": resize_shape,
"align_mode": 1
}
node.fluid_code.add_layer(
"resize_bilinear", inputs=node, output=node, param_attr=attr)
"resize_bilinear", inputs=inputs, output=node, param_attr=attr)
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
@@ -996,23 +931,15 @@ class TFOpMapperNHWC(OpMapper):
def RandomUniform(self, node):
shape = self.graph.get_node(node.layer.input[0], copy=True)
self.add_omit_nodes(shape.layer_name, node.layer_name)
if shape.layer_type == "Const":
self.add_omit_nodes(shape.layer_name, node.layer_name)
shape = shape.value.tolist()
else:
shape = self.decoder.infer_shape_tensor(shape)
attr = {"shape": shape, "min": 0.0, "max": 0.9999}
shape = shape
attr = {"min": 0.0, "max": 0.9999}
if shape[0] < 0:
input = self.batch_node
node.fluid_code.add_layer(
"uniform_random_batch_size_like",
inputs=input,
output=node,
param_attr=attr)
else:
node.fluid_code.add_layer(
"uniform_random", inputs=None, output=node, param_attr=attr)
node.fluid_code.add_layer(
"uniform_random", inputs=shape, output=node, param_attr=attr)
def SquaredDifference(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
@@ -1028,11 +955,11 @@ class TFOpMapperNHWC(OpMapper):
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
if y.layer_type == 'Const':
self.add_omit_nodes(y.layer_name, node.layer_name)
dim = y.value.tolist()
attr = {'axes': [dim]}
else:
dim = self.decoder.infer_tensor(y)
self.add_omit_nodes(y.layer_name, node.layer_name)
attr = {'axes': [dim]}
attr = {'axes': y}
node.fluid_code.add_layer(
"unsqueeze", inputs=x, output=node, param_attr=attr)
......
@@ -236,26 +236,18 @@ class TFOptimizer(object):
def remove_transpose(self):
graph_copy = cp.deepcopy(self.graph)
nhwc_insensitive_ops = [
'Relu', 'Relu6', 'Abs', 'Sigmoid', 'Exp', 'Rsqrt', 'swish_f32',
'LeakyRelu', 'Cast', 'Tanh'
]
elementwise_ops = [
'Sub', 'Add', 'RealDiv', 'Maximum', 'Mul', 'FloorDiv',
'GreaterEqual'
]
optimize_ops = [
'Conv2D', 'MaxPool', 'FusedBatchNorm', 'DepthwiseConv2dNative',
'AvgPool', 'Pad', 'Conv2DBackpropInput', 'ResizeNearestNeighbor',
'ResizeBilinear', "Placeholder"
'GreaterEqual'
]
can_be_optimized_ops = [
'Conv2D', 'MaxPool', 'FusedBatchNorm', 'DepthwiseConv2dNative',
'AvgPool', 'Pad', 'Conv2DBackpropInput', 'ResizeNearestNeighbor',
'ResizeBilinear', "Placeholder", 'Relu', 'Relu6', 'Abs', 'Sigmoid',
'Exp', 'Rsqrt', 'swish_f32', 'LeakyRelu', 'Cast', 'Tanh'
'Placeholder', 'Relu', 'Relu6', 'Abs', 'Sigmoid', 'Exp', 'Rsqrt',
'swish_f32', 'LeakyRelu', 'Cast', 'Tanh'
]
# These ops may have one more Variable input
can_be_optimized_special_ops = ['ResizeBilinear']
for node_name in self.graph.topo_sort:
node = graph_copy.get_node(node_name)
if node is None:
@@ -278,9 +270,10 @@ class TFOptimizer(object):
0].param_attr["perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
elif out_node.layer_type in elementwise_ops:
elif out_node.layer_type in elementwise_ops or out_node.layer_type in can_be_optimized_special_ops:
can_be_removed = False
break
if can_be_removed and len(node.fluid_code.layers) > 1:
true_node = self.graph.get_node(node_name)
if true_node.layer_type == "Placeholder":
@@ -298,6 +291,7 @@ class TFOptimizer(object):
-2].output = true_node.fluid_code.layers[-1].output
node.removed = True
del true_node.fluid_code.layers[-1]
for out_name in output_names:
out_node = self.graph.get_node(out_name)
out_node.fluid_code.layers[
......
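The invariant that `remove_transpose` exploits, shown as a standalone NumPy check (illustration only): an NHWC -> NCHW transpose followed by its NCHW -> NHWC inverse is the identity, so such pairs wrapped around layout-insensitive ops can be dropped.

```python
import numpy as np

x = np.random.rand(1, 4, 4, 3)  # NHWC input
y = x.transpose(0, 3, 1, 2)     # NHWC -> NCHW, perm [0, 3, 1, 2]
assert np.array_equal(y.transpose(0, 2, 3, 1), x)  # inverse perm restores x
```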
# X2Paddle Model Test Suite
> X2Paddle currently supports 50+ TensorFlow OPs and 40+ Caffe Layers, covering most operations commonly used in CV classification models. We have tested X2Paddle's conversion on the models listed below.
**Note:** Owing to differences between frameworks, some models cannot currently be converted, such as TensorFlow models containing control flow, NLP models, and so on. For common CV models, if you find one that fails to convert or converts with a large diff, please let us know by [filing an ISSUE](https://github.com/PaddlePaddle/X2Paddle/issues/new) (model name, code implementation, or a way to obtain the model), and we will follow up promptly :)
@@ -22,7 +22,8 @@
| UNet | [code1](https://github.com/jakeret/tf_unet)/[code2](https://github.com/lyatdawn/Unet-Tensorflow) | - |
| MTCNN | [code](https://github.com/AITTSMD/MTCNN-Tensorflow) | - |
| YOLO-V3 | [code](https://github.com/YunYang1994/tensorflow-yolov3) | Conversion requires disabling the NHWC->NCHW optimization; see [FAQ Q2](FAQ.md) |
| Inception_ResNet_V2 | [code](https://github.com/tensorflow/models/tree/master/research/slim/nets) | - |
| FALSR | [code](https://github.com/xiaomi-automl/FALSR) | - |
| DCSCN | [code](https://modelzoo.co/model/dcscn-super-resolution) | - |
## Caffe
......