Commit f2245c32 authored by mamingjie-China

update

Parent 15a8c186
@@ -10,7 +10,7 @@ X2Paddle has tested TensorFlow/Caffe/ONNX model conversion on a number of mainstream CV models
 ## Environment Dependencies
 python == 2.7 | python >= 3.5
-paddlepaddle >= 1.6.0
+paddlepaddle >= 1.5.0
 **Install the following dependencies as needed**
 tensorflow : tensorflow == 1.14.0
......
@@ -188,7 +188,7 @@ def main():
     if args.version:
         import x2paddle
-        print("x2paddle-{} with python>=3.5, paddlepaddle>=1.6.0\n".format(
+        print("x2paddle-{} with python>=3.5, paddlepaddle>=1.5.0\n".format(
             x2paddle.__version__))
         return
@@ -198,8 +198,8 @@ def main():
     try:
         import paddle
         v0, v1, v2 = paddle.__version__.split('.')
-        if int(v0) != 1 or int(v1) < 6:
-            print("paddlepaddle>=1.6.0 is required")
+        if int(v0) != 1 or int(v1) < 5:
+            print("paddlepaddle>=1.5.0 is required")
             return
     except:
         print("paddlepaddle not installed, use \"pip install paddlepaddle\"")
......
@@ -278,7 +278,6 @@ class TFOpMapper(OpMapper):
             'name': string(node.layer_name),
             'append_batch_size': False
         }
-
        if shape[0] < 0:
            self.batch_node = node
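Note: when a placeholder's leading (batch) dimension is unknown (negative), the mapper remembers it as `self.batch_node`; the `uniform_random` hunk at the end of this diff reuses it so `uniform_random_batch_size_like` can pick up the batch size at runtime.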
@@ -383,6 +382,7 @@ class TFOpMapper(OpMapper):
         data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
         channel_first = data_format == "NCHW"
+        padding = 0
         if not channel_first:
             in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
@@ -391,10 +391,22 @@ class TFOpMapper(OpMapper):
         else:
             self.graph.data_format_propagation(node)
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[3], strides[3])
+            pad_h = pad_h[0] + pad_h[1]
+            pad_w = pad_w[0] + pad_w[1]
+            if pad_h != 0 or pad_w != 0:
+                attr = {"paddings": [0, pad_h, 0, pad_w], "pad_value": -10000.0}
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "pool_size": k_size[2:4],
             "pool_type": string("max"),
-            "pool_padding": string(pad_mode),
+            "pool_padding": padding,
             "pool_stride": strides[2:4]
         }
         node.fluid_code.add_layer("pool2d",
@@ -420,6 +432,7 @@ class TFOpMapper(OpMapper):
         data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
         channel_first = data_format == "NCHW"
+        padding = 0
         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
             kernel.value, (3, 2, 0, 1))
@@ -431,6 +444,18 @@ class TFOpMapper(OpMapper):
         else:
             self.graph.data_format_propagation(node)
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
+            if pad_h[0] == pad_h[1] and pad_w[0] == pad_w[1]:
+                padding = [pad_h[0], pad_w[0]]
+            else:
+                attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
@@ -438,7 +463,7 @@ class TFOpMapper(OpMapper):
             "filter_size": k_size[0:2],
             "stride": strides[2:4],
             "dilation": dilations[2:4],
-            "padding": string(pad_mode)
+            "padding": padding
         }
         node.fluid_code.add_layer("conv2d",
                                   inputs=input,
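For convolutions the decomposition is cheaper than in the pooling case: zero padding is exactly what SAME padding means for a convolution, so a symmetric pad is passed straight to `conv2d`'s `padding` argument, and only the asymmetric case falls back to a separate `pad2d` with `pad_value` 0.0.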
@@ -510,6 +535,7 @@ class TFOpMapper(OpMapper):
         data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
         channel_first = data_format == "NCHW"
+        padding = 0
         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
             kernel.value, (2, 3, 0, 1))
@@ -521,6 +547,19 @@ class TFOpMapper(OpMapper):
         else:
             self.data_format_propagation(node)
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
+            if pad_h[0] == pad_h[1] and pad_w[0] == pad_w[1]:
+                padding = [pad_h[0], pad_w[0]]
+            else:
+                attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
@@ -530,7 +569,7 @@ class TFOpMapper(OpMapper):
             "dilation": dilations[2:4],
             "groups": k_size[3] * in_shape[1],
             "use_cudnn": False,
-            "padding": string(pad_mode)
+            "padding": padding
         }
         node.fluid_code.add_layer("conv2d",
                                   inputs=input,
@@ -652,9 +691,14 @@ class TFOpMapper(OpMapper):
         attr = {
             "pool_size": k_size[2:4],
             "pool_type": string("avg"),
-            "pool_stride": strides[2:4],
-            "pool_padding": string(pad_mode)
+            "pool_stride": strides[2:4]
         }
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[3], strides[3])
+            assert pad_h[0] == pad_h[1] and pad_w[0] == pad_w[
+                1], "Cannot map AvgPool"
+            attr["pool_padding"] = [pad_h[0], pad_w[0]]
         node.fluid_code.add_layer("pool2d",
                                   inputs=input,
                                   output=node,
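Average pooling gets no `pad2d` fallback: a constant written into the border would be counted in the window averages (averaging [5, 7] together with two padded zeros gives 3 instead of 6), so only symmetric SAME padding, which `pool2d` can apply natively, is mappable here; asymmetric cases trip the assert.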
@@ -949,6 +993,20 @@ class TFOpMapper(OpMapper):
         else:
             self.data_format_propagation(node)
+        padding = 0
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
+            if pad_h[0] == pad_h[1] and pad_w[0] == pad_w[1]:
+                padding = [pad_h[0], pad_w[0]]
+            else:
+                attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
@@ -956,14 +1014,29 @@ class TFOpMapper(OpMapper):
             "filter_size": k_size[0:2],
             "stride": strides[2:4],
             "dilation": dilations[2:4],
-            "padding": string(pad_mode),
-            "output_size": out_shape[1:3]
+            "padding": padding
         }
         node.fluid_code.add_layer("conv2d_transpose",
                                   inputs=input,
                                   output=node,
                                   param_attr=attr)
+        if pad_mode == "SAME":
+            if node.tf_data_format == "NHWC":
+                out_shape = [out_shape[i] for i in [0, 3, 1, 2]]
+            for i in range(4):
+                if out_shape[i] < 0:
+                    out_shape[i] = 999999
+            attr = {
+                "axes": [0, 1, 2, 3],
+                "starts": [0, 0, 0, 0],
+                "ends": out_shape
+            }
+            node.fluid_code.add_layer("slice",
+                                      inputs=node,
+                                      output=node,
+                                      param_attr=attr)
+
     def Max(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         reduce_idx = self.graph.get_node(node.layer.input[1], copy=True)
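With the `output_size` hint removed, Paddle's `conv2d_transpose` output under SAME padding can come out larger than the spatial size the TensorFlow graph recorded, so the converter now crops the result with a `slice`. Unknown (negative) dimensions are replaced by 999999, which appears to rely on `slice` clamping `ends` to each axis's actual extent; a hedged illustration with made-up shapes:

```python
# Hypothetical shapes for illustration; -1 marks a dim unknown at export time.
out_shape = [-1, 64, 112, 112]
ends = [d if d >= 0 else 999999 for d in out_shape]
print(ends)  # [999999, 64, 112, 112] -> the oversized end keeps the full batch axis
```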
......
@@ -321,11 +321,22 @@ class TFOpMapperNHWC(OpMapper):
             k_size = [k_size[i] for i in [0, 3, 1, 2]]
             input = node
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[3], strides[3])
+            pad_h = pad_h[0] + pad_h[1]
+            pad_w = pad_w[0] + pad_w[1]
+            attr = {"paddings": [0, pad_h, 0, pad_w], "pad_value": -10000.0}
+            if pad_h + pad_w != 0:
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "pool_size": k_size[2:4],
             "pool_type": string("max"),
-            "pool_stride": strides[2:4],
-            "pool_padding": string(pad_mode)
+            "pool_stride": strides[2:4]
         }
         node.fluid_code.add_layer("pool2d",
                                   inputs=input,
@@ -357,6 +368,7 @@ class TFOpMapperNHWC(OpMapper):
         data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
         channel_first = data_format == "NCHW"
+        padding = 0
         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
             kernel.value, (3, 2, 0, 1))
@@ -372,6 +384,18 @@ class TFOpMapperNHWC(OpMapper):
                                       param_attr=attr)
             input = node
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
+            if pad_h[0] == pad_h[1] and pad_w[0] == pad_w[1]:
+                padding = [pad_h[0], pad_w[0]]
+            else:
+                attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
@@ -379,7 +403,7 @@ class TFOpMapperNHWC(OpMapper):
             "filter_size": k_size[0:2],
             "stride": strides[2:4],
             "dilation": dilations[2:4],
-            "padding": string(pad_mode)
+            "padding": padding
         }
         node.fluid_code.add_layer("conv2d",
                                   inputs=input,
@@ -466,6 +490,7 @@ class TFOpMapperNHWC(OpMapper):
         data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()
         channel_first = data_format == "NCHW"
+        padding = 0
         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
             kernel.value, (2, 3, 0, 1))
@@ -481,6 +506,19 @@ class TFOpMapperNHWC(OpMapper):
                                       param_attr=attr)
             input = node
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
+            if pad_h[0] == pad_h[1] and pad_w[0] == pad_w[1]:
+                padding = [pad_h[0], pad_w[0]]
+            else:
+                attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
@@ -490,7 +528,7 @@ class TFOpMapperNHWC(OpMapper):
             "dilation": dilations[2:4],
             "groups": k_size[3] * in_shape[1],
             "use_cudnn": False,
-            "padding": string(pad_mode)
+            "padding": padding
         }
         node.fluid_code.add_layer("conv2d",
                                   inputs=input,
@@ -585,9 +623,14 @@ class TFOpMapperNHWC(OpMapper):
         attr = {
             "pool_size": k_size[2:4],
             "pool_type": string("avg"),
-            "pool_stride": strides[2:4],
-            "pool_padding": string(pad_mode)
+            "pool_stride": strides[2:4]
         }
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[2], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[3], strides[3])
+            assert pad_h[0] == pad_h[1] and pad_w[0] == pad_w[
+                1], "Cannot map AvgPool"
+            attr["pool_padding"] = [pad_h[0], pad_w[0]]
         node.fluid_code.add_layer("pool2d",
                                   inputs=input,
                                   output=node,
@@ -947,6 +990,20 @@ class TFOpMapperNHWC(OpMapper):
         else:
             self.data_format_propagation(node)
+        padding = 0
+        if pad_mode == "SAME":
+            pad_h = get_same_padding(in_shape[2], k_size[0], strides[2])
+            pad_w = get_same_padding(in_shape[3], k_size[1], strides[3])
+            if pad_h[0] == pad_h[1] and pad_w[0] == pad_w[1]:
+                padding = [pad_h[0], pad_w[0]]
+            else:
+                attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
+                node.fluid_code.add_layer("pad2d",
+                                          inputs=input,
+                                          output=node,
+                                          param_attr=attr)
+                input = node
         attr = {
             "bias_attr": False,
             "param_attr": string(kernel.layer_name),
@@ -954,14 +1011,29 @@ class TFOpMapperNHWC(OpMapper):
             "filter_size": k_size[0:2],
             "stride": strides[2:4],
             "dilation": dilations[2:4],
-            "padding": string(pad_mode),
-            "output_size": out_shape[1:3]
+            "padding": padding
         }
         node.fluid_code.add_layer("conv2d_transpose",
                                   inputs=input,
                                   output=node,
                                   param_attr=attr)
+        if pad_mode == "SAME":
+            if node.tf_data_format == "NHWC":
+                out_shape = [out_shape[i] for i in [0, 3, 1, 2]]
+            for i in range(4):
+                if out_shape[i] < 0:
+                    out_shape[i] = 999999
+            attr = {
+                "axes": [0, 1, 2, 3],
+                "starts": [0, 0, 0, 0],
+                "ends": out_shape
+            }
+            node.fluid_code.add_layer("slice",
+                                      inputs=node,
+                                      output=node,
+                                      param_attr=attr)
+
         if not channel_first:
             attr = {"perm": [0, 2, 3, 1]}
             node.fluid_code.add_layer("transpose",
@@ -1109,7 +1181,6 @@ class TFOpMapperNHWC(OpMapper):
         else:
             shape = self.decoder.infer_shape_tensor(shape)
         attr = {"shape": shape, "min": 0.0, "max": 0.9999}
-
         if shape[0] < 0:
             input = self.batch_node
             node.fluid_code.add_layer("uniform_random_batch_size_like",
......