提交 2921ad4e 编写于 作者: J jiangjiajun@baidu.com

Support TDNN (time-delay neural network) models by fusing the TensorFlow dilated-convolution pattern (SpaceToBatchND → ExpandDims → Conv2D → Squeeze → BatchToSpaceND).

上级 64233f6b
......@@ -90,11 +90,13 @@ def tf2paddle(model_path,
version = tf.__version__
if version >= '2.0.0' or version < '1.0.0':
print(
"1.0.0<=tensorflow<2.0.0 is required, and v1.14.0 is recommended"
"[ERROR] 1.0.0<=tensorflow<2.0.0 is required, and v1.14.0 is recommended"
)
return
except:
print("Tensorflow is not installed, use \"pip install tensorflow\".")
print(
"[ERROR] Tensorflow is not installed, use \"pip install tensorflow\"."
)
return
from x2paddle.decoder.tf_decoder import TFDecoder
......@@ -140,7 +142,7 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto, params_merge=False):
if (int(ver_part[0]) == 3 and int(ver_part[1]) >= 6) \
or (int(ver_part[0]) > 3):
version_satisfy = True
assert version_satisfy, 'google.protobuf >= 3.6.0 is required'
assert version_satisfy, '[ERROR] google.protobuf >= 3.6.0 is required'
print("Now translating model from caffe to paddle.")
model = CaffeDecoder(proto, weight, caffe_proto)
mapper = CaffeOpMapper(model)
......@@ -156,10 +158,10 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
import onnx
version = onnx.version.version
if version != '1.6.0':
print("onnx==1.6.0 is required")
print("[ERROR] onnx==1.6.0 is required")
return
except:
print("onnx is not installed, use \"pip install onnx==1.6.0\".")
print("[ERROR] onnx is not installed, use \"pip install onnx==1.6.0\".")
return
print("Now translating model from onnx to paddle.")
......@@ -199,21 +201,23 @@ def main():
import onnxruntime as rt
version = rt.__version__
if version != '1.0.0':
print("onnxruntime==1.0.0 is required")
print("[ERROR] onnxruntime==1.0.0 is required")
return
except:
print(
"onnxruntime is not installed, use \"pip install onnxruntime==1.0.0\"."
"[ERROR] onnxruntime is not installed, use \"pip install onnxruntime==1.0.0\"."
)
try:
import paddle
v0, v1, v2 = paddle.__version__.split('.')
if int(v0) != 1 or int(v1) < 6:
print("paddlepaddle>=1.6.0 is required")
print("[ERROR] paddlepaddle>=1.6.0 is required")
return
except:
print("paddlepaddle not installed, use \"pip install paddlepaddle\"")
print(
"[ERROR] paddlepaddle not installed, use \"pip install paddlepaddle\""
)
if args.framework == "tensorflow":
assert args.model is not None, "--model should be defined while translating tensorflow model"
......
......@@ -36,6 +36,8 @@ class Layer(object):
if self.is_custom_layer:
layer_code = layer_code + self.op + "("
elif self.op == "=":
layer_code = layer_code
else:
layer_code = layer_code + "fluid.layers." + self.op + "("
......@@ -70,11 +72,15 @@ class Layer(object):
elif isinstance(self.inputs, GraphNode):
if hasattr(self.inputs, "index"):
layer_code += (self.inputs.layer_name +
"[{}]".format(self.inputs.index) + ", ")
"[{}]".format(self.inputs.index))
else:
layer_code += (self.inputs.layer_name + ", ")
layer_code += (self.inputs.layer_name)
if self.op != "=":
layer_code += ", "
elif isinstance(self.inputs, six.string_types):
layer_code += (self.inputs + ", ")
layer_code += (self.inputs)
if self.op != "=":
layer_code += ", "
else:
raise Exception("Unknown type of inputs.")
......@@ -85,7 +91,9 @@ class Layer(object):
layer_code = layer_code + key + "={}, ".format(value)
layer_code = layer_code.strip(", ")
return layer_code + ")"
if self.op != "=":
layer_code += ")"
return layer_code
class FluidCode(object):
......
......@@ -136,6 +136,7 @@ class TFGraph(Graph):
# tensorflow graph optimize
self._remove_isolated_node()
self._optimize_dialiation_conv()
self._remove_identity_node()
self._remove_cast_node()
......@@ -175,6 +176,34 @@ class TFGraph(Graph):
idx = self.topo_sort.index(node_name)
del self.topo_sort[idx]
def _optimize_dialiation_conv(self):
    """Fuse the TF dilated-convolution expansion back into a single conv.

    TensorFlow lowers a dilated Conv2D into the node chain
        SpaceToBatchND -> ExpandDims -> Conv2D -> Squeeze -> BatchToSpaceND
    When that exact chain is found, the two space/batch reshuffling nodes
    are marked ``skip`` (the op mapper then emits them as plain
    assignments) and the dilation rate, read from the SpaceToBatchND
    block-shape input, is attached to the Conv2D node as ``dilation``.

    NOTE(review): like the original, this assumes each node in the chain
    has at least one entry in ``outputs`` and that ``inputs[1]`` of the
    SpaceToBatchND node is a Const whose ``value`` is an ndarray — an
    unmatched graph raises IndexError/KeyError exactly as before.
    """
    # Expected successor types, in order, after the SpaceToBatchND node.
    successor_chain = ('ExpandDims', 'Conv2D', 'Squeeze', 'BatchToSpaceND')
    for name in list(self.node_map.keys()):
        node = self.node_map[name]
        if node.layer_type != "SpaceToBatchND":
            continue
        # Walk the single-successor chain; abandon this node on mismatch.
        chain_nodes = [node]
        matched = True
        for expected_type in successor_chain:
            successor = self.node_map[chain_nodes[-1].outputs[0]]
            if successor.layer_type != expected_type:
                matched = False
                break
            chain_nodes.append(successor)
        if not matched:
            continue
        space_to_batch, _, conv_node, _, batch_to_space = chain_nodes
        # The reshuffling nodes become pass-throughs; the conv carries
        # the dilation rate instead.
        space_to_batch.skip = True
        batch_to_space.skip = True
        block_shape = self.node_map[space_to_batch.inputs[1]]
        conv_node.dilation = block_shape.value.tolist()
def _remove_isolated_node(self):
# delete isolated nodes
isolated_nodes = list()
......
......@@ -40,6 +40,7 @@ class TFOpMapperNHWC(OpMapper):
'Sigmoid': ['sigmoid'],
'Exp': ['exp'],
'Rsqrt': ['rsqrt'],
'Sqrt': ['sqrt'],
'swish_f32': ['swish'],
'Tanh': ['tanh'],
'LeakyRelu': ['leaky_relu', {
......@@ -48,6 +49,7 @@ class TFOpMapperNHWC(OpMapper):
}
elementwise_ops = {
'Add': 'elementwise_add',
'AddV2': 'elementwise_add',
'RealDiv': 'elementwise_div',
'Sub': 'elementwise_sub',
'Maximum': 'elementwise_max',
......@@ -90,10 +92,12 @@ class TFOpMapperNHWC(OpMapper):
if len(unsupported_ops) > 0:
continue
func = getattr(self, op)
try:
func(node)
except:
unsupported_ops.add(op)
else:
unsupported_ops.add(op)
continue
if len(unsupported_ops) > 0:
print("========= {} OPs are not supported yet ===========".format(
len(unsupported_ops)))
......@@ -342,7 +346,6 @@ class TFOpMapperNHWC(OpMapper):
def Conv2D(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
kernel = self.graph.get_node(node.layer.input[1], copy=True)
assert kernel.layer_type == "Const", "Kernel of Conv2D should be Const"
self.add_omit_nodes(kernel.layer_name, node.layer_name)
in_shape = input.out_shapes[0]
......@@ -358,8 +361,12 @@ class TFOpMapperNHWC(OpMapper):
pad_mode = node.get_attr("padding").decode()
channel_first = data_format == "NCHW"
if kernel.layer_type == 'Const':
kernel_value = kernel.value
else:
kernel_value = self.decoder.infer_tensor(kernel)
self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
kernel.value, (3, 2, 0, 1))
kernel_value, (3, 2, 0, 1))
if not channel_first:
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
......@@ -381,6 +388,11 @@ class TFOpMapperNHWC(OpMapper):
"dilation": dilations[2:4],
"padding": string(pad_mode)
}
if hasattr(node, 'dilation') and attr['dilation'] == [1, 1]:
if len(node.dilation) == 1:
attr['dilation'] = [1, node.dilation[0]]
node.fluid_code.add_layer("conv2d",
inputs=input,
output=node,
......@@ -1135,3 +1147,39 @@ class TFOpMapperNHWC(OpMapper):
inputs=inputs,
output=node,
param_attr=None)
def ExpandDims(self, node):
    """Map TF ExpandDims onto the fluid ``unsqueeze`` layer.

    The axis operand (second input) is consumed here rather than emitted
    as its own node: read directly when it is a Const, otherwise resolved
    through the decoder's tensor inference, and then marked as omitted.
    """
    x = self.graph.get_node(node.layer.input[0], copy=True)
    y = self.graph.get_node(node.layer.input[1], copy=True)
    # Resolve the axis value from the Const node or via inference.
    if y.layer_type == 'Const':
        dim = y.value.tolist()
    else:
        dim = self.decoder.infer_tensor(y)
    self.add_omit_nodes(y.layer_name, node.layer_name)
    node.fluid_code.add_layer("unsqueeze",
                              inputs=x,
                              output=node,
                              param_attr={'axes': [dim]})
def BatchToSpaceND(self, node):
    """Handle TF BatchToSpaceND.

    Only supported as the tail of a fused dilated-convolution pattern,
    in which case the graph optimizer has set ``node.skip`` and the op
    degenerates to a plain assignment of its first input; any standalone
    occurrence raises.

    Raises:
        Exception: when the node is not part of a fused dilation pattern.
    """
    # The second input (block shape) was fetched but never used in the
    # original; it is only meaningful for the unsupported standalone case,
    # so the lookup is dropped (graph.get_node is a read-only lookup).
    x = self.graph.get_node(node.layer.input[0], copy=True)
    if hasattr(node, 'skip') and node.skip:
        node.fluid_code.add_layer("=",
                                  inputs=x,
                                  output=node,
                                  param_attr=None)
    else:
        raise Exception("BatchToSpaceND is not supported")
def SpaceToBatchND(self, node):
    """Handle TF SpaceToBatchND.

    Only supported as the head of a fused dilated-convolution pattern,
    in which case the graph optimizer has set ``node.skip`` and the op
    degenerates to a plain assignment of its first input; any standalone
    occurrence raises.

    Raises:
        Exception: when the node is not part of a fused dilation pattern.
    """
    # The second input (block shape) was fetched but never used in the
    # original; the optimizer already consumed it when computing the conv
    # dilation, so the lookup is dropped (graph.get_node is a read-only
    # lookup).
    x = self.graph.get_node(node.layer.input[0], copy=True)
    if hasattr(node, 'skip') and node.skip:
        node.fluid_code.add_layer("=",
                                  inputs=x,
                                  output=node,
                                  param_attr=None)
    else:
        raise Exception("SpaceToBatchND is not supported")
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册