Unverified commit 2b78942c authored by WJJ1995, committed by GitHub

add BatchToSpaceND and SpaceToBatchND op convert (#557)

* add BatchToSpaceND and SpaceToBatchND op convert

* deal with comments
Parent 64c41a03
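
The two converters added here decompose TensorFlow's BatchToSpaceND and SpaceToBatchND into primitive Paddle kernels: reshape -> transpose -> reshape -> crop, and zero-pad -> reshape -> transpose -> reshape, respectively. As a reading aid, here is a minimal NumPy sketch of the same decompositions for the common 4-D NHWC case with a 2-D block_shape; the standalone functions and their names are illustrative only, not part of this commit:

import numpy as np

def batch_to_space_nd(x, block_shape, crops):
    # reshape -> transpose -> reshape -> crop, mirroring BatchToSpaceND below
    n, h, w, c = x.shape
    b0, b1 = block_shape
    y = x.reshape([b0, b1, n // (b0 * b1), h, w, c])    # split batch into the block grid
    y = y.transpose([2, 3, 0, 4, 1, 5])                 # (N', H, b0, W, b1, C)
    y = y.reshape([n // (b0 * b1), h * b0, w * b1, c])  # merge block factors into H and W
    (top, bottom), (left, right) = crops
    return y[:, top:h * b0 - bottom, left:w * b1 - right, :]

def space_to_batch_nd(x, block_shape, paddings):
    # zero-pad -> reshape -> transpose -> reshape, mirroring SpaceToBatchND below
    (top, bottom), (left, right) = paddings
    x = np.pad(x, [(0, 0), (top, bottom), (left, right), (0, 0)])
    n, h, w, c = x.shape
    b0, b1 = block_shape
    y = x.reshape([n, h // b0, b0, w // b1, b1, c])       # split H and W by block factors
    y = y.transpose([2, 4, 0, 1, 3, 5])                   # (b0, b1, N, H', W', C)
    return y.reshape([n * b0 * b1, h // b0, w // b1, c])  # fold block grid into the batch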
@@ -58,8 +58,7 @@ class TFOpMapper(OpMapper):
'swish_f32': ['paddle.nn.Swish'],
'Tanh': ['paddle.nn.Tanh'],
'Softplus': ['paddle.nn.Softplus'],
'LeakyRelu': ['paddle.nn.LeakyReLU',
dict(alpha='negative_slope')],
'LeakyRelu': ['paddle.nn.LeakyReLU', dict(alpha='negative_slope')],
'Softmax': ['paddle.nn.Softmax'],
'Floor': ['paddle.floor'],
'Erf': ['paddle.erf'],
@@ -96,7 +95,8 @@ class TFOpMapper(OpMapper):
self.nn_name2id = dict()
self.input_index = 0
self.inputs_info = dict()
self.paddle_graph = PaddleGraph(parent_layer=None, graph_type="dygraph", source_type="tf")
self.paddle_graph = PaddleGraph(
parent_layer=None, graph_type="dygraph", source_type="tf")
self.paddle_graph.outputs = self.graph.output_nodes
not_placeholder = list()
@@ -149,8 +149,8 @@ class TFOpMapper(OpMapper):
return True
else:
if len(unsupported_ops) > 0:
print("\n========= {} OPs are not supported yet ===========".format(
len(unsupported_ops)))
print("\n========= {} OPs are not supported yet ===========".
format(len(unsupported_ops)))
for op in unsupported_ops:
print("========== {} ============".format(op))
return False
@@ -196,7 +196,10 @@ class TFOpMapper(OpMapper):
inputs={"x": x.name,
"y": y.name},
outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
self.paddle_graph.layers[layer_id].input_shapes = {
"x": x_shape,
"y": y_shape
}
def bool_map(self, node):
op_type = self.bool_ops[node.layer_type]
@@ -251,7 +254,8 @@ class TFOpMapper(OpMapper):
if perm.layer_type == "Const":
perm = perm.value.tolist()
else:
perm = self.decoder.infer_tensor(perm, use_diff_inputs=False).tolist()
perm = self.decoder.infer_tensor(
perm, use_diff_inputs=False).tolist()
self.paddle_graph.add_layer(
"paddle.transpose",
@@ -263,9 +267,7 @@ class TFOpMapper(OpMapper):
if len(node.layer.input) == 1:
cond = self.graph.get_input_node(node, 0)
self.paddle_graph.add_layer(
"paddle.nonzero",
inputs={"x": cond.name},
outputs=[node.name])
"paddle.nonzero", inputs={"x": cond.name}, outputs=[node.name])
else:
cond = self.graph.get_input_node(node, 0)
x = self.graph.get_input_node(node, 1)
@@ -300,10 +302,7 @@ class TFOpMapper(OpMapper):
layer_attrs["fill_value"] = input_value.value
self.paddle_graph.add_layer(
"paddle.full",
inputs=inputs,
outputs=[node.name],
**layer_attrs)
"paddle.full", inputs=inputs, outputs=[node.name], **layer_attrs)
def DepthToSpace(self, node):
input = self.graph.get_input_node(node, 0)
@@ -419,7 +418,8 @@ class TFOpMapper(OpMapper):
if kernel.layer_type == 'Const':
kernel_value = kernel.value
else:
kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False)
kernel_value = self.decoder.infer_tensor(
kernel, use_diff_inputs=False)
kernel_weight_name = op_name + ".weight"
self.params[kernel_weight_name] = numpy.transpose(kernel_value,
(3, 2, 0, 1))
@@ -444,7 +444,6 @@ class TFOpMapper(OpMapper):
outputs=[input_name],
shape=[0, k_size[2], 0, 0])
self.paddle_graph.add_layer(
kernel="paddle.nn.Conv2D",
inputs={"input": input_name},
@@ -485,7 +484,8 @@ class TFOpMapper(OpMapper):
if kernel.layer_type == 'Const':
kernel_value = kernel.value
else:
kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False)
kernel_value = self.decoder.infer_tensor(
kernel, use_diff_inputs=False)
kernel_weight_name = op_name + ".weight"
self.params[kernel_weight_name] = numpy.transpose(kernel_value,
(4, 3, 0, 1, 2))
@@ -569,10 +569,14 @@ class TFOpMapper(OpMapper):
else:
n, c, h, w = input.out_shapes[0]
self.params["{}_{}".format(node.name, gamma.name)] = self.params[gamma.name]
self.params["{}_{}".format(node.name, beta.name)] = self.params[beta.name]
self.params["{}_{}".format(node.name, moving_mean.name)] = self.params[moving_mean.name]
self.params["{}_{}".format(node.name, moving_var.name)] = self.params[moving_var.name]
self.params["{}_{}".format(node.name, gamma.name)] = self.params[
gamma.name]
self.params["{}_{}".format(node.name, beta.name)] = self.params[
beta.name]
self.params["{}_{}".format(node.name, moving_mean.name)] = self.params[
moving_mean.name]
self.params["{}_{}".format(node.name, moving_var.name)] = self.params[
moving_var.name]
self.paddle_graph.add_layer(
kernel="paddle.nn.BatchNorm",
inputs={"input": input_name},
@@ -581,8 +585,10 @@ class TFOpMapper(OpMapper):
epsilon=node.get_attr("epsilon"),
param_attr=string("{}_{}".format(node.name, gamma.name)),
bias_attr=string("{}_{}".format(node.name, beta.name)),
moving_mean_name=string("{}_{}".format(node.name, moving_mean.name)),
moving_variance_name=string("{}_{}".format(node.name, moving_var.name)),
moving_mean_name=string("{}_{}".format(node.name,
moving_mean.name)),
moving_variance_name=string("{}_{}".format(node.name,
moving_var.name)),
is_test=True)
if data_format == "NHWC":
@@ -659,7 +665,6 @@ class TFOpMapper(OpMapper):
def MirrorPad(self, node):
self.Pad(node)
def PadV2(self, node):
self.Pad(node)
@@ -688,15 +693,12 @@ class TFOpMapper(OpMapper):
inputs={"input": input_name},
outputs=[node.name])
self.paddle_graph.add_layer(
kernel="paddle.prod",
inputs={"x": node.name},
outputs=[node.name])
kernel="paddle.prod", inputs={"x": node.name}, outputs=[node.name])
def Ceil(self, node):
input = self.graph.get_input_node(node, 0)
self.paddle_graph.add_layer(
kernel="paddle.ceil",
inputs={"x": input.name},
kernel="paddle.ceil", inputs={"x": input.name},
outputs=[node.name])
def ArgMax(self, node):
@@ -765,7 +767,6 @@ class TFOpMapper(OpMapper):
self.params[kernel_weight_name] = numpy.transpose(kernel.value,
(2, 3, 0, 1))
input_name = input.name
if data_format == "NHWC":
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
@@ -833,15 +834,6 @@ class TFOpMapper(OpMapper):
stride=strides[2:4],
padding=string(pad_mode))
# self.paddle_graph.add_layer(
# kernel="fluid.layers.pool2d",
# inputs={"input": input_name},
# outputs=[node.name],
# pool_size=k_size[2:4],
# pool_type=string("avg"),
# pool_stride=strides[2:4],
# pool_padding=string(pad_mode))
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="paddle.transpose",
@@ -884,7 +876,9 @@ class TFOpMapper(OpMapper):
axis = 1
else:
raise Exception("Unexpected situation happend in Unpack OP")
layer_outputs = ["{}_p{}".format(node.layer_name, i) for i in range(num)]
layer_outputs = [
"{}_p{}".format(node.layer_name, i) for i in range(num)
]
if len(layer_outputs) == 1:
layer_outputs[0] = "[{}]".format(node.layer_name)
self.paddle_graph.add_layer(
@@ -1087,7 +1081,8 @@ class TFOpMapper(OpMapper):
kernel="paddle.split",
inputs={"x": input.name},
outputs=[
"{}_p{}".format(node.layer_name, i) for i in range(len(size_splits))
"{}_p{}".format(node.layer_name, i)
for i in range(len(size_splits))
],
num_or_sections=size_splits,
axis=dim)
@@ -1103,7 +1098,8 @@ class TFOpMapper(OpMapper):
begin = begin.value.tolist()
attrs['offsets'] = begin
else:
begin = self.decoder.infer_tensor(begin, use_diff_inputs=False).tolist()
begin = self.decoder.infer_tensor(
begin, use_diff_inputs=False).tolist()
attrs['offsets'] = begin
if size.layer_type == "Const":
size = size.value.tolist()
@@ -1118,19 +1114,18 @@ class TFOpMapper(OpMapper):
shape=shape)
inputs['shape'] = reshape_name
self.paddle_graph.add_layer(
kernel="paddle.crop",
inputs=inputs,
outputs=[node.name],
**attrs)
kernel="paddle.crop", inputs=inputs, outputs=[node.name], **attrs)
def ResizeNearestNeighbor(self, node):
input = self.graph.get_input_node(node, 0)
resize_shape = self.graph.get_input_node(node, 1)
data_format = "NHWC"
inputs = {"x": input.name}
attrs = {"align_corners": node.get_attr("align_corners"),
attrs = {
"align_corners": node.get_attr("align_corners"),
"mode": string("nearest"),
"align_mode": 1}
"align_mode": 1
}
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
@@ -1172,9 +1167,11 @@ class TFOpMapper(OpMapper):
resize_shape = self.graph.get_input_node(node, 1)
data_format = "NHWC"
inputs = {"x": input.name}
attrs = {"align_corners": node.get_attr("align_corners"),
attrs = {
"align_corners": node.get_attr("align_corners"),
"mode": string("bilinear"),
"align_mode": 1}
"align_mode": 1
}
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
@@ -1279,15 +1276,17 @@ class TFOpMapper(OpMapper):
if out_shape.layer_type == "Const":
out_shape = out_shape.value.tolist()
else:
out_shape = self.decoder.infer_tensor(out_shape,
out_shape=node.out_shapes[0])
out_shape = self.decoder.infer_tensor(
out_shape, out_shape=node.out_shapes[0])
in_shape = input.out_shapes[0]
if in_shape.count(-1) > 2:
in_shape = self.decoder.infer_tensor(input, use_diff_inputs=False).shape
in_shape = self.decoder.infer_tensor(
input, use_diff_inputs=False).shape
k_size = kernel.out_shapes[0]
if k_size.count(-1) > 2:
k_size = self.decoder.infer_tensor(kernel, use_diff_inputs=False).shape
k_size = self.decoder.infer_tensor(
kernel, use_diff_inputs=False).shape
pad_mode = node.get_attr("padding").decode()
strides = node.get_attr("strides")
@@ -1310,19 +1309,6 @@ class TFOpMapper(OpMapper):
perm=[0, 3, 1, 2])
input_name = transpose_name
# TODO(syf): The output_size is not set.
# self.paddle_graph.add_layer(
# kernel="paddle.nn.Conv2DTranspose",
# inputs={"input": input_name},
# outputs=layer_outputs,
# weight_attr=string(kernel_name),
# bias_attr=False,
# in_channels=k_size[3],
# out_channels=k_size[2],
# kernel_size=k_size[0:2],
# stride=strides[2:4],
# dilation=dilations[2:4],
# padding=string(pad_mode))
self.paddle_graph.add_layer(
"self.create_parameter",
inputs={},
@@ -1332,8 +1318,11 @@ class TFOpMapper(OpMapper):
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.conv2d_transpose",
inputs={"x": input_name,
"weight": "{}_{}".format(node.name, kernel_name).replace(".", "_")},
inputs={
"x": input_name,
"weight":
"{}_{}".format(node.name, kernel_name).replace(".", "_")
},
outputs=[node.name],
bias=None,
stride=strides[2:4],
@@ -1361,10 +1350,7 @@ class TFOpMapper(OpMapper):
inputs["repeat_times"] = repeat_times.name
self.paddle_graph.add_layer(
kernel="paddle.tile",
inputs=inputs,
outputs=[node.name],
**attr)
kernel="paddle.tile", inputs=inputs, outputs=[node.name], **attr)
def Range(self, node):
start = self.graph.get_input_node(node, 0)
@@ -1397,10 +1383,7 @@ class TFOpMapper(OpMapper):
attr["dtype"] = string(node.dtype)
self.paddle_graph.add_layer(
kernel="paddle.arange",
inputs=inputs,
outputs=[node.name],
**attr)
kernel="paddle.arange", inputs=inputs, outputs=[node.name], **attr)
def SquaredDifference(self, node):
x = self.graph.get_input_node(node, 0)
@@ -1411,14 +1394,20 @@ class TFOpMapper(OpMapper):
# TODO(syf)
layer_id = self.paddle_graph.add_layer(
"paddle.subtract", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
self.paddle_graph.layers[layer_id].input_shapes = {
"x": x_shape,
"y": y_shape
}
inputs = {"x": node.name, "y": node.name}
x_shape = node.out_shapes[0]
y_shape = node.out_shapes[0]
layer_id = self.paddle_graph.add_layer(
"paddle.multiply", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
self.paddle_graph.layers[layer_id].input_shapes = {
"x": x_shape,
"y": y_shape
}
def OneHot(self, node):
input = self.graph.get_input_node(node, 0)
@@ -1472,10 +1461,7 @@ class TFOpMapper(OpMapper):
outputs=[input_name],
dtype=string("bool"))
self.paddle_graph.add_layer(
"paddle.all",
inputs={"x": input_name},
outputs=[node.name],
**attr)
"paddle.all", inputs={"x": input_name}, outputs=[node.name], **attr)
node.layer.attr['dtype'].type = 10
@@ -1496,10 +1482,7 @@ class TFOpMapper(OpMapper):
shape=[-1])
inputs = {'x': embeddings.name, 'index': index_name}
self.paddle_graph.add_layer(
"paddle.gather",
inputs=inputs,
outputs=[node.name],
axis=axis)
"paddle.gather", inputs=inputs, outputs=[node.name], axis=axis)
if len(index.out_shapes[0]) != 1:
out_shape = node.out_shapes[0]
self.paddle_graph.add_layer(
@@ -1513,9 +1496,7 @@ class TFOpMapper(OpMapper):
index = self.graph.get_input_node(node, 1)
inputs = {'x': x.name, 'index': index.name}
self.paddle_graph.add_layer(
"paddle.gather_nd",
inputs=inputs,
outputs=[node.name])
"paddle.gather_nd", inputs=inputs, outputs=[node.name])
def ExpandDims(self, node):
x = self.graph.get_input_node(node, 0, copy=True)
@@ -1530,10 +1511,7 @@ class TFOpMapper(OpMapper):
else:
inputs['axis'] = y.name
self.paddle_graph.add_layer(
"paddle.unsqueeze",
inputs=inputs,
outputs=[node.name],
**attr)
"paddle.unsqueeze", inputs=inputs, outputs=[node.name], **attr)
def ReverseV2(self, node):
x = self.graph.get_input_node(node, 0)
@@ -1548,7 +1526,114 @@ class TFOpMapper(OpMapper):
else:
inputs['axis'] = axis.name
self.paddle_graph.add_layer(
"paddle.flip",
inputs=inputs,
"paddle.flip", inputs=inputs, outputs=[node.name], **attr)
def BatchToSpaceND(self, node):
'''
reshape->transpose->reshape->crop
'''
x = self.graph.get_input_node(node, 0)
block_shape = self.graph.get_input_node(node, 1)
crops = self.graph.get_input_node(node, 2)
if block_shape.layer_type == "Const":
block_shape = block_shape.value.tolist()
if crops.layer_type == "Const":
crops = crops.value.tolist()
data_format = x.get_attr("data_format").decode()
if data_format == "NHWC":
n, h, w, c = x.out_shapes[0]
else:
n, c, h, w = x.out_shapes[0]
input_name = x.name
#reshape
shape = block_shape + [-1, h, w, c]
reshape_name = gen_name("batch_to_space", "reshape")
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": input_name},
outputs=[reshape_name],
shape=shape)
#transpose
perm = [len(block_shape)] + list(j for i in range(len(block_shape)) for j in (i + len(block_shape) + 1, i)) +\
list(i + 2*len(block_shape) + 1 for i in range(len(x.out_shapes[0]) - len(block_shape) - 1))
transpose_name = gen_name("batch_to_space", "transpose")
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": reshape_name},
outputs=[transpose_name],
perm=perm)
#reshape
shape = [-1] + list(i * j
for i, j in zip(block_shape, x.out_shapes[0][
1:])) + x.out_shapes[0][1 + len(block_shape):]
reshape_name = gen_name("batch_to_space", "reshape")
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": transpose_name},
outputs=[reshape_name],
shape=shape)
#crop
attrs = {}
crop_shape = shape
crop_offsets = [0] * len(shape)
for i in range(len(crops)):
crop_shape[i + 1] = crop_shape[i + 1] - crops[i][0] - crops[i][1]
crop_offsets[i + 1] = crops[i][0]
attrs['shape'] = crop_shape
attrs['offsets'] = crop_offsets
self.paddle_graph.add_layer(
kernel="paddle.crop",
inputs={"x": reshape_name},
outputs=[node.name],
**attr)
**attrs)
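
The one-line perm expression above is the least obvious step of the decomposition. A worked check of what it produces in the common case (4-D input, 2-D block_shape), consistent with the illustrative NumPy sketch near the top of this page:

# M = len(block_shape); rank = len(x.out_shapes[0]). Here M = 2, rank = 4.
M, rank = 2, 4
perm = [M] \
    + [j for i in range(M) for j in (i + M + 1, i)] \
    + [i + 2 * M + 1 for i in range(rank - M - 1)]
print(perm)  # [2, 3, 0, 4, 1, 5]
# After the first reshape the axes are (b0, b1, N', H, W, C); this perm
# reorders them to (N', H, b0, W, b1, C) so that the second reshape can
# merge each spatial axis with its block factor.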
def SpaceToBatchND(self, node):
'''
zero-pad->reshape->transpose->reshape
'''
x = self.graph.get_input_node(node, 0)
block_shape = self.graph.get_input_node(node, 1)
paddings = self.graph.get_input_node(node, 2)
if block_shape.layer_type == "Const":
block_shape = block_shape.value.tolist()
if paddings.layer_type == "Const":
paddings = paddings.value.flatten().tolist()
input_name = x.name
#zero-pad
constant_values = 0
pad_name = gen_name("space_to_batch", "pad")
paddings = [0, 0] + paddings + [0, 0]
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.pad",
inputs={"x": input_name},
outputs=[pad_name],
pad=paddings,
value=constant_values)
#reshape
n, h, w, c = x.out_shapes[0]
h = h + paddings[2] + paddings[3]
w = w + paddings[4] + paddings[5]
shape = [
n, h // block_shape[0], block_shape[0], w // block_shape[1],
block_shape[1], c
]
reshape_name = gen_name("space_to_batch", "reshape")
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": pad_name},
outputs=[reshape_name],
shape=shape)
#transpose
transpose_name = gen_name("space_to_batch", "transpose")
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": reshape_name},
outputs=[transpose_name],
perm=[2, 4, 0, 1, 3, 5])
#reshape
shape = [-1, h // block_shape[0], w // block_shape[1], c]
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": transpose_name},
outputs=[node.name],
shape=shape)
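
BatchToSpaceND and SpaceToBatchND are inverses when crops equal paddings, which gives a cheap round-trip sanity check for the decomposition. Using the illustrative NumPy sketch from the top of this page (not part of the commit):

x = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
y = space_to_batch_nd(x, [2, 2], [[0, 0], [0, 0]])
assert y.shape == (8, 2, 2, 3)   # batch grows by prod(block_shape); H and W shrink
z = batch_to_space_nd(y, [2, 2], [[0, 0], [0, 0]])
assert np.array_equal(x, z)      # the round trip recovers the input exactly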
@@ -60,8 +60,8 @@ class TFOpMapper(OpMapper):
'swish_f32': ['paddle.nn.functional.swish'],
'Tanh': ['paddle.tanh'],
'Softplus': ['paddle.nn.functional.softplus'],
'LeakyRelu': ['paddle.nn.functional.leaky_relu',
dict(alpha='negative_slope')],
'LeakyRelu':
['paddle.nn.functional.leaky_relu', dict(alpha='negative_slope')],
'Floor': ['paddle.floor'],
'Erf': ['paddle.erf'],
'Square': ['paddle.square']
@@ -95,7 +95,8 @@ class TFOpMapper(OpMapper):
if not self.op_checker():
raise Exception("Model is not supported yet.")
self.params = dict()
self.paddle_graph = PaddleGraph(parent_layer=None, graph_type="static", source_type="tf")
self.paddle_graph = PaddleGraph(
parent_layer=None, graph_type="static", source_type="tf")
self.params_output2id = dict()
not_placeholder = list()
@@ -150,8 +151,8 @@ class TFOpMapper(OpMapper):
return True
else:
if len(unsupported_ops) > 0:
print("\n========= {} OPs are not supported yet ===========".format(
len(unsupported_ops)))
print("\n========= {} OPs are not supported yet ===========".
format(len(unsupported_ops)))
for op in unsupported_ops:
print("========== {} ============".format(op))
return False
@@ -186,7 +187,10 @@ class TFOpMapper(OpMapper):
inputs={"x": x.name,
"y": y.name},
outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
self.paddle_graph.layers[layer_id].input_shapes = {
"x": x_shape,
"y": y_shape
}
def bool_map(self, node):
op_type = self.bool_ops[node.layer_type]
@@ -241,7 +245,8 @@ class TFOpMapper(OpMapper):
if perm.layer_type == "Const":
perm = perm.value.tolist()
else:
perm = self.decoder.infer_tensor(perm, use_diff_inputs=False).tolist()
perm = self.decoder.infer_tensor(
perm, use_diff_inputs=False).tolist()
self.paddle_graph.add_layer(
kernel="paddle.transpose",
@@ -263,10 +268,7 @@ class TFOpMapper(OpMapper):
attr["fill_value"] = input_value.value
self.paddle_graph.add_layer(
"paddle.full",
inputs=inputs,
outputs=[node.name],
**attr)
"paddle.full", inputs=inputs, outputs=[node.name], **attr)
if dims.layer_type != "Const":
self.paddle_graph.add_layer(
"paddle.reshape",
@@ -333,9 +335,7 @@ class TFOpMapper(OpMapper):
if len(node.layer.input) == 1:
cond = self.graph.get_input_node(node, 0)
self.paddle_graph.add_layer(
"paddle.nonzero",
inputs={"x": cond.name},
outputs=[node.name])
"paddle.nonzero", inputs={"x": cond.name}, outputs=[node.name])
else:
cond = self.graph.get_input_node(node, 0)
x = self.graph.get_input_node(node, 1)
@@ -409,7 +409,8 @@ class TFOpMapper(OpMapper):
kernel_value = kernel.value
kernel_weight_name = kernel.name.replace('/', '_')
else:
kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False)
kernel_value = self.decoder.infer_tensor(
kernel, use_diff_inputs=False)
if kernel.layer_type == 'Split':
kernel_weight_name = "{}_{}_kernel".format(node.name,
kernel.name)
@@ -447,7 +448,8 @@ class TFOpMapper(OpMapper):
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.conv2d",
inputs={"x": input_name, "weight": kernel_weight_name},
inputs={"x": input_name,
"weight": kernel_weight_name},
outputs=[node.name],
bias=None,
stride=strides[2:4],
@@ -479,7 +481,8 @@ class TFOpMapper(OpMapper):
kernel_value = kernel.value
kernel_weight_name = kernel.name.replace('/', '_')
else:
kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False)
kernel_value = self.decoder.infer_tensor(
kernel, use_diff_inputs=False)
if kernel.layer_type == 'Split':
kernel_weight_name = "{}_{}_kernel".format(node.name,
kernel.name)
@@ -517,7 +520,8 @@ class TFOpMapper(OpMapper):
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.conv3d",
inputs={"x": input_name, "weight": kernel_weight_name},
inputs={"x": input_name,
"weight": kernel_weight_name},
outputs=[node.name],
bias=None,
stride=strides[2:5],
@@ -565,11 +569,13 @@ class TFOpMapper(OpMapper):
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.batch_norm",
inputs={"x": input_name,
inputs={
"x": input_name,
"running_mean": moving_mean.name,
"running_var": moving_var.name,
"weight": gamma.name,
"bias": beta.name},
"bias": beta.name
},
outputs=[node.name],
epsilon=node.get_attr("epsilon"))
@@ -647,7 +653,6 @@ class TFOpMapper(OpMapper):
def MirrorPad(self, node):
self.Pad(node)
def PadV2(self, node):
self.Pad(node)
@@ -676,15 +681,12 @@ class TFOpMapper(OpMapper):
inputs={"input": input_name},
outputs=[node.name])
self.paddle_graph.add_layer(
kernel="paddle.prod",
inputs={"x": node.name},
outputs=[node.name])
kernel="paddle.prod", inputs={"x": node.name}, outputs=[node.name])
def Ceil(self, node):
input = self.graph.get_input_node(node, 0)
self.paddle_graph.add_layer(
kernel="paddle.ceil",
inputs={"x": input.name},
kernel="paddle.ceil", inputs={"x": input.name},
outputs=[node.name])
def ArgMax(self, node):
......@@ -861,7 +863,9 @@ class TFOpMapper(OpMapper):
axis = 1
else:
raise Exception("Unexpected situation happend in Unpack OP")
layer_outputs = ["{}_p{}".format(node.layer_name, i) for i in range(num)]
layer_outputs = [
"{}_p{}".format(node.layer_name, i) for i in range(num)
]
if len(layer_outputs) == 1:
layer_outputs[0] = "[{}]".format(node.layer_name)
self.paddle_graph.add_layer(
@@ -1064,7 +1068,8 @@ class TFOpMapper(OpMapper):
kernel="paddle.split",
inputs={"x": input.name},
outputs=[
"{}_p{}".format(node.layer_name, i) for i in range(len(size_splits))
"{}_p{}".format(node.layer_name, i)
for i in range(len(size_splits))
],
num_or_sections=size_splits,
axis=dim)
@@ -1080,15 +1085,8 @@ class TFOpMapper(OpMapper):
begin = begin.value.tolist()
attrs['offsets'] = begin
else:
# shape = begin.out_shapes[0]
# reshape_name = gen_name("slice", "reshape")
# self.paddle_graph.add_layer(
# kernel="fluid.layers.reshape",
# inputs={"x": begin.name},
# outputs=[reshape_name],
# shape=shape)
# inputs['offsets'] = reshape_name
begin = self.decoder.infer_tensor(begin, use_diff_inputs=False).tolist()
begin = self.decoder.infer_tensor(
begin, use_diff_inputs=False).tolist()
attrs['offsets'] = begin
if size.layer_type == "Const":
size = size.value.tolist()
@@ -1103,19 +1101,18 @@ class TFOpMapper(OpMapper):
shape=shape)
inputs['shape'] = reshape_name
self.paddle_graph.add_layer(
kernel="paddle.crop",
inputs=inputs,
outputs=[node.name],
**attrs)
kernel="paddle.crop", inputs=inputs, outputs=[node.name], **attrs)
def ResizeNearestNeighbor(self, node):
input = self.graph.get_input_node(node, 0)
resize_shape = self.graph.get_input_node(node, 1)
data_format = "NHWC"
inputs = {"x": input.name}
attrs = {"align_corners": node.get_attr("align_corners"),
attrs = {
"align_corners": node.get_attr("align_corners"),
"mode": string("nearest"),
"align_mode": 1}
"align_mode": 1
}
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
@@ -1157,9 +1154,11 @@ class TFOpMapper(OpMapper):
resize_shape = self.graph.get_input_node(node, 1)
data_format = "NHWC"
inputs = {"x": input.name}
attrs = {"align_corners": node.get_attr("align_corners"),
attrs = {
"align_corners": node.get_attr("align_corners"),
"mode": string("bilinear"),
"align_mode": 1}
"align_mode": 1
}
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
@@ -1261,15 +1260,17 @@ class TFOpMapper(OpMapper):
if out_shape.layer_type == "Const":
out_shape = out_shape.value.tolist()
else:
out_shape = self.decoder.infer_tensor(out_shape,
out_shape=node.out_shapes[0])
out_shape = self.decoder.infer_tensor(
out_shape, out_shape=node.out_shapes[0])
in_shape = input.out_shapes[0]
if in_shape.count(-1) > 2:
in_shape = self.decoder.infer_tensor(input, use_diff_inputs=False).shape
in_shape = self.decoder.infer_tensor(
input, use_diff_inputs=False).shape
k_size = kernel.out_shapes[0]
if k_size.count(-1) > 2:
k_size = self.decoder.infer_tensor(kernel, use_diff_inputs=False).shape
k_size = self.decoder.infer_tensor(
kernel, use_diff_inputs=False).shape
pad_mode = node.get_attr("padding").decode()
strides = node.get_attr("strides")
@@ -1302,8 +1303,11 @@ class TFOpMapper(OpMapper):
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.conv2d_transpose",
inputs={"x": input_name,
"weight": "{}_{}".format(node.name, kernel_name).replace(".", "_")},
inputs={
"x": input_name,
"weight":
"{}_{}".format(node.name, kernel_name).replace(".", "_")
},
outputs=[node.name],
bias=None,
stride=strides[2:4],
@@ -1330,12 +1334,10 @@ class TFOpMapper(OpMapper):
inputs["repeat_times"] = repeat_times.name
self.paddle_graph.add_layer(
kernel="paddle.tile",
inputs=inputs,
outputs=[node.name],
**attr)
kernel="paddle.tile", inputs=inputs, outputs=[node.name], **attr)
if not isinstance(repeat_times, list) and repeat_times.layer_type != "Const":
if not isinstance(repeat_times,
list) and repeat_times.layer_type != "Const":
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": node.name},
@@ -1372,10 +1374,7 @@ class TFOpMapper(OpMapper):
attr["dtype"] = string(node.dtype)
self.paddle_graph.add_layer(
kernel="paddle.arange",
inputs=inputs,
outputs=[node.name],
**attr)
kernel="paddle.arange", inputs=inputs, outputs=[node.name], **attr)
if start.layer_type != "Const" or \
limit.layer_type != "Const" or \
delta.layer_type != "Const":
@@ -1394,14 +1393,20 @@ class TFOpMapper(OpMapper):
# TODO(syf)
layer_id = self.paddle_graph.add_layer(
"paddle.subtract", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
self.paddle_graph.layers[layer_id].input_shapes = {
"x": x_shape,
"y": y_shape
}
inputs = {"x": node.name, "y": node.name}
x_shape = node.out_shapes[0]
y_shape = node.out_shapes[0]
layer_id = self.paddle_graph.add_layer(
"paddle.multiply", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
self.paddle_graph.layers[layer_id].input_shapes = {
"x": x_shape,
"y": y_shape
}
def OneHot(self, node):
input = self.graph.get_input_node(node, 0)
@@ -1455,10 +1460,7 @@ class TFOpMapper(OpMapper):
outputs=[input_name],
dtype=string("bool"))
self.paddle_graph.add_layer(
"paddle.all",
inputs={"x": input_name},
outputs=[node.name],
**attr)
"paddle.all", inputs={"x": input_name}, outputs=[node.name], **attr)
node.layer.attr['dtype'].type = 10
@@ -1479,10 +1481,7 @@ class TFOpMapper(OpMapper):
shape=[-1])
inputs = {'x': embeddings.name, 'index': index_name}
self.paddle_graph.add_layer(
"paddle.gather",
inputs=inputs,
outputs=[node.name],
axis=axis)
"paddle.gather", inputs=inputs, outputs=[node.name], axis=axis)
if len(index.out_shapes[0]) != 1:
out_shape = node.out_shapes[0]
self.paddle_graph.add_layer(
@@ -1496,9 +1495,7 @@ class TFOpMapper(OpMapper):
index = self.graph.get_input_node(node, 1)
inputs = {'x': x.name, 'index': index.name}
self.paddle_graph.add_layer(
"paddle.gather_nd",
inputs=inputs,
outputs=[node.name])
"paddle.gather_nd", inputs=inputs, outputs=[node.name])
def ExpandDims(self, node):
x = self.graph.get_input_node(node, 0, copy=True)
@@ -1513,10 +1510,7 @@ class TFOpMapper(OpMapper):
else:
inputs['axis'] = y.name
self.paddle_graph.add_layer(
"paddle.unsqueeze",
inputs=inputs,
outputs=[node.name],
**attr)
"paddle.unsqueeze", inputs=inputs, outputs=[node.name], **attr)
def ReverseV2(self, node):
x = self.graph.get_input_node(node, 0)
@@ -1531,8 +1525,114 @@ class TFOpMapper(OpMapper):
else:
inputs['axis'] = axis.name
self.paddle_graph.add_layer(
"paddle.flip",
inputs=inputs,
"paddle.flip", inputs=inputs, outputs=[node.name], **attr)
def BatchToSpaceND(self, node):
'''
reshape->transpose->reshape->crop
'''
x = self.graph.get_input_node(node, 0)
block_shape = self.graph.get_input_node(node, 1)
crops = self.graph.get_input_node(node, 2)
if block_shape.layer_type == "Const":
block_shape = block_shape.value.tolist()
if crops.layer_type == "Const":
crops = crops.value.tolist()
data_format = x.get_attr("data_format").decode()
if data_format == "NHWC":
n, h, w, c = x.out_shapes[0]
else:
n, c, h, w = x.out_shapes[0]
input_name = x.name
#reshape
shape = block_shape + [-1, h, w, c]
reshape_name = gen_name("batch_to_space", "reshape")
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": input_name},
outputs=[reshape_name],
shape=shape)
#transpose
perm = [len(block_shape)] + list(j for i in range(len(block_shape)) for j in (i + len(block_shape) + 1, i)) +\
list(i + 2*len(block_shape) + 1 for i in range(len(x.out_shapes[0]) - len(block_shape) - 1))
transpose_name = gen_name("batch_to_space", "transpose")
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": reshape_name},
outputs=[transpose_name],
perm=perm)
#reshape
shape = [-1] + list(i * j
for i, j in zip(block_shape, x.out_shapes[0][
1:])) + x.out_shapes[0][1 + len(block_shape):]
reshape_name = gen_name("batch_to_space", "reshape")
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": transpose_name},
outputs=[reshape_name],
shape=shape)
#crop
attrs = {}
crop_shape = shape
crop_offsets = [0] * len(shape)
for i in range(len(crops)):
crop_shape[i + 1] = crop_shape[i + 1] - crops[i][0] - crops[i][1]
crop_offsets[i + 1] = crops[i][0]
attrs['shape'] = crop_shape
attrs['offsets'] = crop_offsets
self.paddle_graph.add_layer(
kernel="paddle.crop",
inputs={"x": reshape_name},
outputs=[node.name],
**attr)
**attrs)
def SpaceToBatchND(self, node):
'''
zero-pad->reshape->transpose->reshape
'''
x = self.graph.get_input_node(node, 0)
block_shape = self.graph.get_input_node(node, 1)
paddings = self.graph.get_input_node(node, 2)
if block_shape.layer_type == "Const":
block_shape = block_shape.value.tolist()
if paddings.layer_type == "Const":
paddings = paddings.value.flatten().tolist()
input_name = x.name
#zero-pad
constant_values = 0
pad_name = gen_name("space_to_batch", "pad")
paddings = [0, 0] + paddings + [0, 0]
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.pad",
inputs={"x": input_name},
outputs=[pad_name],
pad=paddings,
value=constant_values)
#reshape
n, h, w, c = x.out_shapes[0]
h = h + paddings[2] + paddings[3]
w = w + paddings[4] + paddings[5]
shape = [
n, h // block_shape[0], block_shape[0], w // block_shape[1],
block_shape[1], c
]
reshape_name = gen_name("space_to_batch", "reshape")
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": pad_name},
outputs=[reshape_name],
shape=shape)
#transpose
transpose_name = gen_name("space_to_batch", "transpose")
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": reshape_name},
outputs=[transpose_name],
perm=[2, 4, 0, 1, 3, 5])
#reshape
shape = [-1, h // block_shape[0], w // block_shape[1], c]
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": transpose_name},
outputs=[node.name],
shape=shape)
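
Both the dygraph and static mappers implement the identical decomposition, so either can be spot-checked against TensorFlow's own semantics. A hedged example, assuming a TF 2.x install (where tf.space_to_batch is the public wrapper for the SpaceToBatchND op converted here) and the illustrative NumPy sketch from the top of this page:

import numpy as np
import tensorflow as tf

x = np.arange(1 * 4 * 4 * 1, dtype=np.float32).reshape(1, 4, 4, 1)
ref = tf.space_to_batch(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]]).numpy()
ours = space_to_batch_nd(x, [2, 2], [[0, 0], [0, 0]])  # NumPy sketch from above
assert np.array_equal(ref, ours)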