提交 ee48679c 编写于 作者: S SunAhong1993

add tf static

上级 fd3c33a8
......@@ -132,18 +132,9 @@ def tf2paddle(model_path,
graph_opt = GraphOptimizer(source_frame="tf", paddle_type=paddle_type)
graph_opt.optimize(mapper.paddle_graph)
else:
from x2paddle.optimizer.tensorflow.bias import BiasOpt
from x2paddle.optimizer.tensorflow.transpose import TransposeOpt
from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt
from x2paddle.optimizer.tensorflow.prelu import PReLUOpt
bias_opt = BiasOpt()
transpose_opt = TransposeOpt()
batch_norm_opt = BatchNormOpt()
prelu_opt = PReLUOpt()
bias_opt.run(mapper.paddle_graph)
batch_norm_opt.run(mapper.paddle_graph)
prelu_opt.run(mapper.paddle_graph)
transpose_opt.run(mapper.paddle_graph)
from x2paddle.optimizer.optimizer import GraphOptimizer
graph_opt = GraphOptimizer(source_frame="tf", paddle_type=paddle_type)
graph_opt.optimize(mapper.paddle_graph)
mapper.paddle_graph.gen_model(save_dir)
......
......@@ -285,7 +285,6 @@ class TFOpMapper(OpMapper):
layer_attrs["dtype"] = string(input_value.dtype)
layer_attrs["fill_value"] = input_value.value
self.paddle_graph.add_layer(
"paddle.full",
inputs=inputs,
......@@ -579,6 +578,9 @@ class TFOpMapper(OpMapper):
outputs=[node.name],
perm=[0, 2, 3, 1])
def FusedBatchNormV3(self, node):
    # FusedBatchNormV3 shares the attribute layout FusedBatchNorm reads here,
    # so delegate to the existing FusedBatchNorm mapper (defined elsewhere in
    # this class) rather than duplicating the mapping logic.
    self.FusedBatchNorm(node)
def Mean(self, node):
input = self.graph.get_input_node(node, 0)
reduce_idx = self.graph.get_input_node(node, 1)
......@@ -930,6 +932,23 @@ class TFOpMapper(OpMapper):
outputs=[node.name],
axis=axis)
def Concat(self, node):
    """Map the TF ``Concat`` op to ``paddle.concat``.

    For ``Concat`` the axis is input 0 and the tensors to join are
    inputs 1..N (unlike ``ConcatV2``, where the axis is the last input).
    The axis input must be a Const node.
    """
    inputs_list = list()
    for i in range(1, len(node.inputs)):
        inputs_list.append(self.graph.get_input_node(node, i))
    axis = self.graph.get_input_node(node, 0)
    # Fixed message: this handler maps Concat, not ConcatV2.
    assert axis.layer_type == "Const", "axis for Concat must be type Const"
    axis = axis.value
    if axis < 0:
        # Normalize a negative axis against the rank of the first input.
        axis += len(inputs_list[0].out_shapes[0])
    input_names = [i.name for i in inputs_list]
    self.paddle_graph.add_layer(
        kernel="paddle.concat",
        inputs={"x": input_names},
        outputs=[node.name],
        axis=axis)
def AddN(self, node):
inputs_list = list()
for i in range(len(node.inputs) - 1):
......@@ -1400,6 +1419,7 @@ class TFOpMapper(OpMapper):
inputs = {"x": x.name, "y": y.name}
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
# TODO(syf)
layer_id = self.paddle_graph.add_layer(
"fluid.layers.elementwise_sub", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
......
......@@ -89,6 +89,7 @@ class CaffeOpMapper(OpMapper):
input_shape.append(last_node.output_shape[idx])
node.input_shape = input_shape
func_name = 'shape_' + node.layer_type.lower()
if is_fluid_op:
node.output_shape = getattr(caffe_shape, func_name)(node.layer,
......
......@@ -49,28 +49,29 @@ def get_same_padding(in_size, kernel_size, stride):
class TFOpMapper(OpMapper):
directly_map_ops = {
'Relu': ['relu'],
'Relu6': ['relu6'],
'Abs': ['abs'],
'Sigmoid': ['sigmoid'],
'Exp': ['exp'],
'Rsqrt': ['rsqrt'],
'Sqrt': ['sqrt'],
'swish_f32': ['swish'],
'Tanh': ['tanh'],
'Softplus': ['softplus'],
'LeakyRelu': ['leaky_relu', {
'alpha': 'alpha'
}],
'Floor': ['floor'],
'Erf': ['erf'],
'Square': ['square']
'Relu': ['paddle.nn.functional.relu'],
'Relu6': ['paddle.nn.functional.relu6'],
'Abs': ['paddle.abs'],
'Sigmoid': ['paddle.nn.functional.sigmoid'],
'Softmax': ['paddle.nn.functional.softmax'],
'Exp': ['paddle.exp'],
'Rsqrt': ['paddle.rsqrt'],
'Sqrt': ['paddle.sqrt'],
'swish_f32': ['paddle.nn.functional.swish'],
'Tanh': ['paddle.tanh'],
'Softplus': ['paddle.nn.functional.softplus'],
'LeakyRelu': ['paddle.nn.functional.leaky_relu',
dict(alpha='negative_slope')],
'Floor': ['paddle.floor'],
'Erf': ['paddle.erf'],
'Square': ['paddle.square']
}
elementwise_ops = {
'Add': 'paddle.add',
'AddV2': 'paddle.add',
'RealDiv': 'paddle.divide',
'DivNoNan': 'paddle.divide',
# TODO (syf): replace
'Sub': 'fluid.layers.elementwise_sub',
'Maximum': 'paddle.maximum',
'Minimum': 'paddle.minimum',
......@@ -161,7 +162,7 @@ class TFOpMapper(OpMapper):
attr[pd_param_name] = tf_param
self.paddle_graph.add_layer(
kernel="fluid.layers.{}".format(op_info[0]),
kernel=op_info[0],
inputs={"x": input.name},
outputs=[node.name],
**attr)
......@@ -186,7 +187,7 @@ class TFOpMapper(OpMapper):
node.layer_name)
dtype = node.dtype
self.paddle_graph.add_layer(
kernel="fluid.data",
kernel="paddle.static.data",
inputs={},
outputs=[node.name],
dtype=string(dtype),
......@@ -197,30 +198,29 @@ class TFOpMapper(OpMapper):
shape = node.out_shapes[0]
dtype = node.dtype
value = node.value
initializer = "Constant(0.0)"
if len(shape) == 0:
assert value.size == 1, "Unexpected situation happend"
shape = [1]
if value == float('inf'):
value = "float('inf')"
self.paddle_graph.add_layer(
kernel="fluid.layers.fill_constant",
kernel="paddle.full",
inputs={},
outputs=[node.name],
dtype=string(dtype),
shape=[1],
value=value)
fill_value=value)
return
self.params[node.name] = node.value
self.paddle_graph.add_layer(
kernel="fluid.layers.create_parameter",
kernel="paddle.static.create_parameter",
inputs={},
outputs=[node.name],
dtype=string(dtype),
shape=shape,
name=string(node.name),
default_initializer=initializer)
default_initializer="paddle.nn.initializer.Constant(value=0.0)")
def Transpose(self, node):
input = self.graph.get_node(node.layer.input[0])
......@@ -229,7 +229,7 @@ class TFOpMapper(OpMapper):
perm = perm.value.tolist()
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[node.name],
perm=perm)
......@@ -245,10 +245,10 @@ class TFOpMapper(OpMapper):
else:
inputs["shape"] = dims.name
attr["dtype"] = string(input_value.dtype)
attr["value"] = input_value.value
attr["fill_value"] = input_value.value
self.paddle_graph.add_layer(
"fluid.layers.fill_constant",
"paddle.full",
inputs=inputs,
outputs=[node.name],
**attr)
......@@ -273,7 +273,7 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
transpose_name = gen_name("depth_to_space", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
......@@ -282,21 +282,21 @@ class TFOpMapper(OpMapper):
shape = [0, block_size * block_size, -1, h, w]
reshape_name = gen_name("depth_to_space", "reshape")
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
kernel="paddle.reshape",
inputs={"x": input_name},
outputs=[reshape_name],
shape=shape)
transpose_name = gen_name("depth_to_space", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": reshape_name},
outputs=[transpose_name],
perm=[0, 2, 1, 3, 4])
reshape_name = gen_name("depth_to_space", "reshape")
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
kernel="paddle.reshape",
inputs={"x": transpose_name},
outputs=[reshape_name],
shape=[0, c, h, w])
......@@ -309,7 +309,7 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
......@@ -353,7 +353,7 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
transpose_name = gen_name("max_pool", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
......@@ -362,17 +362,16 @@ class TFOpMapper(OpMapper):
input_name = transpose_name
self.paddle_graph.add_layer(
kernel="fluid.layers.pool2d",
inputs={"input": input_name},
kernel="paddle.nn.functional.max_pool2d",
inputs={"x": input_name},
outputs=[node.name],
pool_size=k_size[2:4],
pool_type=string("max"),
pool_stride=strides[2:4],
pool_padding=string(pad_mode))
kernel_size=k_size[2:4],
stride=strides[2:4],
padding=string(pad_mode))
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
......@@ -403,6 +402,13 @@ class TFOpMapper(OpMapper):
kernel_weight_name = kernel.name.replace('/', '_')
self.params[kernel_weight_name] = numpy.transpose(kernel_value,
(3, 2, 0, 1))
self.paddle_graph.add_layer(
kernel="paddle.static.nn.create_parameter",
inputs={},
outputs=[kernel_weight_name],
shape=self.params[kernel_weight_name].shape,
dtype=string(str(self.params[kernel_weight_name].dtype)),
name=string(kernel_weight_name))
input_name = input.name
if data_format == "NHWC":
......@@ -410,7 +416,7 @@ class TFOpMapper(OpMapper):
dilations = [dilations[i] for i in [0, 3, 1, 2]]
transpose_name = gen_name("conv2d", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
......@@ -421,26 +427,23 @@ class TFOpMapper(OpMapper):
node.fluid_code.add_layer(
"reshape", inputs=input, output=input, param_attr=attr)
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
kernel="paddle.reshape",
inputs={"x": input_name},
outputs=[input_name],
shape=[0, k_size[2], 0, 0])
self.paddle_graph.add_layer(
kernel="fluid.layers.conv2d",
inputs={"input": input_name},
kernel="paddle.nn.functional.conv2d",
inputs={"x": input_name, "weight": kernel_weight_name},
outputs=[node.name],
bias_attr=False,
param_attr=string(kernel_weight_name),
num_filters=k_size[3],
filter_size=k_size[0:2],
bias=None,
stride=strides[2:4],
dilation=dilations[2:4],
padding=string(pad_mode))
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
......@@ -462,15 +465,6 @@ class TFOpMapper(OpMapper):
if kernel.layer_type == 'Const':
kernel_value = kernel.value
kernel_weight_name = kernel.name.replace('/', '_')
self.paddle_graph.add_layer(
kernel="paddle.static.nn.create_parameter",
inputs={},
outputs=[kernel_weight_name],
shape=self.params[kernel_weight_name].shape,
dtype=string(str(self.params[kernel_weight_name].dtype)),
name=string(kernel_weight_name))
self.params[kernel_weight_name] = numpy.transpose(kernel_value,
(4, 3, 0, 1, 2))
else:
kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False)
if kernel.layer_type == 'Split':
......@@ -478,12 +472,15 @@ class TFOpMapper(OpMapper):
kernel.name)
else:
kernel_weight_name = kernel.name.replace('/', '_')
self.params[kernel_weight_name] = numpy.transpose(kernel_value,
(4, 3, 0, 1, 2))
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": kernel_weight_name},
kernel="paddle.static.nn.create_parameter",
inputs={},
outputs=[kernel_weight_name],
perm=[4, 3, 0, 1, 2])
shape=self.params[kernel_weight_name].shape,
dtype=string(str(self.params[kernel_weight_name].dtype)),
name=string(kernel_weight_name))
input_name = input.name
if data_format == "NDHWC":
......@@ -507,9 +504,8 @@ class TFOpMapper(OpMapper):
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.conv3d",
inputs={"x": input_name},
inputs={"x": input_name, "weight": kernel_weight_name},
outputs=[node.name],
weight=kernel_weight_name,
bias=None,
stride=strides[2:5],
dilation=dilations[2:5],
......@@ -526,7 +522,7 @@ class TFOpMapper(OpMapper):
input = self.graph.get_node(node.layer.input[0])
bias = self.graph.get_node(node.layer.input[1])
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_add",
kernel="paddle.add",
inputs={"x": input.name,
"y": bias.name},
outputs=[node.name])
......@@ -548,30 +544,32 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
transpose_name = gen_name("batch_norm", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
input_name = transpose_name
self.paddle_graph.add_layer(
kernel="fluid.layers.batch_norm",
inputs={"input": input_name},
kernel="paddle.nn.functional.batch_norm",
inputs={"x": input_name,
"running_mean": moving_mean.name,
"running_var": moving_var.name,
"weight": gamma.name,
"bias": beta.name},
outputs=[node.name],
epsilon=node.get_attr("epsilon"),
param_attr=string(gamma.name),
bias_attr=string(beta.name),
moving_mean_name=string(moving_mean.name),
moving_variance_name=string(moving_var.name),
is_test=True)
epsilon=node.get_attr("epsilon"))
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
def FusedBatchNormV3(self, node):
    # V3 of the fused batch-norm op carries the same inputs/attrs that the
    # FusedBatchNorm mapper consumes, so reuse it directly.
    self.FusedBatchNorm(node)
def Mean(self, node):
input = self.graph.get_node(node.layer.input[0])
reduce_idx = self.graph.get_node(node.layer.input[1])
......@@ -614,48 +612,6 @@ class TFOpMapper(OpMapper):
inputs={"x": node.name},
outputs=[node.name],
shape=out_shape.tolist())
# input = self.graph.get_node(node.layer.input[0])
# param = self.graph.get_node(node.layer.input[1])
# input_name = input.name
# if input.dtype == 'bool':
# cast_name = gen_name('reshape', 'cast')
# self.paddle_graph.add_layer(
# kernel="fluid.layers.cast",
# inputs={"x": input_name},
# outputs=[cast_name],
# dtype="'int32'")
# input_name = cast_name
# if param.layer_type == "Const":
# shape = param.value.tolist()
# self.paddle_graph.add_layer(
# kernel="fluid.layers.reshape",
# inputs={"x": input_name},
# outputs=[node.name],
# shape=shape)
# else:
# self.paddle_graph.add_layer(
# kernel="fluid.layers.reshape",
# inputs={"x": input_name,
# "shape": param.name},
# outputs=[node.name])
# if param.layer_type != "Const":
# out_shape = numpy.array(node.out_shapes[0])
# if (out_shape > 0).any():
# out_shape[out_shape < 0] = 0
# self.paddle_graph.add_layer(
# kernel="fluid.layers.reshape",
# inputs={"x": node.name},
# outputs=[node.name],
# shape=out_shape.tolist())
# if input.dtype == 'bool':
# self.paddle_graph.add_layer(
# kernel="fluid.layers.cast",
# inputs={"x": node.name},
# outputs=[node.name],
# dtype="'bool'")
def Pad(self, node):
input = self.graph.get_node(node.layer.input[0])
......@@ -668,37 +624,33 @@ class TFOpMapper(OpMapper):
new_padding = paddings[2:6]
transpose_name = gen_name("pad", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
self.paddle_graph.add_layer(
kernel="fluid.layers.pad2d",
inputs={"input": transpose_name},
kernel="paddle.nn.functional.pad",
inputs={"x": transpose_name},
outputs=[node.name],
paddings=new_padding)
pad=new_padding)
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
return
self.paddle_graph.add_layer(
kernel="fluid.layers.pad",
kernel="paddle.nn.functional.pad",
inputs={"x": input.name},
outputs=[node.name],
paddings=paddings)
pad=paddings)
def MirrorPad(self, node):
op_name = name_generator("pad", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
input = self.graph.get_input_node(node, 0)
paddings = self.graph.get_input_node(node, 1)
assert paddings.layer_type == "Const", "Padding should be Const"
paddings = np.flip(paddings.value, 0).flatten().tolist()
dim = int(len(paddings) / 2)
transpose_name = gen_name("pad", "transpose")
self.paddle_graph.add_layer(
kernel="paddle.transpose",
......@@ -706,9 +658,9 @@ class TFOpMapper(OpMapper):
outputs=[transpose_name],
perm=[0, 3, 1, 2])
self.paddle_graph.add_layer(
kernel="paddle.nn.Pad{}D".format(dim),
kernel="paddle.nn.functional.pad".format(dim),
inputs={"x": transpose_name},
outputs=layer_outputs,
outputs=[node.name],
pad=new_padding)
self.paddle_graph.add_layer(
kernel="paddle.transpose",
......@@ -717,22 +669,13 @@ class TFOpMapper(OpMapper):
perm=[0, 2, 3, 1])
def Squeeze(self, node):
input = self.graph.get_node(node.layer.input[0])
input = self.graph.get_input_node(node, 0)
squeeze_dims = node.get_attr('squeeze_dims')
self.paddle_graph.add_layer(
kernel="fluid.layers.squeeze",
inputs={"input": input.name},
outputs=[node.name],
axes=squeeze_dims)
def Softmax(self, node):
input = self.graph.get_node(node.layer.input[0])
axis = node.get_attr("axis")
self.paddle_graph.add_layer(
kernel="fluid.layers.softmax",
inputs={"input": input.name},
kernel="paddle.squeeze",
inputs={"x": input.name},
outputs=[node.name],
axis=axis)
axis=squeeze_dims)
def Shape(self, node):
input = self.graph.get_input_node(node, 0)
......@@ -762,12 +705,12 @@ class TFOpMapper(OpMapper):
outputs=[node.name])
def ArgMax(self, node):
input = self.graph.get_node(node.layer.input[0])
axis = self.graph.get_node(node.layer.input[1])
input = self.graph.get_input_node(node, 0)
axis = self.graph.get_input_node(node, 1)
assert axis.layer_type == "Const", "ArgMax only support Const parameter"
axis = axis.value
self.paddle_graph.add_layer(
kernel="fluid.layers.argmax",
kernel="paddle.argmax",
inputs={"x": input.name},
outputs=[node.name],
axis=axis)
......@@ -786,8 +729,8 @@ class TFOpMapper(OpMapper):
sorted=sort)
def MatMul(self, node):
x = self.graph.get_node(node.layer.input[0])
y = self.graph.get_node(node.layer.input[1])
x = self.graph.get_input_node(node, 0)
y = self.graph.get_input_node(node, 1)
transpose_a = node.get_attr('transpose_a')
transpose_b = node.get_attr('transpose_b')
if transpose_a is None:
......@@ -795,7 +738,7 @@ class TFOpMapper(OpMapper):
if transpose_b is None:
transpose_b = node.get_attr('adj_y')
self.paddle_graph.add_layer(
kernel="fluid.layers.matmul",
kernel="paddle.matmul",
inputs={"x": x.name,
"y": y.name},
outputs=[node.name],
......@@ -820,8 +763,11 @@ class TFOpMapper(OpMapper):
data_format = node.get_attr("data_format").decode()
pad_mode = node.get_attr("padding").decode()
self.params[kernel.layer_name.replace(
'/', '_')] = numpy.transpose(kernel.value, (2, 3, 0, 1))
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": kernel.name},
outputs=[kernel.name],
perm=[2, 3, 0, 1])
input_name = input.name
if data_format == "NHWC":
......@@ -830,34 +776,32 @@ class TFOpMapper(OpMapper):
dilations = [dilations[i] for i in [0, 3, 1, 2]]
transpose_name = gen_name('depthwise_conv2d', 'transpose')
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
input_name = transpose_name
self.paddle_graph.add_layer(
kernel="fluid.layers.conv2d",
inputs={"input": input_name},
kernel="paddle.nn.functional.conv2d",
inputs={"x": input_name,
"weight": kernel.name},
outputs=[node.name],
num_filters=in_shape[1],
filter_size=k_size[0:2],
stride=strides[2:4],
dilation=dilations[2:4],
groups=k_size[3] * in_shape[1],
padding=string(pad_mode),
param_attr=string(kernel.layer_name),
bias_attr=False)
bias=None)
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
def AvgPool(self, node):
input = self.graph.get_node(node.layer.input[0])
input = self.graph.get_input_node(node, 0)
k_size = node.get_attr("ksize")
strides = node.get_attr("strides")
......@@ -868,7 +812,7 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
transpose_name = gen_name("avg_pool", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
......@@ -876,6 +820,8 @@ class TFOpMapper(OpMapper):
k_size = [k_size[i] for i in [0, 3, 1, 2]]
input_name = transpose_name
# TODO(syf): The op has diff.
self.paddle_graph.add_layer(
kernel="fluid.layers.pool2d",
inputs={"input": input_name},
......@@ -887,29 +833,31 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
def Pack(self, node):
inputs = [self.graph.get_node(name) for name in node.layer.input]
input_names = [i.name for i in inputs]
inputs_list = list()
for i in range(len(node.inputs)):
inputs_list.append(self.graph.get_input_node(node, i))
input_names = [i.name for i in inputs_list]
axis = node.get_attr("axis")
self.paddle_graph.add_layer(
kernel="fluid.layers.stack",
kernel="paddle.stack",
inputs={"x": input_names},
outputs=[node.name],
axis=axis)
if len(node.out_shapes[0]) == 1:
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
kernel="paddle.reshape",
inputs={"x": node.name},
outputs=[node.name],
shape=[-1])
def Unpack(self, node):
input = self.graph.get_node(node.layer.input[0])
input = self.graph.get_input_node(node, 0)
axis = node.get_attr("axis")
num = node.get_attr("num")
shape = input.out_shapes[0]
......@@ -917,10 +865,10 @@ class TFOpMapper(OpMapper):
if len(shape) == 1:
if shape[0] > 0 and num == shape[0]:
self.paddle_graph.add_layer(
kernel="fluid.layers.unsqueeze",
inputs={"input": input.name},
kernel="paddle.unsqueeze",
inputs={"x": input.name},
outputs=[node.name],
axes=[0])
axis=[0])
input_name = node.name
axis = 1
else:
......@@ -929,41 +877,45 @@ class TFOpMapper(OpMapper):
if len(layer_outputs) == 1:
layer_outputs[0] = "[{}]".format(node.layer_name)
self.paddle_graph.add_layer(
kernel="fluid.layers.unstack",
kernel="paddle.unstack",
inputs={"x": input_name},
outputs=layer_outputs,
axis=axis,
num=num)
def ConcatV2(self, node):
inputs = [self.graph.get_node(name) for name in node.layer.input[:-1]]
axis = self.graph.get_node(node.layer.input[-1])
inputs_list = list()
for i in range(len(node.inputs) - 1):
inputs_list.append(self.graph.get_input_node(node, i))
axis = self.graph.get_input_node(node, -1)
assert axis.layer_type == "Const", "axis for ConcatV2 must be type Const"
axis = axis.value
if axis < 0:
axis += len(inputs[0].out_shapes[0])
axis += len(inputs_list[0].out_shapes[0])
input_names = [i.name for i in inputs]
for i, ipt in enumerate(inputs):
if ipt.dtype == 'bool':
cast_name = gen_name('concat', 'cast')
self.paddle_graph.add_layer(
kernel="fluid.layers.cast",
inputs={"x": ipt.name},
outputs=[cast_name],
dtype="'int32'")
input_names[i] = cast_name
input_names = [i.name for i in inputs_list]
self.paddle_graph.add_layer(
kernel="fluid.layers.concat",
inputs={"input": input_names},
kernel="paddle.concat",
inputs={"x": input_names},
outputs=[node.name],
axis=axis)
if node.dtype == 'bool':
def Concat(self, node):
inputs_list = list()
for i in range(1, len(node.inputs)):
inputs_list.append(self.graph.get_input_node(node, i))
axis = self.graph.get_input_node(node, 0)
assert axis.layer_type == "Const", "axis for ConcatV2 must be type Const"
axis = axis.value
if axis < 0:
axis += len(inputs_list[0].out_shapes[0])
input_names = [i.name for i in inputs_list]
self.paddle_graph.add_layer(
kernel="fluid.layers.cast",
inputs={"x": node.name},
kernel="paddle.concat",
inputs={"x": input_names},
outputs=[node.name],
dtype="'bool'")
axis=axis)
def AddN(self, node):
inputs_list = list()
......@@ -977,10 +929,10 @@ class TFOpMapper(OpMapper):
outputs=[node.name])
def StridedSlice(self, node):
input = self.graph.get_node(node.layer.input[0])
begin = self.graph.get_node(node.layer.input[1])
end = self.graph.get_node(node.layer.input[2])
strides = self.graph.get_node(node.layer.input[3])
input = self.graph.get_input_node(node, 0)
begin = self.graph.get_input_node(node, 1)
end = self.graph.get_input_node(node, 2)
strides = self.graph.get_input_node(node, 3)
if strides.layer_type == "Const":
strides = strides.value.tolist()
......@@ -1043,28 +995,43 @@ class TFOpMapper(OpMapper):
else:
new_end.append(end[i])
if input.dtype == "bool":
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": input.name},
outputs=[input.name],
dtype=string("int32"))
self.paddle_graph.add_layer(
kernel="fluid.layers.slice",
kernel="paddle.slice",
inputs={"input": input.name},
outputs=[node.name],
axes=[i for i in range(len(new_begin))],
starts=new_begin,
ends=new_end)
if input.dtype == "bool":
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": node.name},
outputs=[node.name],
dtype=string("bool"))
if len(new_axes) > 0:
self.paddle_graph.add_layer(
kernel="fluid.layers.unsqueeze",
inputs={"input": node.name},
kernel="paddle.unsqueeze",
inputs={"x": node.name},
outputs=[node.name],
axes=new_axes)
axis=new_axes)
if len(shrink_axes) > 0:
if len(input.out_shapes[0]) + len(new_axes) <= 1:
pass
else:
self.paddle_graph.add_layer(
kernel="fluid.layers.squeeze",
inputs={"input": node.name},
kernel="paddle.squeeze",
inputs={"x": node.name},
outputs=[node.name],
axes=shrink_axes)
axis=shrink_axes)
def Prod(self, node):
input = self.graph.get_input_node(node, 0)
......@@ -1081,25 +1048,25 @@ class TFOpMapper(OpMapper):
axis=axis)
def Split(self, node):
dim = self.graph.get_node(node.layer.input[0])
input = self.graph.get_node(node.layer.input[1])
dim = self.graph.get_input_node(node, 0)
input = self.graph.get_input_node(node, 1)
assert dim.layer_type == "Const"
num_split = node.get_attr('num_split')
dim = dim.value
self.paddle_graph.add_layer(
kernel="fluid.layers.split",
inputs={"input": input.name},
kernel="paddle.split",
inputs={"x": input.name},
outputs=[
"{}_p{}".format(node.layer_name, i) for i in range(num_split)
],
num_or_sections=num_split,
dim=dim)
axis=dim)
def Slice(self, node):
input = self.graph.get_node(node.layer.input[0])
begin = self.graph.get_node(node.layer.input[1])
size = self.graph.get_node(node.layer.input[2])
input = self.graph.get_input_node(node, 0)
begin = self.graph.get_input_node(node, 1)
size = self.graph.get_input_node(node, 2)
inputs = {"x": input.name}
attrs = {}
......@@ -1124,143 +1091,147 @@ class TFOpMapper(OpMapper):
shape = size.out_shapes[0]
reshape_name = gen_name("slice", "reshape")
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
kernel="paddle.reshape",
inputs={"x": size.name},
outputs=[reshape_name],
shape=shape)
inputs['shape'] = reshape_name
self.paddle_graph.add_layer(
kernel="fluid.layers.crop_tensor",
kernel="paddle.crop",
inputs=inputs,
outputs=[node.name],
**attrs)
def ResizeNearestNeighbor(self, node):
input = self.graph.get_node(node.layer.input[0])
resize_shape = self.graph.get_node(node.layer.input[1])
input = self.graph.get_input_node(node, 0)
resize_shape = self.graph.get_input_node(node, 1)
data_format = "NHWC"
inputs = {"input": input.name}
attrs = {"align_corners": node.get_attr("align_corners")}
inputs = {"x": input.name}
attrs = {"align_corners": node.get_attr("align_corners"),
"mode": string("nearest"),
"align_mode": 1}
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
attrs["out_shape"] = resize_shape
attrs["size"] = resize_shape
else:
shape = resize_shape.out_shapes[0]
reshape_name = gen_name("resize_nearest", "reshape")
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
kernel="paddle.reshape",
inputs={"x": resize_shape.name},
outputs=[reshape_name],
shape=shape)
inputs["out_shape"] = reshape_name
inputs["size"] = reshape_name
if data_format == "NHWC":
transpose_name = gen_name("resize_nearest", "reshape")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
inputs["input"] = transpose_name
inputs["x"] = transpose_name
self.paddle_graph.add_layer(
kernel="fluid.layers.resize_nearest",
kernel="paddle.nn.functional.interpolate",
inputs=inputs,
outputs=[node.name],
**attrs)
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
def ResizeBilinear(self, node):
input = self.graph.get_node(node.layer.input[0])
resize_shape = self.graph.get_node(node.layer.input[1])
input = self.graph.get_input_node(node, 0)
resize_shape = self.graph.get_input_node(node, 1)
data_format = "NHWC"
inputs = {"input": input.name}
attrs = {"align_corners": node.get_attr("align_corners")}
inputs = {"x": input.name}
attrs = {"align_corners": node.get_attr("align_corners"),
"mode": string("bilinear"),
"align_mode": 1}
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
attrs["out_shape"] = resize_shape
attrs["size"] = resize_shape
else:
shape = resize_shape.out_shapes[0]
reshape_name = gen_name("resize_bilinear", "reshape")
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
kernel="paddle.reshape",
inputs={"x": resize_shape.name},
outputs=[reshape_name],
shape=shape)
inputs["out_shape"] = reshape_name
inputs["size"] = reshape_name
if data_format == "NHWC":
transpose_name = gen_name("resize_bilinear", "reshape")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
inputs["input"] = transpose_name
inputs["x"] = transpose_name
self.paddle_graph.add_layer(
kernel="fluid.layers.resize_bilinear",
kernel="paddle.nn.functional.interpolate",
inputs=inputs,
outputs=[node.name],
**attrs)
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
def Cast(self, node):
input = self.graph.get_node(node.layer.input[0])
input = self.graph.get_input_node(node, 0)
dtype = node.dtype
self.paddle_graph.add_layer(
kernel="fluid.layers.cast",
kernel="paddle.cast",
inputs={"x": input.name},
outputs=[node.name],
dtype=string(dtype))
def Sum(self, node):
input = self.graph.get_node(node.layer.input[0])
reduce_idx = self.graph.get_node(node.layer.input[1])
input = self.graph.get_input_node(node, 0)
reduce_idx = self.graph.get_input_node(node, 1)
assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
keep_dims = node.get_attr("keep_dims")
dim = reduce_idx.value.tolist()
self.paddle_graph.add_layer(
kernel="fluid.layers.reduce_sum",
inputs={"input": input.name},
kernel="paddle.sum",
inputs={"x": input.name},
outputs=[node.name],
dim=dim,
keep_dim=keep_dims)
axis=dim,
keepdim=keep_dims)
def Max(self, node):
input = self.graph.get_node(node.layer.input[0])
reduce_idx = self.graph.get_node(node.layer.input[1])
input = self.graph.get_input_node(node, 0)
reduce_idx = self.graph.get_input_node(node, 1)
assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
keep_dims = node.get_attr("keep_dims")
dim = reduce_idx.value.tolist()
self.paddle_graph.add_layer(
kernel="fluid.layers.reduce_max",
inputs={"input": input.name},
kernel="paddle.max",
inputs={"x": input.name},
outputs=[node.name],
dim=dim,
keep_dim=keep_dims)
axis=dim,
keepdim=keep_dims)
def RandomUniform(self, node):
shape = self.graph.get_node(node.layer.input[0])
shape = self.graph.get_input_node(node, 0)
if shape.layer_type == "Const":
shape = shape.value.tolist()
self.paddle_graph.add_layer(
kernel="fluid.layers.uniform_random",
kernel="paddle.uniform",
inputs={},
outputs=[node.name],
shape=shape,
......@@ -1268,16 +1239,16 @@ class TFOpMapper(OpMapper):
max=0.9999)
else:
self.paddle_graph.add_layer(
kernel="fluid.layers.uniform_random",
kernel="paddle.uniform",
inputs={'shape': shape.name},
outputs=[node.name],
min=0.0,
max=0.9999)
def Conv2DBackpropInput(self, node):
out_shape = self.graph.get_node(node.layer.input[0])
kernel = self.graph.get_node(node.layer.input[1])
input = self.graph.get_node(node.layer.input[2])
out_shape = self.graph.get_input_node(node, 0)
kernel = self.graph.get_input_node(node, 1)
input = self.graph.get_input_node(node, 2)
assert kernel.layer_type == "Const", "Kernel of Conv2DBackpropInput should be Const"
......@@ -1292,15 +1263,15 @@ class TFOpMapper(OpMapper):
in_shape = self.decoder.infer_tensor(input, use_diff_inputs=False).shape
k_size = kernel.out_shapes[0]
if k_size.count(-1) > 2:
k_size = self.decoder.infer_tensor(input, use_diff_inputs=False).shape
k_size = self.decoder.infer_tensor(kernel, use_diff_inputs=False).shape
pad_mode = node.get_attr("padding").decode()
strides = node.get_attr("strides")
dilations = node.get_attr("dilations")
data_format = node.get_attr("data_format").decode()
self.params[kernel.layer_name.replace(
'/', '_')] = numpy.transpose(kernel.value, (3, 2, 0, 1))
kernel_name = node.name + ".weight"
self.params[kernel_name] = numpy.transpose(kernel.value, (3, 2, 0, 1))
input_name = input.name
if data_format == "NHWC":
......@@ -1309,20 +1280,26 @@ class TFOpMapper(OpMapper):
dilations = [dilations[i] for i in [0, 3, 1, 2]]
transpose_name = gen_name("conv2dbackpropinput", "transpose")
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
input_name = transpose_name
self.paddle_graph.add_layer(
kernel="fluid.layers.conv2d_transpose",
inputs={"input": input_name},
kernel="paddle.static.create_parameter",
inputs={},
outputs=["{}_{}".format(node.name, kernel_name).replace(".", "_")],
dtype=string(str(self.params[kernel_name].dtype)),
shape=self.params[kernel_name].shape,
name=string(kernel_name))
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.conv2d_transpose",
inputs={"x": input_name,
"weight": "{}_{}".format(node.name, kernel_name).replace(".", "_")},
outputs=[node.name],
bias_attr=False,
param_attr=string(kernel.layer_name),
num_filters=k_size[2],
filter_size=k_size[0:2],
bias=None,
stride=strides[2:4],
dilation=dilations[2:4],
padding=string(pad_mode),
......@@ -1330,7 +1307,7 @@ class TFOpMapper(OpMapper):
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="fluid.layers.transpose",
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
......@@ -1403,11 +1380,12 @@ class TFOpMapper(OpMapper):
shape=node.out_shapes[0])
def SquaredDifference(self, node):
x = self.graph.get_node(node.layer.input[0])
y = self.graph.get_node(node.layer.input[1])
x = self.graph.get_input_node(node, 0)
y = self.graph.get_input_node(node, 1)
inputs = {"x": x.name, "y": y.name}
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
# TODO(syf)
layer_id = self.paddle_graph.add_layer(
"fluid.layers.elementwise_sub", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
......@@ -1416,14 +1394,14 @@ class TFOpMapper(OpMapper):
x_shape = node.out_shapes[0]
y_shape = node.out_shapes[0]
layer_id = self.paddle_graph.add_layer(
"fluid.layers.elementwise_mul", inputs=inputs, outputs=[node.name])
"paddle.multiply", inputs=inputs, outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
def OneHot(self, node):
input = self.graph.get_node(node.layer.input[0])
depth = self.graph.get_node(node.layer.input[1])
on_value = self.graph.get_node(node.layer.input[2])
off_value = self.graph.get_node(node.layer.input[3])
input = self.graph.get_input_node(node, 0)
depth = self.graph.get_input_node(node, 1)
on_value = self.graph.get_input_node(node, 2)
off_value = self.graph.get_input_node(node, 3)
assert depth.layer_type == 'Const', 'Parameter depth should be Const in OneHot'
assert on_value.layer_type == 'Const', 'Parameter on_value should be Const in OneHot'
assert off_value.layer_type == 'Const', 'Parameter off_value should be Const in OneHot'
......@@ -1437,73 +1415,72 @@ class TFOpMapper(OpMapper):
0.0) < 1e-06, "off_value should be 0 in OneHot"
self.paddle_graph.add_layer(
"fluid.one_hot",
inputs={"input": input.name},
"paddle.nn.functional.one_hot",
inputs={"x": input.name},
outputs=[node.name],
depth=depth.value)
num_classes=depth.value)
def Pow(self, node):
    """Map a TF `Pow` op onto `paddle.pow`.

    A Const exponent is passed as the `y` attribute; a tensor exponent is
    wired in as the `y` input.
    """
    x = self.graph.get_input_node(node, 0)
    factor = self.graph.get_input_node(node, 1)
    inputs = {"x": x.name}
    attr = dict()
    if factor.layer_type == 'Const':
        attr["y"] = factor.value.tolist()
    else:
        inputs["y"] = factor.name
    self.paddle_graph.add_layer(
        "paddle.pow", inputs=inputs, outputs=[node.name], **attr)
def All(self, node):
    """Map a TF `All` reduction onto `paddle.all`.

    Non-bool inputs are first cast to bool. The node's TF dtype attr is
    forced to 10 afterwards (presumably DT_BOOL in the TF DataType enum —
    TODO confirm).
    """
    input = self.graph.get_input_node(node, 0)
    reduce_idx = self.graph.get_input_node(node, 1)
    assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
    attr = dict()
    attr["axis"] = reduce_idx.value.tolist()
    attr["keepdim"] = node.get_attr("keep_dims")
    input_name = input.name
    if input.dtype != "bool":
        input_name = gen_name("all", "cast")
        self.paddle_graph.add_layer(
            "paddle.cast",
            inputs={"x": input.name},
            outputs=[input_name],
            dtype=string("bool"))
    self.paddle_graph.add_layer(
        "paddle.all",
        inputs={"x": input_name},
        outputs=[node.name],
        **attr)
    node.layer.attr['dtype'].type = 10
def GatherV2(self, node):
    """Map a TF `GatherV2` op onto `paddle.gather` (axis 0 only).

    Multi-dimensional index tensors are flattened before the gather and
    the result is reshaped back to the node's expected output shape.
    """
    embeddings = self.graph.get_input_node(node, 0)
    index = self.graph.get_input_node(node, 1)
    axis = self.graph.get_input_node(node, 2)
    assert axis.layer_type == 'Const', "Only support Const parameter[axis]"
    axis = axis.value
    assert axis == 0, "Only support axis=0 in GatherV2 OP"
    index_name = index.name
    if len(index.out_shapes[0]) != 1:
        # Flatten indices so paddle.gather sees a 1-D index tensor.
        reshape_name = gen_name("gather", "reshape")
        index_name = reshape_name
        self.paddle_graph.add_layer(
            "paddle.reshape",
            inputs={"x": index.name},
            outputs=[reshape_name],
            shape=[-1])
    inputs = {'x': embeddings.name, 'index': index_name}
    self.paddle_graph.add_layer(
        "paddle.gather",
        inputs=inputs,
        outputs=[node.name],
        axis=axis)
    if len(index.out_shapes[0]) != 1:
        # Undo the flattening on the gathered result.
        out_shape = node.out_shapes[0]
        self.paddle_graph.add_layer(
            kernel="paddle.reshape",
            inputs={"x": node.name},
            outputs=[node.name],
            shape=out_shape)
......@@ -1518,19 +1495,19 @@ class TFOpMapper(OpMapper):
outputs=[node.name])
def ExpandDims(self, node):
    """Map a TF `ExpandDims` op onto `paddle.unsqueeze`.

    A Const axis becomes the `axis` attribute (always wrapped in a list);
    a tensor axis is wired in as the `axis` input.
    """
    x = self.graph.get_input_node(node, 0, copy=True)
    y = self.graph.get_input_node(node, 1, copy=True)
    inputs = {"x": x.name}
    attr = dict()
    if y.layer_type == 'Const':
        dim = y.value.tolist()
        if not isinstance(dim, list):
            dim = [dim]
        attr['axis'] = dim
    else:
        inputs['axis'] = y.name
    self.paddle_graph.add_layer(
        "paddle.unsqueeze",
        inputs=inputs,
        outputs=[node.name],
        **attr)
......@@ -201,6 +201,7 @@ class HierarchicalTree(Tree):
code_str = gen_layer_code(self.pd_graph, sub_layers, module_name,
different_attrs=diff_attrs_column)
# print(code_str)
self.codes.append(code_str)
for sub_layers in sub_layers_list:
inputs, outputs = get_inputs_outputs(self.pd_graph, sub_layers)
......@@ -358,7 +359,7 @@ class HierarchicalTree(Tree):
run_func_list.append(" # {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
run_func_list.extend(
[" paddle.disable_static()",
" params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
" params, _ = fluid.load_dygraph('{}/model')".format(save_dir),
" model = {}()".format(self.pd_graph.name),
" model.set_dict(params)",
" model.eval()",
......@@ -370,12 +371,7 @@ class HierarchicalTree(Tree):
self.update_parameters()
import_list = ["import paddle",
"import paddle.fluid as fluid",
"from paddle.fluid.initializer import Constant",
"from paddle.fluid.param_attr import ParamAttr",
"import math",
"from x2paddle.op_mapper.dygraph.pytorch2paddle " + \
"import pytorch_custom_layer as x2paddle_nn"
"\n",]
"",]
import_str = "\n".join(import_list)
if not osp.exists(save_dir):
os.makedirs(save_dir)
......
......@@ -29,9 +29,9 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
"paddle.nn.Tanh": "tanh",
"paddle.nn.AvgPool2D": "pool",
"paddle.nn.MaxPool2D": "pool",
"paddle.nn.Pad1D": "pad",
"paddle.nn.Pad2D": "pad",
"paddle.nn.Pad3D": "pad",
"paddle.nn.Pad1d": "pad",
"paddle.nn.Pad2d": "pad",
"paddle.nn.Pad3d": "pad",
"paddle.nn.Dropout": "dropout",
"paddle.nn.GELU": "gelu",
"paddle.nn.Hardtanh": "tanh",
......@@ -175,11 +175,9 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
if layer.kernel.startswith("paddle.nn") and index == 0:
continue
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert":
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
elif output_name not in outputs:
outputs.append(output_name)
continue
......@@ -189,22 +187,15 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
if layer.kernel.startswith("paddle.nn") and index == 0 and "functional" not in layer.kernel:
continue
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert":
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
else:
outputs.append(output_name)
no_output_count = 0
for i, (layer_id, layer) in enumerate(sub_layers.items()):
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
layer.kernel.startswith("custom_layer"):
line = "self.{}".format(layer.outputs[0])
if layer.kernel.startswith("custom_layer"):
line += "= x2paddle_nn.{}(".format(layer.kernel.split(":")[-1])
else:
line += " = {}(".format(layer.kernel)
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel):
line = "self.{} = {}(".format(layer.outputs[0], layer.kernel)
for k, v in layer.attrs.items():
key_name = "{}_{}".format(layer.outputs[0], k)
if key_name in different_attrs:
......@@ -298,9 +289,6 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
else:
if v not in cur_outputs and v not in inputs:
inputs.append(v)
if k == "args":
line += v
else:
line += "{}={}, ".format(k, v)
for k, v in layer.attrs.items():
key_name = "{}_{}".format(layer.outputs[0], k)
......
......@@ -50,25 +50,21 @@ def get_inputs_outputs(pd_graph, layers):
for layer_id, layer in layers.items():
# 获取输出节点名字
if layer_id not in pd_graph.edges_out:
for index, output_name in enumerate(layer.outputs):
for output_name in layer.outputs:
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert":
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
elif output_name not in outputs:
outputs.append(output_name)
else:
for out_layer_id in pd_graph.edges_out[layer_id]:
if out_layer_id not in layer_ids:
for index, output_name in enumerate(layer.outputs):
for output_name in layer.outputs:
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert":
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
else:
outputs.append(output_name)
# 获取输入节点名字
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .transpose_elimination import StaticTransposeElimination
from .transpose_eliminate_pass import StaticTransposeEliminatePass
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.elimination.static import StaticTransposeElimination
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticTransposeEliminatePass(Pass):
    """Graph pass that strips removable transpose pairs from static graphs."""

    name = "static_transpose_eliminate_pass"

    def __init__(self):
        super(StaticTransposeEliminatePass, self).__init__()

    def apply(self, graph):
        """Run transpose elimination over ``graph`` in place."""
        StaticTransposeElimination().operate(graph)


# Register a singleton instance so the pass manager can look the pass up.
static_transpose_eliminate_pass = StaticTransposeEliminatePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import sys
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticTransposeElimination(FuseBase):
    """Eliminate paired NHWC<->NCHW transposes in a static Paddle graph.

    The TF frontend wraps layout-sensitive ops in
    ``transpose(perm=[0, 2, 3, 1])`` / ``transpose(perm=[0, 3, 1, 2])``
    pairs. When every layer reachable between such a pair is
    layout-agnostic (direct elementwise activation, binary elementwise,
    keepdim-reduce or concat), both transposes can be deleted and the axis
    attributes of the affected layers remapped instead.
    """

    def __init__(self):
        super(StaticTransposeElimination, self).__init__(graph_type="static")
        # Unary ops that act value-wise, so they commute with any permutation.
        self.direct_layers = [
            'paddle.nn.functional.relu', 'paddle.nn.functional.relu6', 'paddle.abs',
            'paddle.nn.functional.sigmoid', 'paddle.exp', 'paddle.rsqrt',
            'paddle.nn.functional.swish', 'paddle.tanh',
            'paddle.nn.functional.softplus', 'paddle.nn.functional.leaky_relu',
            'paddle.floor', 'paddle.erf', 'paddle.square'
        ]
        # Binary elementwise ops; broadcasting cases need an axis remap.
        self.elementwise_layers = [
            'paddle.add', 'fluid.layers.elementwise_sub',
            'paddle.multiply', 'paddle.divide'
        ]
        # Reductions are only safe with keepdim=True; their axes get remapped.
        self.reduce_layers = [
            'paddle.mean', 'paddle.all',
            'paddle.max', 'paddle.any',
            'paddle.sum', 'paddle.prod'
        ]

    def get_transpose_num(self, graph):
        """Return the number of ``paddle.transpose`` layers in ``graph``."""
        count = 0
        for layer_id, layer in graph.layers.items():
            if layer.kernel == "paddle.transpose":
                count += 1
        return count

    def operate(self, graph):
        """Repeatedly strip removable transpose pairs from ``graph`` in place.

        Works on a deep copy first (``strip_transpose``) to validate each
        candidate region, then replays the deletions and axis remaps on the
        real ``graph``.
        """
        total_layer_num = len(graph.layers)
        scanned_layers = set()
        optimized_transpose_layers = list()
        optimized_reduce_layers = list()
        optimized_concat_layers = list()
        optimized_elementwise_layers = list()

        def get_index(layer):
            # paddle.nn module layers emit an extra "self.xxx = Layer(...)"
            # output first, so the tensor output of interest is outputs[1].
            if layer.kernel.startswith("paddle.nn") and "functional" not in layer.kernel:
                return 1
            else:
                return 0

        def strip_transpose(_graph):
            # Scan for one NHWC transpose whose downstream region is fully
            # layout-agnostic; delete its transposes and return True.
            # Returns False when no further candidate exists.
            layers = copy.deepcopy(_graph.layers)
            for layer_id, layer in layers.items():
                if layer_id in scanned_layers:
                    continue
                scanned_layers.add(layer_id)
                percent = round(len(scanned_layers) / total_layer_num * 100, 2)
                sys.stderr.write("\rOptimize Transpose Layers...{}%".format(
                    percent))
                if layer.kernel != "paddle.transpose":
                    continue
                if layer.attrs["perm"] != [0, 2, 3, 1]:
                    continue
                transpose_layers = list()
                propagate_layers = list()
                reduce_layers = list()
                concat_layers = list()
                # elementwise_layers only stores layers of the
                # shape(4) + shape(1) broadcasting form.
                elementwise_layers = list()
                can_be_optimized = True
                # First pass: classify the direct consumers of the transpose.
                for out in _graph.edges_out.get(layer_id, []):
                    if _graph.layers[out].kernel == "paddle.transpose":
                        if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
                            can_be_optimized = False
                            break
                        transpose_layers.append(out)
                    elif _graph.layers[out].kernel in self.elementwise_layers:
                        propagate_layers.append(out)
                    elif _graph.layers[out].kernel in self.direct_layers:
                        ouput_index = get_index(_graph.layers[out])
                        if _graph.layers[out].outputs[ouput_index] in _graph.outputs:
                            can_be_optimized = False
                            break
                        propagate_layers.append(out)
                    elif _graph.layers[out].kernel in self.reduce_layers:
                        ouput_index = get_index(_graph.layers[out])
                        if _graph.layers[out].outputs[ouput_index] in _graph.outputs:
                            can_be_optimized = False
                            break
                        if not _graph.layers[out].attrs.get('keepdim', False):
                            can_be_optimized = False
                            break
                        propagate_layers.append(out)
                        reduce_layers.append(out)
                    elif _graph.layers[out].kernel == "paddle.concat":
                        ouput_index = get_index(_graph.layers[out])
                        if _graph.layers[out].outputs[ouput_index] in _graph.outputs:
                            can_be_optimized = False
                            break
                        propagate_layers.append(out)
                        concat_layers.append(out)
                    else:
                        can_be_optimized = False
                        break
                visited_layers = set()
                # BFS both downstream and upstream from each propagated layer
                # until the region is closed off by matching transposes.
                while len(propagate_layers) > 0 and can_be_optimized:
                    current_id = propagate_layers.pop(0)
                    visited_layers.add(current_id)
                    for out in _graph.edges_out.get(current_id, []):
                        if _graph.layers[
                                out].kernel == "paddle.transpose":
                            if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
                                can_be_optimized = False
                                break
                            transpose_layers.append(out)
                        elif _graph.layers[
                                out].kernel in self.elementwise_layers:
                            output_index = get_index(_graph.layers[out])
                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                        elif _graph.layers[out].kernel in self.direct_layers:
                            output_index = get_index(_graph.layers[out])
                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                        elif _graph.layers[out].kernel in self.reduce_layers:
                            output_index = get_index(_graph.layers[out])
                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if not _graph.layers[out].attrs.get('keepdim',
                                                                False):
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                                reduce_layers.append(out)
                        elif _graph.layers[out].kernel == "paddle.concat":
                            output_index = get_index(_graph.layers[out])
                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                                concat_layers.append(out)
                        else:
                            can_be_optimized = False
                            break
                    for ipt in _graph.edges_in.get(current_id, []):
                        if _graph.layers[
                                current_id].kernel in self.elementwise_layers:
                            # A rank<=1 operand broadcasts unchanged under the
                            # permutation, so only the layer's axis needs a
                            # remap later; record it and skip this producer.
                            try:
                                x_shape = _graph.layers[
                                    current_id].input_shapes['x']
                                y_shape = _graph.layers[
                                    current_id].input_shapes['y']
                                output_index = get_index(_graph.layers[ipt])
                                if _graph.layers[ipt].outputs[
                                        output_index] == _graph.layers[current_id].inputs[
                                        'x']:
                                    if len(x_shape) <= 1:
                                        elementwise_layers.append(current_id)
                                        continue
                                elif _graph.layers[ipt].outputs[
                                        output_index] == _graph.layers[current_id].inputs[
                                        'y']:
                                    if len(y_shape) <= 1:
                                        elementwise_layers.append(current_id)
                                        continue
                                else:
                                    raise Exception(
                                        "Unexcepted situation happend while optimizing transpose"
                                    )
                            except Exception as e:
                                # Missing shape info: give up on this region.
                                can_be_optimized = False
                                break
                        output_index = get_index(_graph.layers[ipt])
                        if _graph.layers[
                                ipt].kernel == "paddle.transpose":
                            if _graph.layers[ipt].attrs["perm"] != [0, 2, 3, 1]:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                transpose_layers.append(ipt)
                        elif _graph.layers[
                                ipt].kernel in self.elementwise_layers:
                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                        elif _graph.layers[ipt].kernel in self.direct_layers:
                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                        elif _graph.layers[ipt].kernel in self.reduce_layers:
                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if not _graph.layers[ipt].attrs.get('keepdim',
                                                                False):
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                                reduce_layers.append(ipt)
                        elif _graph.layers[ipt].kernel == "paddle.concat":
                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                                concat_layers.append(ipt)
                        else:
                            can_be_optimized = False
                            break
                    if not can_be_optimized:
                        break
                if not can_be_optimized:
                    continue
                # The seed transpose itself is also deleted.
                transpose_layers.append(layer_id)
                transpose_layers = list(set(transpose_layers))
                for l in transpose_layers:
                    output_index = get_index(graph.layers[l])
                    if graph.layers[l].outputs[output_index] in graph.outputs:
                        can_be_optimized = False
                        break
                if not can_be_optimized:
                    continue
                for l in transpose_layers:
                    _graph.del_layer(l)
                optimized_transpose_layers.extend(transpose_layers)
                optimized_reduce_layers.extend(reduce_layers)
                optimized_concat_layers.extend(concat_layers)
                optimized_elementwise_layers.extend(elementwise_layers)
                return True
            return False

        before_transpose_num = self.get_transpose_num(graph)
        opt_graph = copy.deepcopy(graph)
        total_layer_num = len(opt_graph.layers)
        # Validate and collect deletions on the copy until no candidate is left.
        while strip_transpose(opt_graph):
            pass
        # Replay deletions on the real graph, then remap axis attributes from
        # NCHW positions back to NHWC via the [0, 2, 3, 1] permutation.
        for layer_id in list(set(optimized_transpose_layers)):
            graph.del_layer(layer_id)
        for layer_id in list(set(optimized_reduce_layers)):
            dim = graph.layers[layer_id].attrs.get('axis', None)
            if dim is not None:
                for i in range(len(dim)):
                    dim[i] = [0, 2, 3, 1][dim[i]]
                graph.layers[layer_id].attrs['axis'] = dim
        for layer_id in list(set(optimized_concat_layers)):
            axis = graph.layers[layer_id].attrs.get('axis', 0)
            graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]
        for layer_id in list(set(optimized_elementwise_layers)):
            axis = graph.layers[layer_id].attrs.get('axis', -1)
            graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]
            # paddle.add has no axis arg; fall back to the fluid op that does.
            if graph.layers[layer_id].kernel == "paddle.add":
                graph.layers[layer_id].kernel = "fluid.layers.elementwise_add"
        current_transpose_num = self.get_transpose_num(graph)
        print(
            "\nTranspose layers optimized, before: transpose_num={}, after: transpose_num={}".
            format(before_transpose_num, current_transpose_num))
......@@ -105,10 +105,6 @@ class DygraphConv2DAddFuser(FuseBase):
if layer.kernel == "paddle.nn.Conv2D":
conv_id = layer_id
for layer_id, layer in matches.items():
if layer.kernel == "paddle.nn.functional.conv2d_transpose":
layer.bias = bias_name
if not is_transpose:
layer.outputs[0] = output_name
if layer.kernel == "paddle.nn.Conv2D":
layer.attrs["bias_attr"] = bias_name
if not is_transpose:
......
......@@ -14,3 +14,10 @@
from .bn_scale_fuser import Static_BNScaleFuser
from .bn_scale_fuse_pass import Static_BNScaleFusePass
from .conv2d_add_fuser import StaticConv2DAddFuser
from .conv2d_add_fuse_pass import StaticConv2DAddFusePass
from .prelu_fuser import StaticPReLUFuser
from .prelu_fuse_pass import StaticPReLUFusePass
from .tf_batchnorm_fuser import StaticTFBatchNormFuser
from .tf_batchnorm_fuse_pass import StaticTFBatchNormFusePass
......@@ -79,7 +79,6 @@ class Static_BNScaleFuser(FuseBase):
graph.layers[new_layer_id] = new_layer
matches.pop(new_layer_id)
def gen_new_layer(self, parameters, matches):
layers_id = list(matches.keys())
layer = matches[layers_id[0]]
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticConv2DAddFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticConv2DAddFusePass(Pass):
    """Graph pass that fuses conv2d followed by a bias add in static graphs."""

    name = "static_conv2d_add_fuse_pass"

    def __init__(self):
        super(StaticConv2DAddFusePass, self).__init__()

    def apply(self, graph):
        """Run the conv2d+add fuser over ``graph`` using edge matching."""
        StaticConv2DAddFuser().operate(graph, match_kind="edge")


# Register a singleton instance so the pass manager can look the pass up.
static_conv2d_add_fuse_pass = StaticConv2DAddFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticConv2DAddFuser(FuseBase):
    """Fuse a ``conv2d`` + ``add``(bias) subgraph into a single biased conv."""

    def __init__(self):
        super(StaticConv2DAddFuser, self).__init__(graph_type="static")
        self.patterns = list()

    def build_pattern(self):
        """Describe the conv2d+add graph structures to be replaced.

        Example generated code matching the patterns.
        Pattern 1 (NHWC model, with layout transposes around the conv):
        MobilenetV1_Logits_Conv2d_1c_1x1_biases = paddle.static.create_parameter(dtype='float32', shape=[1001], name='MobilenetV1_Logits_Conv2d_1c_1x1_biases', default_initializer=paddle.nn.initializer.Constant(value=0.0))
        conv2d_transpose_14 = paddle.transpose(x=MobilenetV1_Logits_AvgPool_1a_AvgPool, perm=[0, 3, 1, 2])
        MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.nn.functional.conv2d(x=conv2d_transpose_14, weight=MobilenetV1_Logits_Conv2d_1c_1x1_weights, bias=None, stride=[1, 1], dilation=[1, 1], padding='SAME')
        MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.transpose(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, perm=[0, 2, 3, 1])
        MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
        Pattern 2 (no transposes):
        MobilenetV1_Logits_Conv2d_1c_1x1_biases = paddle.static.create_parameter(dtype='float32', shape=[1001], name='MobilenetV1_Logits_Conv2d_1c_1x1_biases', default_initializer=paddle.nn.initializer.Constant(value=0.0))
        MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.nn.functional.conv2d(x=conv2d_transpose_14, weight=MobilenetV1_Logits_Conv2d_1c_1x1_weights, bias=None, stride=[1, 1], dilation=[1, 1], padding='SAME')
        MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
        """
        def gen_name(id):
            return "x" + str(id)

        # Pattern 1: transpose -> conv2d -> transpose -> add(bias).
        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(0)])
        pattern.add_layer(
            kernel="paddle.transpose",
            inputs={"x": "conv-input-0"},
            outputs=[gen_name(1)],
            perm=[0, 3, 1, 2])
        pattern.add_layer(
            kernel="paddle.nn.functional.conv2d",
            inputs={"input": gen_name(1),
                    "weight": "conv-input-1"},
            outputs=[gen_name(2)])
        pattern.add_layer(
            kernel="paddle.transpose",
            inputs={"x": gen_name(2)},
            outputs=[gen_name(2)],
            perm=[0, 2, 3, 1])
        pattern.add_layer(
            kernel="paddle.add",
            inputs={"x": gen_name(2),
                    "y": gen_name(0)},
            outputs=[gen_name(3)])
        pattern.build(inputs={"input-0": "conv-input-0",
                              "input-1": "conv-input-1"})
        self.patterns.append(pattern)

        # Pattern 2: conv2d -> add(bias), no layout transposes.
        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(0)])
        pattern.add_layer(
            kernel="paddle.nn.functional.conv2d",
            inputs={"input": "conv-input-0",
                    "weight": "conv-input-1"},
            outputs=[gen_name(1)])
        pattern.add_layer(
            kernel="paddle.add",
            inputs={"x": gen_name(1),
                    "y": gen_name(0)},
            outputs=[gen_name(2)])
        pattern.build(inputs={"input-0": "conv-input-0",
                              "input-1": "conv-input-1"})
        self.patterns.append(pattern)

    def insert_new_layer(self, graph, parameters, matches):
        """Rewrite the matched subgraph, then keep only the add layer in
        ``matches`` so the matcher deletes just that layer."""
        self.gen_new_layer(matches, graph)
        matches_copy = copy.deepcopy(matches)
        for layer_id, layer in matches_copy.items():
            if layer.kernel not in ["paddle.add"]:
                matches.pop(layer_id)

    def gen_new_layer(self, matches, graph):
        """Wire the bias parameter into the conv2d layer in place.

        The add layer's output name is transferred to the conv (or to the
        transpose that follows it, in the NHWC pattern) so downstream
        consumers stay connected.
        """
        is_transpose = False
        # First sweep: collect the bias name, the add output, the conv id,
        # and whether the transposed variant matched.
        # NOTE(review): assumes every match contains one create_parameter,
        # one conv2d and one add — conv_id/bias_name/output_name would be
        # unbound otherwise; verify against the patterns above.
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.static.create_parameter":
                # attrs["name"] holds a quoted string; strip the quotes.
                bias_name = layer.attrs["name"][1: -1]
            if layer.kernel == "paddle.transpose":
                is_transpose = True
            if layer.kernel == "paddle.add":
                output_name = layer.outputs[0]
            if layer.kernel == "paddle.nn.functional.conv2d":
                conv_id = layer_id
        # Second sweep: attach the bias input and reroute the output name.
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.nn.functional.conv2d":
                layer.inputs["bias"] = bias_name
                layer.attrs.pop("bias")
                if not is_transpose:
                    layer.outputs[0] = output_name
            if layer.kernel == "paddle.transpose":
                # Only the transpose fed by the conv takes over the output.
                if conv_id in graph.edges_in[layer_id]:
                    layer.outputs[0] = output_name
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticPReLUFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticPReLUFusePass(Pass):
    """Graph pass that fuses the TF PReLU expansion back into one op."""

    name = "static_prelu_fuse_pass"

    def __init__(self):
        super(StaticPReLUFusePass, self).__init__()

    def apply(self, graph):
        """Run the PReLU fuser over ``graph`` using edge matching."""
        StaticPReLUFuser().operate(graph, match_kind="edge")


# Register a singleton instance so the pass manager can look the pass up.
static_prelu_fuse_pass = StaticPReLUFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticPReLUFuser(FuseBase):
    """Fuse the unrolled TF PReLU sub-graph into ``paddle.nn.functional.prelu``.

    TF exports PReLU as ``relu(x) + alpha * 0.5 * (x - abs(x))``; this fuser
    matches that pattern and replaces it with transpose -> prelu -> transpose
    (the transposes convert the NHWC input to NCHW and back — assumes the
    input is NHWC, which is the TF default; TODO confirm for other layouts).
    """

    def __init__(self):
        super(StaticPReLUFuser, self).__init__(graph_type="static")

    def build_pattern(self):
        """Describe the PReLU graph structure that should be replaced.

        Example of the generated static code this pattern matches:
            conv4_alphas = paddle.static.create_parameter(dtype='float32', shape=[128], name='conv4_alphas', default_initializer=paddle.nn.initializer.Constant(value=0.0))
            conv4_mul_1_y = paddle.full(dtype='float32', shape=[1], fill_value=0.5)
            conv4_Relu = paddle.nn.functional.relu(x=conv4_BiasAdd)
            conv4_Abs = paddle.abs(x=conv4_BiasAdd)
            conv4_sub = fluid.layers.elementwise_sub(x=conv4_BiasAdd, y=conv4_Abs)
            conv4_mul = paddle.multiply(x=conv4_alphas, y=conv4_sub)
            conv4_mul_1 = paddle.multiply(x=conv4_mul, y=conv4_mul_1_y)
            conv4_add = paddle.add(x=conv4_Relu, y=conv4_mul_1)
        """

        def gen_name(id):
            return "x" + str(id)

        self.pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(0)])
        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(1)],
            shape=[1],
            fill_value=0.5)
        self.pattern.add_layer(
            "paddle.nn.functional.relu",
            inputs={"x": "prelu-input-0"},
            outputs=[gen_name(2)])
        self.pattern.add_layer(
            "paddle.abs",
            inputs={"x": "prelu-input-0"},
            outputs=[gen_name(3)])
        self.pattern.add_layer(
            "fluid.layers.elementwise_sub",
            inputs={"x": "prelu-input-0",
                    "y": gen_name(3)},
            outputs=[gen_name(4)])
        self.pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(0),
                    "y": gen_name(4)},
            outputs=[gen_name(5)])
        self.pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(5),
                    "y": gen_name(1)},
            outputs=[gen_name(6)])
        self.pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(2),
                    "y": gen_name(6)},
            outputs=[gen_name(7)])
        self.pattern.build(inputs={"input-0": "prelu-input-0", })

    def insert_new_layer(self, graph, parameters, matches):
        """Splice the fused layers into ``graph`` at the match position.

        The new layers are inserted immediately after the last matched layer
        so topological order is preserved.  Matched layers that are reused in
        the fused result (the alpha parameter layer) are removed from
        ``matches`` so the caller does not delete them.
        """
        new_layers, last_layer_id = self.gen_new_layer(matches, parameters, graph)
        matches_copy = copy.deepcopy(matches)
        # Keep reused layers out of the to-be-deleted match set.
        for layer_id, layer in matches_copy.items():
            for i in range(4):
                if layer_id == new_layers[i].id:
                    matches.pop(new_layers[i].id)
        prefix_layers = OrderedDict()
        mid_layers = OrderedDict()
        suffix_layers = OrderedDict()
        is_need_id = False
        for layer_id, layer in graph.layers.items():
            if is_need_id:
                suffix_layers[layer_id] = layer
            else:
                if layer_id == last_layer_id:
                    # Insert the fused layers right after the anchor layer.
                    for i in range(4):
                        mid_layers[new_layers[i].id] = new_layers[i]
                    is_need_id = True
                prefix_layers[layer_id] = layer
        prefix_layers.update(mid_layers)
        prefix_layers.update(suffix_layers)
        graph.layers = prefix_layers

    def gen_new_layer(self, matches, parameters, graph):
        """Build the replacement layers from the matched sub-graph.

        Returns:
            tuple: ``([param_layer, transpose0, prelu, transpose1], anchor)``
            where ``anchor`` is the id of the last matched layer, used as
            the insertion point by :meth:`insert_new_layer`.
        """
        layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
        # Pull the names we need out of the matched layers: the prelu input
        # (from relu), the alpha parameter, and the final output (from add).
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.nn.functional.relu":
                input_name = layer.inputs["x"]
            if layer.kernel == "paddle.static.create_parameter":
                param_layer = layer
                param_name = layer.outputs[0]
            if layer.kernel == "paddle.add":
                output_name = layer.outputs[0]
        transpose0 = PaddleLayer(
            id=layer_id_list[-1] + "_1",
            kernel="paddle.transpose",
            inputs={"x": input_name},
            outputs=["{}_transpose_for_prelu".format(input_name)],
            perm=[0, 3, 1, 2])
        prelu = PaddleLayer(id=layer_id_list[-1] + "_2",
                   kernel="paddle.nn.functional.prelu",
                   inputs={"x": "{}_transpose_for_prelu".format(input_name),
                           "weight": param_name},
                   outputs=["{}_prelu".format(input_name)])
        transpose1 = PaddleLayer(
            id=layer_id_list[-1] + "_3",
            kernel="paddle.transpose",
            inputs={"x": "{}_prelu".format(input_name)},
            outputs=[output_name],
            perm=[0, 2, 3, 1])
        return [param_layer, transpose0, prelu, transpose1], layer_id_list[-1]
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticTFBatchNormFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class StaticTFBatchNormFusePass(Pass):
    """Pass that fuses the unrolled TF batch-norm sub-graph in static graphs.

    Delegates the rewrite to :class:`StaticTFBatchNormFuser`.
    """
    name = "static_tf_batchnorm_fuse_pass"

    def __init__(self):
        super(StaticTFBatchNormFusePass, self).__init__()

    def apply(self, graph):
        """Run the edge-matching batch-norm fuser over ``graph``."""
        StaticTFBatchNormFuser().operate(graph, match_kind="edge")


# Instantiate once so the pass manager can look the pass up by name.
static_tf_batchnorm_fuse_pass = StaticTFBatchNormFusePass()
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class StaticTFBatchNormFuser(FuseBase):
    """Fuse the unrolled TF batch-norm sub-graph into one batch_norm call.

    TF exports inference-time batch-norm as elementwise arithmetic on
    ``rsqrt(var + eps)``, gamma, beta and mean; two orderings of the
    multiplies are matched and replaced with
    transpose -> paddle.nn.functional.batch_norm -> transpose (the
    transposes convert the NHWC input to NCHW and back — assumes NHWC
    input, the TF default; TODO confirm for other layouts).
    """

    def __init__(self):
        super(StaticTFBatchNormFuser, self).__init__(graph_type="static")
        # Two alternative sub-graph shapes are registered in build_pattern.
        self.patterns = list()

    def build_pattern(self):
        """Describe the batch-norm graph structures that should be replaced.

        Two patterns are registered because the generated code may multiply
        the input by the scale factor either before or after computing the
        beta/mean correction term.
        """

        def gen_name(id):
            return "x" + str(id)

        # Pattern 1: correction term computed before the input multiply.
        # NOTE(review): graph_type is "dygraph" even though this is the
        # static fuser — looks intentional for pattern matching; confirm.
        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(0)])
        pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(1)],
            shape=[1])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(0), "y": gen_name(1)},
            outputs=[gen_name(2)])
        pattern.add_layer(
            "paddle.rsqrt",
            inputs={"x": gen_name(2)},
            outputs=[gen_name(3)])
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(4)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(3), "y": gen_name(4)},
            outputs=[gen_name(5)])
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(6)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(6), "y": gen_name(5)},
            outputs=[gen_name(7)])
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(8)])
        pattern.add_layer(
            "fluid.layers.elementwise_sub",
            inputs={"x": gen_name(8), "y": gen_name(7)},
            outputs=[gen_name(9)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": "bn-input-0", "y": gen_name(5)},
            outputs=[gen_name(10)])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(10), "y": gen_name(9)},
            outputs=[gen_name(11)])
        pattern.build(inputs={"input-0": "bn-input-0", })
        self.patterns.append(pattern)

        # Pattern 2: input multiply appears before the correction term.
        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(0)])
        pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(1)],
            shape=[1])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(0), "y": gen_name(1)},
            outputs=[gen_name(2)])
        pattern.add_layer(
            "paddle.rsqrt",
            inputs={"x": gen_name(2)},
            outputs=[gen_name(3)])
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(4)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(3), "y": gen_name(4)},
            outputs=[gen_name(5)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": "bn-input-0", "y": gen_name(5)},
            outputs=[gen_name(10)])
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(6)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(6), "y": gen_name(5)},
            outputs=[gen_name(7)])
        pattern.add_layer(
            "paddle.static.create_parameter",
            inputs={},
            outputs=[gen_name(8)])
        pattern.add_layer(
            "fluid.layers.elementwise_sub",
            inputs={"x": gen_name(8), "y": gen_name(7)},
            outputs=[gen_name(9)])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(10), "y": gen_name(9)},
            outputs=[gen_name(11)])
        pattern.build(inputs={"input-0": "bn-input-0", })
        self.patterns.append(pattern)

    def insert_new_layer(self, graph, parameters, matches):
        """Splice the fused layers into ``graph`` at the match position.

        The new layers are inserted immediately after the last matched layer
        so topological order is preserved.  Matched layers that are reused in
        the fused result (the mean/var/gamma/beta parameter layers) are
        removed from ``matches`` so the caller does not delete them.
        """
        new_layers, last_layer_id = self.gen_new_layer(matches, parameters, graph)
        matches_copy = copy.deepcopy(matches)
        # Keep reused layers out of the to-be-deleted match set.
        for layer_id, layer in matches_copy.items():
            for i in range(7):
                if layer_id == new_layers[i].id:
                    matches.pop(new_layers[i].id)
        prefix_layers = OrderedDict()
        mid_layers = OrderedDict()
        suffix_layers = OrderedDict()
        is_need_id = False
        for layer_id, layer in graph.layers.items():
            if is_need_id:
                suffix_layers[layer_id] = layer
            else:
                if layer_id == last_layer_id:
                    # Insert the fused layers right after the anchor layer.
                    for i in range(7):
                        mid_layers[new_layers[i].id] = new_layers[i]
                    is_need_id = True
                prefix_layers[layer_id] = layer
        prefix_layers.update(mid_layers)
        prefix_layers.update(suffix_layers)
        graph.layers = prefix_layers

    def gen_new_layer(self, matches, parameters, graph):
        """Build the replacement layers from the matched sub-graph.

        Walks the matched layers and, via the graph's edge maps, identifies
        the variance, gamma, mean and beta parameter layers plus the batch
        norm's input and final output names.

        Returns:
            tuple: ``([mean, var, gamma, beta, transpose0, bn, transpose1],
            anchor)`` where ``anchor`` is the id of the last matched layer,
            used as the insertion point by :meth:`insert_new_layer`.
        """
        layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.full":
                # epsilon constant; variance feeds the add it flows into.
                full_layer = layer
                out_layer_id = graph.edges_out[layer_id][0]
                if matches[out_layer_id].kernel == "paddle.add":
                    var_layer_id = graph.edges_in[out_layer_id][0]
                    var_layer = matches[var_layer_id]
            if layer.kernel == "paddle.rsqrt":
                # gamma is the second input of the multiply after rsqrt.
                out_layer_id = graph.edges_out[layer_id][0]
                if matches[out_layer_id].kernel == "paddle.multiply":
                    gamma_layer_id = graph.edges_in[out_layer_id][1]
                    gamma_layer = matches[gamma_layer_id]
            if layer.kernel == "fluid.layers.elementwise_sub":
                # beta - mean * scale: first input is beta, second traces
                # back to the mean parameter; the following add is the output.
                in_layer_id = graph.edges_in[layer_id][0]
                beta_layer = matches[in_layer_id]
                in_layer_id = graph.edges_in[layer_id][1]
                in_layer_id = graph.edges_in[in_layer_id][0]
                mean_layer = matches[in_layer_id]
                out_layer_id = graph.edges_out[layer_id][0]
                add_layer = matches[out_layer_id]
            if layer.kernel == "paddle.multiply":
                in_layer_id = graph.edges_in[layer_id][1]
                mul_layer = matches[in_layer_id]
                if mul_layer.kernel == "paddle.multiply":
                    # The multiply whose first input comes from outside the
                    # match is the one consuming the batch-norm input.
                    in_layer_id = graph.edges_in[layer_id][0]
                    if in_layer_id not in matches:
                        input_name = layer.inputs["x"]
        transpose0 = PaddleLayer(
            id=layer_id_list[-1] + "_1",
            kernel="paddle.transpose",
            inputs={"x": input_name},
            outputs=["{}_transpose_for_bn".format(input_name)],
            perm=[0, 3, 1, 2])
        bn = PaddleLayer(
            id=layer_id_list[-1] + "_2",
            kernel="paddle.nn.functional.batch_norm",
            inputs={"x": "{}_transpose_for_bn".format(input_name),
                    "running_mean": mean_layer.outputs[0],
                    "running_var": var_layer.outputs[0],
                    "weight": gamma_layer.outputs[0],
                    "bias": beta_layer.outputs[0]},
            outputs=["{}_bn".format(input_name)],
            epsilon=full_layer.attrs["fill_value"])
        transpose1 = PaddleLayer(
            id=layer_id_list[-1] + "_3",
            kernel="paddle.transpose",
            inputs={"x": "{}_bn".format(input_name)},
            outputs=add_layer.outputs,
            perm=[0, 2, 3, 1])
        # Re-id the reused parameter layers so they sort before the fused
        # layers at the insertion anchor.
        mean_layer.id = layer_id_list[-1] + "_01"
        var_layer.id = layer_id_list[-1] + "_02"
        gamma_layer.id = layer_id_list[-1] + "_03"
        beta_layer.id = layer_id_list[-1] + "_04"
        return [mean_layer, var_layer, gamma_layer, beta_layer,
                transpose0, bn, transpose1], layer_id_list[-1]
......@@ -16,13 +16,13 @@ from x2paddle.optimizer.pass_manager import PassManager
from x2paddle.optimizer.fusion.dygraph import *
from x2paddle.optimizer.fusion.static import *
from x2paddle.optimizer.elimination.dygraph import *
from x2paddle.optimizer.elimination.static import *
class GraphOptimizer(object):
def __init__(self, source_frame, paddle_type="dygraph", jit_type="trace"):
if source_frame == "pytorch":
if jit_type == "trace":
self.passes = ["dygraph_constant_fuse_pass",
"trace_fc_fuse_pass"]
self.passes = ["trace_fc_fuse_pass"]
else:
self.passes = [
"dygraph_constant_fuse_pass",
......@@ -39,12 +39,20 @@ class GraphOptimizer(object):
else:
self.passes = ["static_bn_scale_fuse_pass"]
elif source_frame == "tf":
if paddle_type == "dygraph":
self.passes = [
"dygraph_conv2d_add_fuse_pass",
"dygraph_tf_batchnorm_fuse_pass",
"dygraph_prelu_fuse_pass",
"transpose_eliminate_pass"
]
else:
self.passes = [
"static_conv2d_add_fuse_pass",
"static_tf_batchnorm_fuse_pass",
"static_prelu_fuse_pass",
"static_transpose_eliminate_pass"
]
else:
self.passes = []
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册