Commit e2ebdf8b authored by S SunAhong1993

add optimizer and aten for shufflenet

Parent 4fbb2c58
......@@ -101,11 +101,6 @@ class PaddleGraph(object):
self.clear_edges()
outputs_from_nodes = dict()
for layer_id, layer in self.layers.items():
# if "x5109" in layer.outputs or "x5110" in layer.outputs:
# print(layer.kernel)
# print(layer.inputs)
# print(layer.outputs)
# print(layer.attrs)
for input_key, input_var in layer.inputs.items():
vs = input_var
if not isinstance(vs, list):
......
......@@ -28,6 +28,7 @@ class PyTorchDecoder(object):
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
graph = torch._C._jit_pass_canonicalize(graph)
torch._C._jit_pass_canonicalize(graph)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_constant_propagation(graph)
return graph
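The added canonicalize line matters because, unlike the lint/dce passes, torch._C._jit_pass_canonicalize returns a new graph instead of mutating its argument in place. A minimal sketch (the scripted ReLU module is only illustrative):

import torch

# _jit_pass_canonicalize returns the canonicalized graph, so its
# result must be captured; the in-place passes need no assignment.
m = torch.jit.script(torch.nn.ReLU())
graph = m.graph
torch._C._jit_pass_lint(graph)                  # in-place check
graph = torch._C._jit_pass_canonicalize(graph)  # reassignment required
print(graph)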
......@@ -43,7 +43,7 @@ def aten_adaptive_avg_pool2d(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["pool_size"] = inputs_name[1]
layer_inputs["pool_size"] = inputs_name[1]
current_inputs.append(inputs_name[1])
layer_attrs["pool_type"] = string("avg")
......@@ -93,7 +93,7 @@ def aten_addmm(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[3], inputs_name[3],
current_outputs)
layer_attrs["beta"] = inputs_name[3]
layer_inputs["beta"] = inputs_name[3]
current_inputs.append(inputs_name[3])
# Handle input 4, i.e. %151
if inputs_name[4] in mapper.attrs:
......@@ -101,7 +101,7 @@ def aten_addmm(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[4], inputs_name[4],
current_outputs)
layer_attrs["alpha"] = inputs_name[4]
layer_inputs["alpha"] = inputs_name[4]
current_inputs.append(inputs_name[4])
graph.add_layer(
......@@ -175,7 +175,7 @@ def aten_add_(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_attrs["alpha"] = inputs_name[2]
layer_inputs["alpha"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
......@@ -203,8 +203,7 @@ def aten___and__(mapper, graph, node):
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %288
mapper._check_input(
graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
......@@ -395,7 +394,7 @@ def aten_cat(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["axis"] = inputs_name[1]
layer_inputs["axis"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.concat",
......@@ -405,6 +404,82 @@ def aten_cat(mapper, graph, node):
return current_inputs, current_outputs
def aten_chunk(mapper, graph, node):
"""构造分割Tensor的PaddleLayer。
TorchScript示例:
%724 : Tensor[] = aten::chunk(%input.170, %720, %719)
参数含义:
%724 (Tensor): 输出,分割后的结果。
%input.170 (Tensor): 需要进行分割的Tensor。
%720 (int): 分割的块数。
%719 (int): 分割的维度。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %input.170
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %720
if inputs_name[1] in mapper.attrs:
layer_attrs["num_or_sections"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["num_or_sections"] = inputs_name[1]
current_inputs.append(inputs_name[1])
# Handle input 2, i.e. %719
if inputs_name[2] in mapper.attrs:
layer_attrs["dim"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_inputs["dim"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
"fluid.layers.split",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
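For orientation, a hedged sketch of the static-graph fluid call this mapping emits once the chunk count and dimension are constants (the shapes and values below are assumptions for illustration):

import paddle.fluid as fluid

x = fluid.data(name="x", shape=[-1, 4, 8], dtype="float32")
# aten::chunk(x, 2, 1) lowers to a two-way split along dim 1
x0, x1 = fluid.layers.split(input=x, num_or_sections=2, dim=1)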
def aten_contiguous(mapper, graph, node):
""" 构造在内存中连续存储的PaddleLayer。
TorchScript示例:
%x.7 : Tensor = aten::contiguous(%4058, %4046)
参数含义:
%x.7 (Tensor): 输出,在内存中连续存储的Tensor。
%4058 (Tensor): 原始Tensor。
%4046 (int): 存储的形式。
【注意】Paddle中无此用法,所以此处翻译成赋值。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %4058
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_conv2d(mapper, graph, node):
""" 构造conv2d的PaddleLayer。
......@@ -645,6 +720,35 @@ def aten_flatten(mapper, graph, node):
return current_inputs, current_outputs
def aten_floordiv(mapper, graph, node):
""" 构造向上取整除法的PaddleLayer。
TorchScript示例:
%channels_per_group.2 : int = aten::floordiv(%num_channels.2, %3690)
参数含义:
%channels_per_group.2 (-): 除后的结果。
%%num_channels.2 (-): 被除数。
%2 (int): 除数。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %num_channels.2
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %3690
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.floordiv", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
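The node lowers to plain Python through the prim_floordiv generator added in the prim2code changes further below; a hedged, runnable sketch with assumed values:

# generated forward code boils down to a single // expression
num_channels, groups = 232, 4
channels_per_group = num_channels // groups  # -> 58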
def aten___getitem__(mapper, graph, node):
""" 构造获取list中元素的PaddleLayer。
......@@ -920,6 +1024,54 @@ def aten_matmul(mapper, graph, node):
return current_inputs, current_outputs
def aten_mean(mapper, graph, node):
""" 构造求均值的PaddleLayer。
TorchScript示例:
%x.28 : Tensor = aten::mean(%result.1, %4967, %3, %2)
参数含义:
%x.28 (Tensor): 输出,求均值后的结果。
%result.1 (Tensor): 输入,需要求均值的Tensor。
%4967 (int/list): 求平均值运算的维度。
%3 (bool): 是否在输出Tensor中保留减小的维度。
%2 (Tensor): 结果Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %result.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %4967
if inputs_name[1] in mapper.attrs:
layer_attrs["dim"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["dim"] = inputs_name[1]
current_inputs.append(inputs_name[1])
# Handle input 2, i.e. %3
if inputs_name[2] in mapper.attrs:
layer_attrs["keep_dim"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_inputs["keep_dim"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
"fluid.layers.reduce_mean",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
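For reference, a hedged sketch of the fluid call this mapping emits when dim and keep_dim are constants; the values below assume a global-average-pooling use and are illustrative only:

import paddle.fluid as fluid

x = fluid.data(name="x", shape=[-1, 16, 7, 7], dtype="float32")
# aten::mean(x, [2, 3], True, None) lowers to:
out = fluid.layers.reduce_mean(x, dim=[2, 3], keep_dim=True)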
def aten_mul(mapper, graph, node):
""" 构造数值相乘的PaddleLayer。
......@@ -1136,14 +1288,22 @@ def aten_reshape(mapper, graph, node):
# Handle input 0, i.e. %4700
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %4703
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["shape"] = inputs_name[1]
# Collect the lists of inputs/outputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %4703
if inputs_name[1] in mapper.attrs:
layer_attrs["shape"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["shape"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.reshape", inputs=layer_inputs, outputs=layer_outputs)
"fluid.layers.reshape",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
......@@ -1305,6 +1465,72 @@ def aten_t(mapper, graph, node):
return current_inputs, current_outputs
def aten_transpose(mapper, graph, node):
""" 构造矩阵转置的PaddleLayer。
TorchScript示例:
%715 : Tensor = aten::transpose(%x.21, %704, %705)
参数含义:
%715 (Tensor): 输出,转置后的矩阵。
%x.21 (Tensor): 需要转置的Tensor。
%704 (int): 转置的维度1。
%705 (int): 转置的维度2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %x.21
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %704
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
dim1 = inputs_name[1]
# Handle input 2, i.e. %705
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
dim2 = inputs_name[2]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.shape",
inputs={"input": inputs_name[0]},
outputs=[output_name + "_shape"])
current_outputs.append(output_name + "_shape")
graph.add_layer(
"prim.len",
inputs={"input": output_name + "_shape"},
outputs=[output_name + "_len"])
current_outputs.append(output_name + "_len")
current_inputs.append(output_name + "_shape")
graph.add_layer(
"prim.len2list",
inputs={"len": output_name + "_len"},
outputs=[output_name + "_list"])
current_outputs.append(output_name + "_list")
current_inputs.append(output_name + "_len")
graph.add_layer(
"prim.replaceitem",
inputs={"list": output_name + "_list",
"index": dim1,
"item": dim2},
outputs=[])
graph.add_layer(
"prim.replaceitem",
inputs={"list": output_name + "_list",
"index": dim2,
"item": dim1},
outputs=[])
graph.add_layer(
"fluid.layers.transpose",
inputs=layer_inputs,
outputs=layer_outputs,
perm=output_name + "_list")
return current_inputs, current_outputs
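The prim layers above unroll aten::transpose into ordinary Python that builds a full perm list at run time; a hedged sketch of the generated code for a 4-D input, with names and dimensions assumed:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.zeros([2, 3, 4, 5], dtype="float32"))
    dim1, dim2 = 1, 2                  # assumed transpose dimensions
    x715_shape = x.shape               # prim.shape
    x715_len = len(x715_shape)         # prim.len
    x715_list = []
    for i in range(x715_len):          # prim.len2list builds [0, 1, 2, 3]
        x715_list.append(i)
    x715_list[dim1] = dim2             # prim.replaceitem swaps the two
    x715_list[dim2] = dim1             # transposed dimensions
    x715 = fluid.layers.transpose(x, perm=x715_list)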
def aten_unsqueeze(mapper, graph, node):
""" 构造插入维度的PaddleLayer。
......@@ -1333,7 +1559,7 @@ def aten_unsqueeze(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["axes"] = inputs_name[1]
layer_inputs["axes"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.unsqueeze",
......@@ -1343,6 +1569,51 @@ def aten_unsqueeze(mapper, graph, node):
return current_inputs, current_outputs
def aten_view(mapper, graph, node):
""" 构造调整大小的PaddleLayer。
TorchScript示例:
%input.152 : Tensor = aten::view(%x.20, %430)
参数含义:
%input.152 (Tensor): 输出,view后的Tensor。
%x.20 (Tensor): 需要view的Tensor。
%430 (list): 形状大小组成的list。
【注意】view 函数只能用于contiguous后的Tensor上,
也就是只能用于内存中连续存储的Tensor。
如果对Tensor调用过transpose,permute等操作的话会使该Tensor在内存中变得不再连续,
此时就不能再调用view函数。因此,需要先使用contiguous来返回一个contiguous copy。
reshape则不需要依赖目标Tensor是否在内存中是连续的。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %x.20
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Collect the lists of inputs/outputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %430
if inputs_name[1] in mapper.attrs:
layer_attrs["shape"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["shape"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.reshape",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
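Because Paddle's reshape has no contiguity requirement, aten::view lowers to the same call as aten::reshape; a hedged sketch with an assumed constant shape:

import paddle.fluid as fluid

x = fluid.data(name="x", shape=[2, 3, 4], dtype="float32")
# aten::view(x, [6, 4]) lowers to:
out = fluid.layers.reshape(x, shape=[6, 4])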
def aten_warn(mapper, graph, node):
""" 构造warning的PaddleLayer。
......@@ -1370,7 +1641,7 @@ def aten_warn(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["stacklevel"] = inputs_name[1]
layer_inputs["stacklevel"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
......
......@@ -35,6 +35,34 @@ def prim_Constant(mapper, graph, node):
return [], [output_name]
def prim_data(mapper, graph, node):
""" 构造Tensor的PaddleLayer。
TorchScript示例:
%4336 : Tensor = prim::data(%out.6)
参数含义:
%4336 (Tensor): 输出Tensor。
%out.6 (Tensor): 原始Tensor。
【注意】Paddle中无此用法,所以此处翻译成赋值。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %out.6
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def prim_GetAttr(mapper, graph, node):
""" 获取attribute信息。
......@@ -66,6 +94,46 @@ def prim_GetAttr(mapper, graph, node):
return [], [output_name]
def prim_If(mapper, graph, node):
""" 构造if控制流的PaddleLayer。
TorchScript示例:
%input.5 : Tensor = prim::If(%107)
block0():
%109 : Tensor = aten::t(%102)
%ret.2 : Tensor = aten::addmm(%103, %101, %109, %104, %104)
-> (%ret.2)
block1():
%111 : Tensor = aten::t(%102)
...
-> (%output.4)
Parameter meanings:
%107 (bool): the if condition.
%input.5 (Tensor): the output of the if control flow, corresponding to %output.4.
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
graph.add_layer("prim.if", {'input': input_node_name}, [output_name])
current_layer = list(graph.layers.values())[-1]
block0 = list(node.blocks())[0]
block0_graph, graph_inputs0 = mapper.traverse(block0, current_layer)
len0 = 0
for i, input_name in enumerate(graph_inputs0):
current_layer.inputs['input-{}'.format(i)] = input_name
len0 = i
current_layer.add_block(block0_graph)
block1 = list(node.blocks())[1]
block1_graph, graph_inputs1 = mapper.traverse(block1, current_layer)
for i, input_name in enumerate(graph_inputs1):
current_layer.inputs['input-{}'.format(len0 + 1 + i)] = input_name
current_layer.add_block(block1_graph)
return list(current_layer.inputs.values()), node_outputs
def prim_ListConstruct(mapper, graph, node):
""" 构造list的PaddleLayer。
......@@ -92,28 +160,30 @@ def prim_ListConstruct(mapper, graph, node):
return current_inputs, current_outputs
def prim_RaiseException(mapper, graph, node):
""" 构造抛出异常的PaddleLayer。
def prim_ListUnpack(mapper, graph, node):
""" 构造获取list中元素的PaddleLayer。
TorchScript example:
= prim::RaiseException(%76)
%x1.4 : Tensor, %x2.4 : Tensor = prim::ListUnpack(%4354)
Parameter meanings:
%76 (str): the exception message.
%x1.4 (Tensor): output, the first element of the list.
%x2.4 (Tensor): output, the second element of the list.
%4354 (list): the list.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
outputs_name = mapper._get_outputs_name(node)
layer_outputs = outputs_name.copy()
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %76
current_outputs = layer_outputs.copy()
# Handle input 0, i.e. %4354
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.exception", inputs=layer_inputs, outputs=layer_outputs)
"prim.list_unpack", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
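A hedged sketch of the single line the matching prim_list_unpack generator (added in the prim2code changes below) emits for this layer, with names and values assumed:

x4354 = ["a", "b"]  # the input list, values illustrative
x1, x2 = x4354      # prim.list_unpack is plain tuple unpacking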
......@@ -180,46 +250,6 @@ def prim_Loop(mapper, graph, node):
return list(current_layer.inputs.values()), node_outputs
def prim_If(mapper, graph, node):
""" 构造if控制流的PaddleLayer。
TorchScript示例:
%input.5 : Tensor = prim::If(%107)
block0():
%109 : Tensor = aten::t(%102)
%ret.2 : Tensor = aten::addmm(%103, %101, %109, %104, %104)
-> (%ret.2)
block1():
%111 : Tensor = aten::t(%102)
...
-> (%output.4)
Parameter meanings:
%107 (bool): the if condition.
%input.5 (Tensor): the output of the if control flow, corresponding to %output.4.
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
graph.add_layer("prim.if", {'input': input_node_name}, [output_name])
current_layer = list(graph.layers.values())[-1]
block0 = list(node.blocks())[0]
block0_graph, graph_inputs0 = mapper.traverse(block0, current_layer)
len0 = 0
for i, input_name in enumerate(graph_inputs0):
current_layer.inputs['input-{}'.format(i)] = input_name
len0 = i
current_layer.add_block(block0_graph)
block1 = list(node.blocks())[1]
block1_graph, graph_inputs1 = mapper.traverse(block1, current_layer)
for i, input_name in enumerate(graph_inputs1):
current_layer.inputs['input-{}'.format(len0 + 1 + i)] = input_name
current_layer.add_block(block1_graph)
return list(current_layer.inputs.values()), node_outputs
def prim_min(mapper, graph, node):
""" 构造min的PaddleLayer。
......@@ -245,6 +275,31 @@ def prim_min(mapper, graph, node):
return current_inputs, current_outputs
def prim_RaiseException(mapper, graph, node):
""" 构造抛出异常的PaddleLayer。
TorchScript示例:
= prim::RaiseException(%76)
参数含义:
%76 (str): 异常信息。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %76
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.exception", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def prim_requires_grad(mapper, graph, node):
""" 构造是否计算梯度的PaddleLayer。
......
......@@ -100,6 +100,12 @@ def prim_exception(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_floordiv(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} // {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_if(layer, indent=1, init_func=[], forward_func=[]):
line = "if {} :".format(get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
......@@ -141,6 +147,14 @@ def prim_len(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_len2list(layer, indent=1, init_func=[], forward_func=[]):
lines = []
lines.append("{} = []".format(layer.outputs[0]))
lines.append("for i in range({}):".format(get_value(layer, "len")))
lines.append(" {}.append(i)".format(layer.outputs[0]))
forward_func.extend(gen_codes(lines, indent=indent))
def prim_lt(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} < {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
......@@ -157,6 +171,11 @@ def prim_list(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_list_unpack(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {}".format(", ".join(layer.outputs), get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_loop(layer, indent=1, init_func=[], forward_func=[]):
loop_range = get_value(layer, "input")
line = "for {} in range({}):".format(layer.outputs[1], loop_range)
......@@ -194,6 +213,13 @@ def prim_not(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_replaceitem(layer, indent=1, init_func=[], forward_func=[]):
line = "{}[{}] = {}".format(
get_value(layer, "list"),
get_value(layer, "index"), get_value(layer, "item"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_requires_grad(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = not {}.stop_gradient".format(layer.outputs[0],
get_value(layer, "input"))
......
......@@ -14,10 +14,8 @@
from .fc_fuser import FcFuser
from .fc_fuse_pass import FcFusePass
from .nn_adaptive_pool2d_fuser import NnAdaptivePool2dFuser
from .nn_adaptive_pool2d_fuse_pass import NnAdaptivePool2dFusePass
from .functional_adaptive_pool2d_fuser import FunctionalAdaptivePool2dFuser
from .functional_adaptive_pool2d_fuse_pass import FunctionalAdaptivePool2dFusePass
from .adaptive_pool2d_fuser import AdaptivePool2dFuser
from .adaptive_pool2d_fuse_pass import AdaptivePool2dFusePass
from .constant_fuser import ConstantFuser
from .constant_fuse_pass import ConstantFusePass
from .batchnorm2d_fuser import BatchNorm2dFuser
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import AdaptivePool2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class AdaptivePool2dFusePass(Pass):
name = "adaptive_pool2d_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = AdaptivePool2dFuser()
fuser.operate(graph, match_kind="topo")
# Used for registration
adaptive_pool2d_fuse_pass = AdaptivePool2dFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class AdaptivePool2dFuser(FuseBase):
def __init__(self):
super(AdaptivePool2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的adaptive pool2d图结构。
adaptive pool2d层模式python实现代码示例:
x72 = [6, 6]
x73 = x71.shape
x75 = 'Exception'
x76 = 9223372036854775807
x77 = 1
x78 = len(x73)
x79 = 2
x80 = x78 <= x79
if x80 :
raise RaiseException(x75)
x83 = []
x84 = -2
x85 = x73[x84: x76: x77]
x86 = 2
x87 = len(x85)
x88 = [x86, x87]
x89 = min(x88)
for _x91 in range(x89):
x92 = x72[_x91]
x83.append(x92)
x93 = fluid.layers.adaptive_pool2d(input=x71, pool_size=x83, pool_type='avg')
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(0)], value=[6, 6])
self.pattern.add_layer(
"prim.shape",
inputs={'input': "pool-input-0"},
outputs=[gen_name(1)])
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(2)], value=True)
self.pattern.add_layer(
"prim.constant",
inputs={},
outputs=[gen_name(3)],
value="Exception")
self.pattern.add_layer(
"prim.constant",
inputs={},
outputs=[gen_name(4)],
value=9223372036854775807)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(5)], value=1)
self.pattern.add_layer(
"prim.len", inputs={"input": gen_name(1)}, outputs=[gen_name(6)])
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(7)], value=2)
self.pattern.add_layer(
"prim.le",
inputs={"x": gen_name(6),
"y": gen_name(7)},
outputs=[gen_name(8)])
self.pattern.add_layer("prim.if", {'input': gen_name(8)}, [gen_name(9)])
if_layer = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block0 = PaddleGraph(if_layer, graph_type="dygraph")
pattern_block0.add_layer(
"prim.exception",
inputs={"input": gen_name(3)},
outputs=[gen_name(9)])
if_layer.inputs["input-0"] = gen_name(3)
if_layer.add_block(pattern_block0)
pattern_block1 = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(pattern_block1)
self.pattern.add_layer("prim.list", inputs={}, outputs=[gen_name(10)])
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(11)], value=-2)
self.pattern.add_layer(
"prim.slice",
inputs={
"input": gen_name(1),
"start": gen_name(11),
"end": gen_name(4),
"step": gen_name(5)
},
outputs=[gen_name(12)])
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(13)], value=2)
self.pattern.add_layer(
"prim.len", inputs={"input": gen_name(12)}, outputs=[gen_name(14)])
self.pattern.add_layer(
"prim.list",
inputs={"input0": gen_name(13),
"input1": gen_name(14)},
outputs=[gen_name(15)])
self.pattern.add_layer(
"prim.min", inputs={"input": gen_name(15)}, outputs=[gen_name(16)])
self.pattern.add_layer("prim.loop", {'input': gen_name(16)},
[gen_name(17), gen_name(18)])
loop_layer = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(0),
"index": gen_name(18)},
outputs=[gen_name(19)])
pattern_block.add_layer(
"prim.append",
inputs={"list": gen_name(10),
"index": gen_name(19)},
outputs=[gen_name(20)])
loop_layer.inputs["input-0"] = gen_name(0)
loop_layer.inputs["input-2"] = gen_name(10)
loop_layer.add_block(pattern_block)
pool_attrs = {'pool_type': string("avg")}
self.pattern.add_layer(
"fluid.layers.adaptive_pool2d",
inputs={'input': "pool-input-0",
"pool_size": gen_name(10)},
outputs=[gen_name(21)],
**pool_attrs)
self.pattern.build(inputs={"input-0": "pool-input-0"})
def insert_new_layer(self, graph, parameters, matches):
parameters = graph.parameters
new_layer = self.gen_new_layer(parameters, matches)
new_layer_id = list(matches.keys())[0]
graph.layers[new_layer_id] = new_layer
matches.pop(new_layer_id)
def gen_new_layer(self, parameters, matches):
layers_id = list(matches.keys())
layer = matches[layers_id[0]]
pool_size = layer.attrs["value"]
layer = matches[layers_id[1]]
input_name = layer.inputs["input"]
layer = matches[layers_id[-1]]
output_name = layer.outputs[0]
pool_type = layer.attrs["pool_type"]
attrs = dict()
attrs["pool_size"] = pool_size
attrs["pool_type"] = pool_type
new_layer = PaddleLayer(
layers_id[0],
"fluid.layers.adaptive_pool2d",
inputs={"input": input_name},
outputs=[output_name],
**attrs)
return new_layer
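gen_new_layer collapses the whole matched subgraph into a single call; a hedged before/after sketch using the names from the pattern docstring above:

# before fusion (abridged): shape/len/if/loop plumbing computes x83
#     x72 = [6, 6]
#     x73 = x71.shape
#     ...
#     x93 = fluid.layers.adaptive_pool2d(input=x71, pool_size=x83,
#                                        pool_type='avg')
# after fusion: one layer with the constant pool size inlined
#     x93 = fluid.layers.adaptive_pool2d(input=x71, pool_size=[6, 6],
#                                        pool_type='avg')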
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import BatchNorm2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class BatchNorm2dFusePass(Pass):
name = "batchnorm2d_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = BatchNorm2dFuser()
fuser.operate(graph, match_kind="topo")
# Used for registration
batchnorm2d_fuse_pass = BatchNorm2dFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class BatchNorm2dFuser(FuseBase):
def __init__(self):
super(BatchNorm2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的batchnorm2d图结构。
batchnorm2d层模式python实现代码示例:
x2209 = 1
x2212 = 'Exception'
x2213 = 4
x2214 = x2207.shape
x2214 = len(x2214)
x2215 = x2214 != x2213
if x2215 :
raise RaiseException(x2212)
x2218 = False
if x2218 :
x2220 = self.x2220
x2221 = x2220 + x2209
self.x2220 = x2221
x2227 = False
if x2227 :
x2230 = x2207.shape
x2231 = 'Exception'
x2233 = 0
x2234 = 2
x2235 = 1
x2236 = x2230[x2233]
x2237 = len(x2230)
x2238 = x2237 - x2234
x2241 = x2236
for _x2240 in range(x2238):
x2242 = _x2240 + x2234
x2243 = x2230[x2242]
x2244 = x2241 * x2243
x2239 = x2244
x2245 = x2239 == x2235
if x2245 :
raise RaiseException(x2231)
x2248 = self.batchnorm41(x2207)
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(0)], value=1)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(1)], value=0.1)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(2)], value=0.001)
self.pattern.add_layer(
"prim.constant",
inputs={},
outputs=[gen_name(3)],
value="Exception")
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(4)], value=4)
self.pattern.add_layer(
"prim.shape", inputs={'input': "bn-input-0"},
outputs=[gen_name(5)])
self.pattern.add_layer(
"prim.len", inputs={'input': gen_name(5)}, outputs=[gen_name(5)])
self.pattern.add_layer(
"prim.ne",
inputs={"x": gen_name(5),
"y": gen_name(4)},
outputs=[gen_name(6)])
self.pattern.add_layer("prim.if", {'input': gen_name(6)}, [gen_name(7)])
if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block0 = PaddleGraph(if_layer1, graph_type="dygraph")
pattern_block0.add_layer(
"prim.exception",
inputs={"input": gen_name(3)},
outputs=[gen_name(8)])
if_layer1.inputs["input-0"] = gen_name(3)
if_layer1.add_block(pattern_block0)
pattern_block1 = PaddleGraph(if_layer1, graph_type="dygraph")
if_layer1.add_block(pattern_block1)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(9)], value=False)
self.pattern.add_layer("prim.if", {'input': gen_name(9)},
[gen_name(10)])
if_layer2 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block0 = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block0.add_layer(
"fluid.dygraph.base.to_variable",
inputs={},
outputs=[gen_name(11)],
value="params[{}]".format(string(gen_name(11))))
pattern_block0.add_layer(
"prim.add",
inputs={"x": gen_name(11),
"y": gen_name(0)},
outputs=[gen_name(12)])
pattern_block0.add_layer(
"prim.set_attr",
inputs={"input": gen_name(12)},
outputs=["self." + gen_name(11)])
if_layer2.inputs["input-0"] = gen_name(0)
if_layer2.add_block(pattern_block0)
pattern_block1 = PaddleGraph(if_layer2, graph_type="dygraph")
if_layer2.add_block(pattern_block1)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(13)], value=True)
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(14)], value=False)
self.pattern.add_layer("prim.if", {'input': gen_name(14)},
[gen_name(15)])
if_layer3 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block0 = PaddleGraph(if_layer3, graph_type="dygraph")
pattern_block0.add_layer(
"prim.shape",
inputs={'input': "bn-input-0"},
outputs=[gen_name(16)])
pattern_block0.add_layer(
"prim.constant",
inputs={},
outputs=[gen_name(17)],
value="Exception")
pattern_block0.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(18)], value=True)
pattern_block0.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(19)], value=0)
pattern_block0.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(20)], value=2)
pattern_block0.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(21)], value=1)
pattern_block0.add_layer(
"prim.getitem",
inputs={"list": gen_name(16),
"index": gen_name(19)},
outputs=[gen_name(22)])
pattern_block0.add_layer(
"prim.len", inputs={"input": gen_name(16)}, outputs=[gen_name(23)])
pattern_block0.add_layer(
"prim.sub",
inputs={"x": gen_name(23),
"y": gen_name(20)},
outputs=[gen_name(24)])
pattern_block0.add_layer(
"prim.equal",
inputs={"input": gen_name(22)},
outputs=[gen_name(25)])
pattern_block0.add_layer(
"prim.loop",
inputs={"input": gen_name(24)},
outputs=[gen_name(26), gen_name(27)])
loop_layer = pattern_block0.layers[list(pattern_block0.layers.keys())[
-1]]
pattern_block0_block0 = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_block0_block0.add_layer(
"prim.add",
inputs={"x": gen_name(27),
"y": gen_name(20)},
outputs=[gen_name(28)])
pattern_block0_block0.add_layer(
"prim.getitem",
inputs={"list": gen_name(16),
"index": gen_name(28)},
outputs=[gen_name(29)])
pattern_block0_block0.add_layer(
"prim.mul",
inputs={"x": gen_name(25),
"y": gen_name(29)},
outputs=[gen_name(30)])
pattern_block0_block0.add_layer(
"prim.equal",
inputs={"input": gen_name(30)},
outputs=[gen_name(26)])
loop_layer.inputs["input-1"] = gen_name(20)
loop_layer.inputs["input-2"] = gen_name(16)
loop_layer.inputs["input-3"] = gen_name(25)
loop_layer.add_block(pattern_block0_block0)
pattern_block0.add_layer(
"prim.eq",
inputs={"x": gen_name(26),
"y": gen_name(21)},
outputs=[gen_name(31)])
pattern_block0.add_layer(
"prim.if", inputs={"input": gen_name(31)}, outputs=[gen_name(32)])
if_layer31 = pattern_block0.layers[list(pattern_block0.layers.keys())[
-1]]
pattern_block0_block0 = PaddleGraph(if_layer31, graph_type="dygraph")
pattern_block0_block0.add_layer(
"prim.exception",
inputs={"input": gen_name(17)},
outputs=[gen_name(33)])
if_layer31.inputs["input-0"] = gen_name(17)
if_layer31.add_block(pattern_block0_block0)
pattern_block0_block1 = PaddleGraph(if_layer31, graph_type="dygraph")
if_layer31.add_block(pattern_block0_block1)
if_layer3.inputs["input-0"] = "bn-input-0"
if_layer3.add_block(pattern_block0)
pattern_block1 = PaddleGraph(if_layer3, graph_type="dygraph")
if_layer3.add_block(pattern_block1)
self.pattern.add_layer(
"fluid.dygraph.BatchNorm",
inputs={"input": "bn-input-0"},
outputs=[gen_name(34), gen_name(35)],
is_test=True,
num_channels=160,
momentum=0.1,
epsilon=0.001)
self.pattern.build(inputs={"input-0": "bn-input-0"})
def insert_new_layer(self, graph, parameters, matches):
new_layer = self.gen_new_layer(parameters, matches)
new_layer_id = list(matches.keys())[0]
graph.layers[new_layer_id] = new_layer
matches.pop(new_layer_id)
def gen_new_layer(self, parameters, matches):
layers_id = list(matches.keys())
layer = matches[layers_id[-1]]
return layer
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import ConstantFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class ConstantFusePass(Pass):
name = "constant_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = ConstantFuser()
fuser.operate(graph, match_kind="topo")
# Used for registration
constant_fuse_pass = ConstantFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class ConstantFuser(FuseBase):
def __init__(self):
super(ConstantFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的constant图结构。
constant层模式python实现代码示例:
x3 = 10
for _x70 in range(x3):
...
"""
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=["x1"], value=2)
self.pattern.build()
self.pattern.outputs = ["x1"]
def insert_new_layer(self, graph, parameters, matches):
def replace_value(layer_connect, match_name, match_value):
for k, v in layer_connect.inputs.items():
if v == match_name:
layer_connect.inputs.pop(k)
layer_connect.attrs[k] = match_value
break
for k, v in layer_connect.attrs.items():
if v == match_name:
layer_connect.attrs[k] = match_value
break
if layer_connect.kernel == "prim.loop" or \
layer_connect.kernel == "prim.if":
for block in layer_connect.blocks:
for b_layer_id, b_layer in block.layers.items():
if block.edges_in.get(b_layer_id, 0) != 0 and \
-1 in block.edges_in[b_layer_id]:
replace_value(b_layer, match_name, match_value)
layer_id = list(matches.keys())[0]
layer = list(matches.values())[0]
layer_output_name = layer.outputs[0]
layer_value = layer.attrs["value"]
if graph.edges_out.get(layer_id, 0) != 0:
for layer_id_out in graph.edges_out[layer_id]:
layer_connect = graph.layers[layer_id_out]
replace_value(layer_connect, layer_output_name, layer_value)
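The net effect of the constant fuse, in a hedged before/after sketch following the docstring above:

# before fusion:
#     x3 = 10
#     for _x70 in range(x3):
#         ...
# after fusion, the constant value is folded into the consumer:
#     for _x70 in range(10):
#         ...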
......@@ -26,28 +26,23 @@ class FcFuser(FuseBase):
def build_pattern(self):
""" 描述需要替换的fc图结构。
fc层模式python实现代码示例:
x149 = 2
x151 = x146.shape
x151 = len(x151)
x152 = x151 == x149
if x152 :
x147 = self.x147
x154 = fluid.layers.transpose(x=x147, perm=[1, 0])
x148 = self.x148
x155 = fluid.layers.addmm(input=x148, x=x146, y=x154, beta=1, alpha=1)
x153 = x155
x131 = 2
x133 = x128.shape
x133 = len(x133)
x134 = x133 == x131
if x134 :
classifier_6_weight = self.classifier_6_weight
x136 = fluid.layers.transpose(x=classifier_6_weight, perm=[1, 0])
classifier_6_bias = self.classifier_6_bias
x137 = fluid.layers.addmm(input=classifier_6_bias, x=x128, y=x136, beta=1, alpha=1)
x135 = x137
else:
x147 = self.x147
x157 = fluid.layers.transpose(x=x147, perm=[1, 0])
x158 = fluid.layers.matmul(x=x146, y=x157)
x159 = True
if x159 :
x148 = self.x148
x161 = x158 + 1 * x148
x160 = x161
else:
x160 = x158
x153 = x160
classifier_6_weight = self.classifier_6_weight
x138 = fluid.layers.transpose(x=classifier_6_weight, perm=[1, 0])
x139 = fluid.layers.matmul(x=x128, y=x138)
classifier_6_bias = self.classifier_6_bias
x140 = x139 + 1 * classifier_6_bias
x135 = x140
"""
def gen_name(id):
......@@ -117,38 +112,19 @@ class FcFuser(FuseBase):
outputs=[gen_name(9)])
if_layer1.inputs["input-1"] = "fc-input-0"
pattern_block1.add_layer(
"prim.constant", inputs={}, outputs=[gen_name(10)], value=True)
pattern_block1.add_layer("prim.if", {'input': gen_name(10)},
[gen_name(11)])
if_layer2 = pattern_block1.layers[list(pattern_block1.layers.keys())[
-1]]
pattern_block1_block0 = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block1_block0.add_layer(
"fluid.dygraph.base.to_variable",
inputs={},
outputs=[gen_name(12)],
value="params[{}]".format(string(gen_name(12))))
pattern_block1_block0.add_layer(
pattern_block1.add_layer(
"prim.add_",
inputs={"x": gen_name(9),
"y": gen_name(12)},
outputs=[gen_name(13)],
alpha=1)
if_layer2.inputs["input-0"] = gen_name(9)
pattern_block1_block0.add_layer(
"prim.equal",
inputs={'input': gen_name(13)},
outputs=[gen_name(11)])
if_layer2.add_block(pattern_block1_block0)
pattern_block1_block1 = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block1_block1.add_layer(
"prim.equal", inputs={'input': gen_name(9)},
outputs=[gen_name(11)])
if_layer2.inputs["input-1"] = gen_name(9)
pattern_block1.add_layer(
"prim.equal", inputs={'input': gen_name(11)},
"prim.equal", inputs={'input': gen_name(13)},
outputs=[gen_name(4)])
if_layer2.add_block(pattern_block1_block1)
if_layer1.add_block(pattern_block1)
self.pattern.build(inputs={"input-0": "fc-input-0"})
......
......@@ -19,11 +19,8 @@ from x2paddle.optimizer.pass_manager import PassManager
class GraphOptimizer(object):
def __init__(self):
self.passes = [
"fc_fuse_pass",
# "nn_adaptive_pool2d_fuse_pass",
# "functional_adaptive_pool2d_fuse_pass",
# "batchnorm2d_fuse_pass",
"constant_fuse_pass"
"fc_fuse_pass", "adaptive_pool2d_fuse_pass",
"batchnorm2d_fuse_pass", "constant_fuse_pass"
]
def optimize(self, graph):
......