Unverified commit 5fc5a8ed authored by Jason, committed by GitHub

Merge pull request #516 from SunAhong1993/develop

Fix one Caffe bug, one TensorFlow bug, one PyTorch bug, and two ONNX bugs
@@ -508,9 +508,9 @@ class PaddleGraph(object):
],
indent=0)
comment_list = list()
comment_list.append("# 共{}个输入".format(len(self.inputs_info)))
comment_list.append("# There are {} inputs.".format(len(self.inputs_info)))
for k, v in self.inputs_info.items():
comment_list.append("# {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
comment_list.append("# {}: shape-{},type-{}。".format(k, v[0], v[1]))
self.run_func.extend(
gen_codes(
comment_list,
......
@@ -571,7 +571,7 @@ class CaffeOpMapper(OpMapper):
if params.HasField('negative_slope') and params.negative_slope != 0:
negative_slope = float(params.negative_slope)
-layer_attrs = {'alpha': negative_slope}
+layer_attrs = {'negative_slope': negative_slope}
self.paddle_graph.add_layer(
"paddle.nn.LeakyReLU",
inputs={"input": input.name},
......
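The Caffe fix above renames the attribute key from `alpha` to `negative_slope`, the keyword that `paddle.nn.LeakyReLU` actually accepts. A minimal sketch of the corrected call (values illustrative):

import paddle

# paddle.nn.LeakyReLU takes `negative_slope`, not `alpha`;
# passing the old key as a keyword would raise a TypeError.
leaky = paddle.nn.LeakyReLU(negative_slope=0.1)
x = paddle.to_tensor([-1.0, 2.0])
print(leaky(x))  # [-0.1, 2.0]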
@@ -445,6 +445,8 @@ class OpSet9():
layer_outputs = [nn_op_name, output_name]
if is_pads_attr:
paddings = []
+if len(pads) == 10 and sum(pads) == 0:
+    pads = pads[0: 6]
if len(pads) in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads) # NCHW
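The added guard handles exporters that emit a 10-element, all-zero `pads` attribute for 3-D spatial data; trimming it to 6 entries lets the existing 2/4/6-length handling apply. A minimal sketch of the same normalization in isolation (function name hypothetical):

def normalize_pads(pads):
    # Some ONNX exporters emit a 10-element all-zero pads list
    # (5-D begin/end pairs); keep only the first 6 entries so the
    # downstream 2/4/6-length branches apply.
    if len(pads) == 10 and sum(pads) == 0:
        pads = pads[0:6]
    return pads

assert normalize_pads([0] * 10) == [0] * 6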
@@ -728,11 +730,14 @@ class OpSet9():
inputs={'x': name_trans,
'index': indices.name},
outputs=[node.name])
+new_perm = [0] * len(perm)
+for i in range(len(perm)):
+    new_perm[perm[i]] = i
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[node.name],
-perm=perm)
+perm=new_perm)
if len(indices_shape) < 1:
self.paddle_graph.add_layer(
'paddle.squeeze',
@@ -809,11 +814,15 @@ class OpSet9():
'index': indices_reshape},
outputs=[node.name])
input_transpose = node.name + '_transpose'
+new_perm = [0] * len(perm)
+for i in range(len(perm)):
+    new_perm[perm[i]] = i
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[input_transpose],
-perm=perm)
+perm=new_perm)
+perm = new_perm
val_x_shape = val_x.out_shapes[0]
reshaped_shape = []
for i in perm:
......
@@ -3353,6 +3353,42 @@ def aten_pow(mapper, graph, node):
return current_inputs, current_outputs
+def aten_prelu(mapper, graph, node):
+    """ Construct a PaddleLayer for the prelu activation.
+    TorchScript example:
+        %result.3 : aten::prelu(%input.150, %999)
+    Parameter meanings:
+        %result.3 (Tensor): the output, i.e. the result of prelu.
+        %input.150 (Tensor): the Tensor to apply prelu to.
+        %999 (Tensor): the weight.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("relu", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of the current node's outputs
+    current_outputs = [output_name]
+    # Process input 0, i.e. %input.150
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %999
+    weight = mapper.pytorch_params[inputs_name[1]]
+    mapper.paddle_params[op_name + "._weight"] = weight
+    # Get the list of the current node's inputs
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "paddle.nn.PReLU",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        num_parameters=weight.shape[0])
+    return current_inputs, current_outputs
def aten_relu(mapper, graph, node):
""" 构造ReLU激活的PaddleLayer。
......
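The new `aten_prelu` mapper registers the TorchScript weight under `<op_name>._weight` and emits a `paddle.nn.PReLU` layer with `num_parameters` taken from the weight's first dimension. A minimal sketch of the resulting layer (shapes illustrative):

import paddle

# One slope per channel, matching a weight of shape [3],
# as the mapper would register via num_parameters=weight.shape[0].
prelu = paddle.nn.PReLU(num_parameters=3)
x = paddle.randn([1, 3, 4, 4])
print(prelu(x).shape)  # [1, 3, 4, 4]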
@@ -407,6 +407,8 @@ class OpSet9():
if is_pads_attr:
paddings = []
paddle_op = 'paddle.nn.functional.pad'
+if len(pads) == 10 and sum(pads) == 0:
+    pads = pads[0: 6]
if len(pads) in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads) # NCHW
@@ -424,7 +426,7 @@ class OpSet9():
(2, -1)).transpose().astype("int32")
paddings = np.flip(paddings, axis=0).flatten().tolist()
layer_attrs['pad'] = paddings
-layer_attrs['data_format'] = data_format
+layer_attrs['data_format'] = string(data_format)
else:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW
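Wrapping `data_format` in `string(...)` matters because the mapper emits Python source text: without the quoting, the generated call would reference an undefined bare name such as NCHW instead of the literal 'NCHW'. A sketch of what such a helper presumably does (the exact helper lives in X2Paddle's utilities; this body is an assumption):

def string(param):
    # Quote a value so it lands in generated code as a string
    # literal rather than a bare identifier (assumed behavior
    # of X2Paddle's helper of the same name).
    return "'{}'".format(param)

# 'NCHW' -> "'NCHW'", so the emitted line reads
# paddle.nn.functional.pad(..., data_format='NCHW')
print(string("NCHW"))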
@@ -694,11 +696,14 @@ class OpSet9():
inputs={'x': name_trans,
'index': indices.name},
outputs=[node.name])
+new_perm = [0] * len(perm)
+for i in range(len(perm)):
+    new_perm[perm[i]] = i
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[node.name],
-perm=perm)
+perm=new_perm)
if len(indices_shape) < 1:
self.paddle_graph.add_layer(
'paddle.squeeze',
@@ -770,11 +775,15 @@ class OpSet9():
'index': indices_reshape},
outputs=[node.name])
input_transpose = node.name + '_transpose'
+new_perm = [0] * len(perm)
+for i in range(len(perm)):
+    new_perm[perm[i]] = i
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[input_transpose],
-perm=perm)
+perm=new_perm)
+perm = new_perm
val_x_shape = val_x.out_shapes[0]
reshaped_shape = []
for i in perm:
......
@@ -60,7 +60,7 @@ class GraphOptimizer(object):
def optimize(self, graph):
for pass_name in self.passes:
pass_ = PassManager.lookup(pass_name)()
-if pass_name.endswith("_eliminate_pass"):
+if pass_name.endswith("_eliminate_pass") or pass_name.endswith("_conv2d_add_fuse_pass"):
pass_.apply(graph)
else:
while True:
......
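The change adds `_conv2d_add_fuse_pass` to the passes applied exactly once; all other passes keep iterating until the graph stops changing. A hedged sketch of the two strategies (the boolean-returning `apply_fn` convention is an illustration, not the project's actual signature):

def run_passes(graph, passes):
    # Sketch only: `passes` is assumed to be (name, apply_fn) pairs and
    # apply_fn to return True while it still mutates the graph.
    ONE_SHOT = ("_eliminate_pass", "_conv2d_add_fuse_pass")
    for name, apply_fn in passes:
        if name.endswith(ONE_SHOT):   # str.endswith accepts a tuple
            apply_fn(graph)           # applied exactly once
        else:
            while apply_fn(graph):    # iterate to a fixed point
                pass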
@@ -383,9 +383,9 @@ class HierarchicalTree(Tree):
input_data_name = ', '.join(self.pd_graph.inputs)
run_func_list = list()
run_func_list.append("def main({}):".format(input_data_name))
run_func_list.append(" # 共{}个输入".format(len(self.pd_graph.inputs_info)))
run_func_list.append(" # There are {} inputs.".format(len(self.pd_graph.inputs_info)))
for k, v in self.pd_graph.inputs_info.items():
run_func_list.append(" # {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
run_func_list.append(" # {}: shape-{},type-{}。".format(k, v[0], v[1]))
run_func_list.extend(
[" paddle.disable_static()",
" params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
......
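Rendered, the scaffold assembled above looks roughly like the following (input name, shape, and path are illustrative; note the template literally yields "1 inputs" for a single input):

def main(x0):
    # There are 1 inputs.
    # x0: shape-[-1, 3, 224, 224], type-float32.
    paddle.disable_static()
    params = paddle.load('/abs/save_dir/model.pdparams')
    # ... model construction and the forward call follow in the generated file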
@@ -27,6 +27,7 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
"paddle.nn.Linear": "linear",
"paddle.nn.Conv2DTranspose": "conv",
"paddle.nn.LSTM": "lstm",
"paddle.nn.PReLU": "prelu",
"paddle.nn.ReLU": "relu",
"paddle.nn.ReLU6": "relu",
"paddle.nn.Softmax": "softmax",
@@ -41,7 +42,7 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
"paddle.nn.GELU": "gelu",
"paddle.nn.Hardtanh": "tanh",
"paddle.nn.LeakyReLU": "leakly_relu"}
-NN_KERNEL_WITH_PARAMS = list(NN_KERNEL_NAME.keys())[:7]
+NN_KERNEL_WITH_PARAMS = list(NN_KERNEL_NAME.keys())[:8]
def rename_layers(layers, param_tree=None, is_rename_module=False):
""" 对子模块的输入输出等进行重命名。
......
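`NN_KERNEL_WITH_PARAMS` is a prefix slice of `NN_KERNEL_NAME`'s keys, relying on dict insertion order (guaranteed since Python 3.7); because `paddle.nn.PReLU` owns a learnable weight and was inserted into that prefix, the slice grows from 7 to 8. A tiny illustration of the pattern (names hypothetical):

# Dicts preserve insertion order (Python 3.7+), so slicing the first
# k keys is a stable way to mark a prefix of entries as parameterized.
kernels = {"bn": "bn", "conv": "conv", "prelu": "prelu", "relu": "relu"}
WITH_PARAMS = list(kernels.keys())[:3]  # everything before "relu"
assert "prelu" in WITH_PARAMS and "relu" not in WITH_PARAMS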
@@ -349,9 +349,9 @@ class ModuleGraph(object):
input_data_name = ', '.join(self.pd_graph.inputs)
run_func_list = list()
run_func_list.append("def main({}):".format(input_data_name))
run_func_list.append(" # 共{}个输入".format(len(self.pd_graph.inputs_info)))
run_func_list.append(" # There are {} inputs.".format(len(self.pd_graph.inputs_info)))
for k, v in self.pd_graph.inputs_info.items():
run_func_list.append(" # {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
run_func_list.append(" # {}: shape-{},type-{}.".format(k, v[0], v[1]))
run_func_list.extend(
[" paddle.disable_static()",
" params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
......