未验证 提交 fd0205c4 编写于 作者: S SunAhong1993 提交者: GitHub

PyTorch convertor for Seg SwinTransformer (#637)

* Update stargan.md

* fix the paddle_type

* add docs

* add docs

* add acknowledge

* fix the docs

* fix the docs

* add docs

* fix

* add docs

* add docs

* Update README.md

* fix onnx inputs

* fix

* fix

* remove

* remove numpy input

* add for seg swin transformer

* add pad

* add pad

* fix onnx
上级 32eaafe0
...@@ -109,7 +109,7 @@ class PaddleGraph(object): ...@@ -109,7 +109,7 @@ class PaddleGraph(object):
layer = PaddleLayer( layer = PaddleLayer(
layer_id, kernel, inputs, outputs, scope_name=scope_name, **kwargs) layer_id, kernel, inputs, outputs, scope_name=scope_name, **kwargs)
self.layers[layer_id] = layer self.layers[layer_id] = layer
if layer.kernel in ["prim.list_unpack" or "prim.tuple_unpack"]: if layer.kernel in ["prim.list_unpack" , "prim.tuple_unpack"]:
self.has_unpack = True self.has_unpack = True
return layer_id return layer_id
......
...@@ -1514,7 +1514,7 @@ class OpSet9(): ...@@ -1514,7 +1514,7 @@ class OpSet9():
"paddle.minimum", "paddle.minimum",
inputs={"x": val_x.name, inputs={"x": val_x.name,
"y": output_name + "__zeros"}, "y": output_name + "__zeros"},
outputs=[output_name + "__max"]) outputs=[output_name + "__min"])
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
"paddle.multiply", "paddle.multiply",
inputs={"x": val_slope.name, inputs={"x": val_slope.name,
......
...@@ -983,23 +983,31 @@ def aten_constant_pad_nd(mapper, graph, node): ...@@ -983,23 +983,31 @@ def aten_constant_pad_nd(mapper, graph, node):
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name) scope_name)
layer_inputs["input"] = inputs_name[0] layer_inputs["input"] = inputs_name[0]
# 处理输入1,即%4876
is_padding_tensor = False
if inputs_name[1] in mapper.attrs:
layer_attrs["padding"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
scope_name)
layer_inputs["pad"] = inputs_name[1]
is_padding_tensor = True
# 获取当前节点输入的list # 获取当前节点输入的list
current_inputs = list(layer_inputs.values()) current_inputs = list(layer_inputs.values())
# 处理输入1,即%4876
layer_attrs["padding"] = mapper.attrs[inputs_name[1]]
# 处理输入2,即%42 # 处理输入2,即%42
layer_attrs["value"] = mapper.attrs[inputs_name[2]] layer_attrs["value"] = mapper.attrs[inputs_name[2]]
graph.add_layer( if not is_padding_tensor:
"prim.shape", graph.add_layer(
inputs={"input": inputs_name[0]}, "prim.shape",
outputs=[inputs_name[0] + "_shape"], inputs={"input": inputs_name[0]},
scope_name=scope_name) outputs=[inputs_name[0] + "_shape"],
graph.add_layer( scope_name=scope_name)
"prim.len", graph.add_layer(
inputs={"input": inputs_name[0] + "_shape"}, "prim.len",
outputs=[inputs_name[0] + "_len"], inputs={"input": inputs_name[0] + "_shape"},
scope_name=scope_name) outputs=[inputs_name[0] + "_len"],
scope_name=scope_name)
def add_pad_layers(kernel, dim): def add_pad_layers(kernel, dim):
graph.add_layer( graph.add_layer(
...@@ -1020,6 +1028,7 @@ def aten_constant_pad_nd(mapper, graph, node): ...@@ -1020,6 +1028,7 @@ def aten_constant_pad_nd(mapper, graph, node):
inputs={"y": inputs_name[0] + "_len"}, inputs={"y": inputs_name[0] + "_len"},
outputs=[inputs_name[0] + "_len0"], outputs=[inputs_name[0] + "_len0"],
scope_name=scope_name, scope_name=scope_name,
alpha=1.0,
x=dim) x=dim)
block.add_layer( block.add_layer(
"prim.len2list", "prim.len2list",
...@@ -1058,17 +1067,25 @@ def aten_constant_pad_nd(mapper, graph, node): ...@@ -1058,17 +1067,25 @@ def aten_constant_pad_nd(mapper, graph, node):
if_layer.inputs["input-0"] = inputs_name[0] if_layer.inputs["input-0"] = inputs_name[0]
if_layer.inputs["input-1"] = inputs_name[0] + "_len" if_layer.inputs["input-1"] = inputs_name[0] + "_len"
if len(layer_attrs["padding"]) == 2: if not is_padding_tensor:
layer_outputs[0] = layer_outputs[0].raplace("pad", "pad1d") if len(layer_attrs["padding"]) == 2:
add_pad_layers("paddle.nn.Pad1D", 3) layer_outputs[0] = layer_outputs[0].replace("pad", "pad1d")
elif len(layer_attrs["padding"]) == 4: add_pad_layers("paddle.nn.Pad1D", 3)
layer_outputs[0] = layer_outputs[0].raplace("pad", "pad2d") elif len(layer_attrs["padding"]) == 4:
add_pad_layers("paddle.nn.Pad2D", 4) layer_outputs[0] = layer_outputs[0].replace("pad", "pad2d")
elif len(layer_attrs["padding"]) == 6: add_pad_layers("paddle.nn.Pad2D", 4)
layer_outputs[0] = layer_outputs[0].raplace("pad", "pad3d") elif len(layer_attrs["padding"]) == 6:
add_pad_layers("paddle.nn.Pad3D", 5) layer_outputs[0] = layer_outputs[0].replace("pad", "pad3d")
add_pad_layers("paddle.nn.Pad3D", 5)
else:
raise Exception("The length of padding list must be 2, 4 or 6!")
else: else:
raise Exception("The lenght of padding list must be 2, 4 or 6!") graph.add_layer(
"custom_layer:Pad",
inputs=layer_inputs,
outputs=[output_name],
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs return current_inputs, current_outputs
...@@ -4191,10 +4208,45 @@ def aten_relu6(mapper, graph, node): ...@@ -4191,10 +4208,45 @@ def aten_relu6(mapper, graph, node):
return current_inputs, current_outputs return current_inputs, current_outputs
def aten_remainder(mapper, graph, node):
    """Build a PaddleLayer that computes the elementwise remainder (modulo).

    TorchScript example:
        %701 : Tensor = aten::remainder(%661, %139)
    Operands:
        %701 (Tensor): output, the remainder result.
        %661 (Tensor): dividend tensor.
        %139 (Tensor): divisor tensor.
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # List of outputs produced by this node.
    current_outputs = [output_name]
    layer_inputs = {}
    # Register both operands ("x" = dividend, "y" = divisor) as graph inputs.
    for idx, key in enumerate(("x", "y")):
        mapper._check_input(graph, inputs_node[idx], inputs_name[idx],
                            current_outputs, scope_name)
        layer_inputs[key] = inputs_name[idx]
    # List of inputs consumed by this node.
    current_inputs = list(layer_inputs.values())
    graph.add_layer(
        "prim.remainder",
        inputs=layer_inputs,
        outputs=[output_name],
        scope_name=scope_name)
    return current_inputs, current_outputs
def aten_repeat(mapper, graph, node): def aten_repeat(mapper, graph, node):
""" 构造根据参数对输入各维度进行复制的PaddleLayer。 """ 构造根据参数对输入各维度进行复制的PaddleLayer。
TorchScript示例: TorchScript示例:
701 : Tensor = aten::repeat(%699, %700) %701 : Tensor = aten::repeat(%699, %700)
参数含义: 参数含义:
%701 (Tensor): 输出,复制后的Tensor。 %701 (Tensor): 输出,复制后的Tensor。
%699 (Tensor): 需要复制的Tensor。 %699 (Tensor): 需要复制的Tensor。
......
...@@ -609,6 +609,21 @@ def prim_or(layer, ...@@ -609,6 +609,21 @@ def prim_or(layer,
if is_return_line: if is_return_line:
return line.split(" = ")[1] return line.split(" = ")[1]
forward_func.extend(gen_codes([line], indent=indent)) forward_func.extend(gen_codes([line], indent=indent))
def prim_remainder(layer,
                   indent=1,
                   init_func=None,
                   forward_func=None,
                   layer_id=None,
                   different_attrs=None,
                   is_return_line=False):
    """Emit the code line for a ``prim.remainder`` layer: ``out = x % y``.

    Args:
        layer: the PaddleLayer holding inputs "x", "y" and one output name.
        indent: indentation level for the generated code line.
        init_func: accumulator list for __init__ code (unused here).
        forward_func: accumulator list the generated line is appended to.
        layer_id: id of the layer (unused here, kept for a uniform signature).
        different_attrs: attribute-override mapping passed to ``get_value``.
        is_return_line: if True, return only the right-hand-side expression.
    """
    # NOTE: defaults were mutable lists ([]), which are shared across calls in
    # Python; use None sentinels and create fresh lists instead.
    if init_func is None:
        init_func = []
    if forward_func is None:
        forward_func = []
    line = "{} = {} % {}".format(layer.outputs[0],
                                 get_value(layer, "x", different_attrs),
                                 get_value(layer, "y", different_attrs))
    if is_return_line:
        return line.split(" = ")[1]
    forward_func.extend(gen_codes([line], indent=indent))
def prim_replaceitem(layer, def prim_replaceitem(layer,
......
...@@ -14,3 +14,4 @@ ...@@ -14,3 +14,4 @@
from .gather import Gather from .gather import Gather
from .instance_norm import InstanceNorm from .instance_norm import InstanceNorm
from .pad import Pad
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from x2paddle.core.util import *
class Pad(object):
    """Custom layer emulating ``aten::constant_pad_nd`` when the padding is a
    runtime tensor/list rather than a compile-time constant.

    Picks the paddle data_format from the pad length (2 -> NCL, 4 -> NCHW,
    6 -> NCDHW) and temporarily adds a leading batch dimension when the input
    has one dimension fewer than the chosen format expects.
    """

    def __init__(self, value):
        # Constant fill value for the padded elements.
        self.value = value

    def __call__(self, input, pad):
        dim = len(input.shape)
        if len(pad) == 2:
            data_format = "NCL"
        elif len(pad) == 4:
            data_format = "NCHW"
        elif len(pad) == 6:
            data_format = "NCDHW"
        else:
            # Previously an unsupported length fell through and raised a
            # confusing NameError on `data_format`; fail loudly instead.
            raise ValueError("The length of pad must be 2, 4 or 6!")
        # paddle.nn.functional.pad expects a batch dim for these formats;
        # unsqueeze/squeeze when the input is one dim short of the format.
        needs_batch_dim = (dim == 3 and len(pad) == 4) or \
                          (dim == 4 and len(pad) == 6)
        if needs_batch_dim:
            input = paddle.unsqueeze(input, [0])
        # BUGFIX: forward the constant fill value; it was stored in __init__
        # but never passed, so padding always used paddle's default 0.
        output = paddle.nn.functional.pad(
            input, pad, value=self.value, data_format=data_format)
        if needs_batch_dim:
            output = paddle.squeeze(output, [0])
        return output
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册