From 51666c110d4bed59389442ab0119f88d5d0eab17 Mon Sep 17 00:00:00 2001
From: SunAhong1993
Date: Mon, 16 Nov 2020 19:04:16 +0800
Subject: [PATCH] add onnx

---
 x2paddle/convert.py                                |   28 +-
 x2paddle/core/fluid_code.py                        |    2 +
 x2paddle/core/op_mapper.py                         |    2 +
 x2paddle/core/program.py                           |    5 +-
 .../dygraph/caffe2paddle/caffe_op_mapper.py        |   38 +-
 .../op_mapper/dygraph/onnx2paddle/__init__.py      |    0
 .../__pycache__/__init__.cpython-37.pyc            |  Bin 0 -> 183 bytes
 .../__pycache__/onnx_op_mapper.cpython-37.pyc      |  Bin 0 -> 2821 bytes
 .../dygraph/onnx2paddle/onnx_op_mapper.py          |   93 +
 .../dygraph/onnx2paddle/opset9/__init__.py         |    1 +
 .../opset9/__pycache__/__init__.cpython-37.pyc     |  Bin 0 -> 226 bytes
 .../opset9/__pycache__/opset.cpython-37.pyc        |  Bin 0 -> 37011 bytes
 .../dygraph/onnx2paddle/opset9/opset.py            | 1688 +++++++++++++++++
 .../__pycache__/__init__.cpython-37.pyc            |  Bin 0 -> 182 bytes
 .../__pycache__/onnx_op_mapper.cpython-37.pyc      |  Bin 0 -> 2686 bytes
 .../static/onnx2paddle/onnx_op_mapper.py           |    2 +-
 .../__pycache__/__init__.cpython-37.pyc            |  Bin 0 -> 275 bytes
 .../opset9/__pycache__/opset.cpython-37.pyc        |  Bin 0 -> 34360 bytes
 .../__pycache__/__init__.cpython-37.pyc            |  Bin 0 -> 2959 bytes
 .../__pycache__/register.cpython-37.pyc            |  Bin 0 -> 1396 bytes
 .../static/onnx2paddle/opset9/opset.py             |    2 +-
 21 files changed, 1836 insertions(+), 25 deletions(-)
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/__init__.py
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/__pycache__/__init__.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/__pycache__/onnx_op_mapper.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/onnx_op_mapper.py
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__init__.py
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__pycache__/__init__.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__pycache__/opset.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py
 create mode 100644 x2paddle/op_mapper/static/onnx2paddle/__pycache__/__init__.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/static/onnx2paddle/__pycache__/onnx_op_mapper.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/static/onnx2paddle/opset9/__pycache__/__init__.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/static/onnx2paddle/opset9/__pycache__/opset.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/static/onnx2paddle/opset9/custom_layer/__pycache__/__init__.cpython-37.pyc
 create mode 100644 x2paddle/op_mapper/static/onnx2paddle/opset9/custom_layer/__pycache__/register.cpython-37.pyc

diff --git a/x2paddle/convert.py b/x2paddle/convert.py
index 76b9ece..fe1a1b4 100644
--- a/x2paddle/convert.py
+++ b/x2paddle/convert.py
@@ -177,7 +177,7 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto,
     mapper.paddle_graph.gen_model(save_dir)
 
 
-def onnx2paddle(model_path, save_dir, params_merge=False):
+def onnx2paddle(model_path, save_dir, paddle_type, params_merge=False):
     # check onnx installation and version
     try:
         import onnx
@@ -190,19 +190,23 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
         return
     print("Now translating model from onnx to paddle.")
 
-    from x2paddle.op_mapper.onnx2paddle.onnx_op_mapper import ONNXOpMapper
     from x2paddle.decoder.onnx_decoder import ONNXDecoder
-    from x2paddle.optimizer.onnx_optimizer import ONNXOptimizer
+    if paddle_type == "dygraph":
+        from x2paddle.op_mapper.dygraph.onnx2paddle.onnx_op_mapper import ONNXOpMapper
+    else:
+        from x2paddle.op_mapper.static.onnx2paddle.onnx_op_mapper import ONNXOpMapper
     model = ONNXDecoder(model_path)
     mapper = ONNXOpMapper(model)
-    print("Model optimizing ...")
-    optimizer = ONNXOptimizer(mapper)
-    optimizer.delete_redundance_code()
-    print("Model optimized.")
-
-    print("Paddle model and code generating ...")
-    mapper.save_inference_model(save_dir, params_merge)
-    print("Paddle model and code generated.")
+    if paddle_type == "dygraph":
+        mapper.paddle_graph.build()
+        mapper.paddle_graph.gen_model(save_dir)
+    else:
+        from x2paddle.optimizer.onnx_optimizer import ONNXOptimizer
+        print("Model optimizing ...")
+        optimizer = ONNXOptimizer(mapper)
+        optimizer.delete_redundance_code()
+        print("Model optimized.")
+        mapper.save_inference_model(save_dir, params_merge)
 
 
 def pytorch2paddle(model_path, save_dir, jit_type, input_files):
@@ -318,7 +322,7 @@ def main():
 
         if args.params_merge:
             params_merge = True
-        onnx2paddle(args.model, args.save_dir, params_merge)
+        onnx2paddle(args.model, args.save_dir, args.paddle_type, params_merge)
     elif args.framework == "pytorch":
         assert args.model is not None, "--model should be defined while translating pytorch model"
         pytorch2paddle(args.model, args.save_dir, args.jit_type, args.input_files)
diff --git a/x2paddle/core/fluid_code.py b/x2paddle/core/fluid_code.py
index dbf66ef..b7cb87d 100644
--- a/x2paddle/core/fluid_code.py
+++ b/x2paddle/core/fluid_code.py
@@ -41,6 +41,8 @@ class Layer(object):
             layer_code = layer_code
         elif self.use_fluid:
             layer_code = layer_code + "fluid." + self.op + "("
+        elif self.op == "full_like":
+            layer_code = layer_code + "paddle." + self.op + "("
         else:
             layer_code = layer_code + "fluid.layers." + self.op + "("
diff --git a/x2paddle/core/op_mapper.py b/x2paddle/core/op_mapper.py
index 9e6d241..5e1f99a 100644
--- a/x2paddle/core/op_mapper.py
+++ b/x2paddle/core/op_mapper.py
@@ -128,6 +128,7 @@ class OpMapper(object):
         self.add_codes("from paddle.fluid.initializer import Constant")
         self.add_codes("from paddle.fluid.param_attr import ParamAttr")
         self.add_codes("import paddle.fluid as fluid")
+        self.add_codes("import paddle")
         self.add_codes("")
 
     def save_inference_model(self, save_dir, params_merge):
@@ -214,6 +215,7 @@ class OpMapper(object):
 
         self.add_codes("", 0)
         self.add_codes("\ndef x2paddle_net():", 0)
+        self.add_codes("paddle.enable_static()", 1)
         for i in range(len(self.graph.topo_sort)):
             node_name = self.graph.topo_sort[i]
             node = self.graph.get_node(node_name)
diff --git a/x2paddle/core/program.py b/x2paddle/core/program.py
index 9f24644..5291e99 100644
--- a/x2paddle/core/program.py
+++ b/x2paddle/core/program.py
@@ -488,11 +488,12 @@ class PaddleGraph(object):
                 gen_codes(
                     comment_list,
                     indent=1))
+        use_structured_name = False if self.source_type in ["tf", "onnx"] else True
         self.run_func.extend(
             gen_codes(["paddle.disable_static()",
                        "params, _ = fluid.load_dygraph('{}/model')".format(code_dir),
                        "model = {}()".format(self.name),
-                       "model.set_dict(params)",
+                       "model.set_dict(params, use_structured_name={})".format(use_structured_name),
                        "model.eval()",
                        "out = model({})".format(input_data_name),
                        "return out"],
                       indent=1))
@@ -624,7 +625,7 @@ class PaddleGraph(object):
         paddle.disable_static()
         restore, _ = fluid.load_dygraph(osp.join(save_dir, "model"))
         model = getattr(x2paddle_code, self.name)()
-        if self.source_type == "tf":
+        if self.source_type in ["tf", "onnx"]:
             model.set_dict(restore, use_structured_name=False)
         else:
             model.set_dict(restore)
diff --git a/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py b/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py
index c06babc..63208f2 100644
--- a/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py
+++ b/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py
@@ -578,9 +578,11 @@
         mode_bool = params.channel_shared
         output_shape = node.output_shape[0]
         if mode_bool:
-            num_parameters = 1
+            mode = 'all'
+            channel = None
         else:
-            num_parameters = output_shape[1]
+            mode = 'channel'
+            channel = output_shape[1]
         data = node.data
         self.params[prelu_name + '._weight'] = np.squeeze(data[0])
         assert data is not None, "The parameter of {} (type is {}) is not set. You need to use python package of caffe to set the default value.".format(
@@ -589,7 +591,8 @@
             "paddle.nn.PReLU",
             inputs={"input": self.get_input_name(input)},
             outputs=layer_outputs,
-            num_parameters=num_parameters)
+            channel=channel,
+            mode=string(mode))
 
     def Eltwise(self, node):
         assert len(
@@ -745,12 +748,29 @@
         inputs_dict = {}
         inputs_dict['x'] = node.layer_name + "_mul"
         inputs_dict['y'] = node.layer_name + "_cparam2"
-        self.paddle_graph.add_layer(
-            "fluid.layers.elementwise_add",
-            inputs=inputs_dict,
-            outputs=[node.layer_name],
-            axis=axis)
-
+        output_shape = node.output_shape[0]
+        if axis == -1:
+            self.paddle_graph.add_layer(
+                "paddle.add",
+                inputs=inputs_dict,
+                outputs=[node.layer_name])
+        else:
+            if axis < 0:
+                axis = axis + len(output_shape)
+            param2_shape = self.params[node.layer_name + "_cparam2"].shape
+            param2_shape_len = len(param2_shape)
+            diff_len = len(output_shape) - axis - param2_shape_len
+            # param2_shape is a tuple; convert it to a list before padding with 1s
+            new_shape = list(param2_shape) + [1] * diff_len
+            self.paddle_graph.add_layer(
+                "paddle.reshape",
+                inputs={"x": node.layer_name + "_cparam2"},
+                outputs=[node.layer_name + "_cparam2"],
+                shape=new_shape)
+            self.paddle_graph.add_layer(
+                "paddle.add",
+                inputs=inputs_dict,
+                outputs=[node.layer_name])
+
     def Reshape(self, node):
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
         output_shape = node.output_shape[0]
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/__init__.py b/x2paddle/op_mapper/dygraph/onnx2paddle/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/dygraph/onnx2paddle/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68ef8733249007b597ae77541227e0da2f69bbf4
GIT binary patch
literal 183
[base85-encoded .pyc bytecode omitted]
literal 0
HcmV?d00001

diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/__pycache__/onnx_op_mapper.cpython-37.pyc b/x2paddle/op_mapper/dygraph/onnx2paddle/__pycache__/onnx_op_mapper.cpython-37.pyc
new file mode 100644
GIT binary patch
literal 2821
[base85-encoded .pyc bytecode omitted]
literal 0
HcmV?d00001
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_op_mapper.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_op_mapper.py
new file mode 100644
index 0000000..9377475
--- /dev/null
+++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_op_mapper.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
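+#
+# ONNXOpMapper drives the ONNX-to-Paddle dygraph conversion: it walks the
+# decoded graph in topological order and dispatches every node either to a
+# handler method named after the op type, to directly_map() via
+# default_op_mapping, or to elementwise_map() via elementwise_ops.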
+
+from x2paddle.op_mapper.dygraph.onnx2paddle.opset9 import OpSet9
+from x2paddle.core.op_mapper import OpMapper
+from x2paddle.decoder.onnx_decoder import ONNXGraphNode
+from x2paddle.core.program import PaddleGraph
+
+
+class ONNXOpMapper(OpMapper):
+    def __init__(self, decoder):
+        super(ONNXOpMapper, self).__init__()
+        self.support_op_sets = [9, ]
+        self.default_op_set = 9
+        self.graph = decoder.graph
+        self.paddle_graph = PaddleGraph(parent_layer=None, graph_type="dygraph", source_type="onnx")
+        self.opset = self.create_opset(decoder)
+        if not self.op_checker():
+            raise Exception("Model is not supported yet.")
+        # map the ops
+        print("Total nodes: {}".format(
+            sum([
+                isinstance(node, ONNXGraphNode)
+                for name, node in self.graph.node_map.items()
+            ])))
+
+        print("Nodes converting ...")
+        for node_name in self.graph.topo_sort:
+            node = self.graph.get_node(node_name)
+            op = node.layer_type
+            if hasattr(self.opset, op):
+                func = getattr(self.opset, op)
+                func(node)
+            elif op in self.opset.default_op_mapping:
+                self.opset.directly_map(node)
+            elif op in self.opset.elementwise_ops:
+                self.opset.elementwise_map(node)
+        print("Nodes converted.")
+        self.weights = self.opset.weights
+        self.inputs_info = self.opset.inputs_info
+        self.paddle_graph.set_name(self.graph.graph_name)
+        self.paddle_graph.set_parameters(self.weights)
+        self.paddle_graph.set_inputs_info(self.inputs_info)
+        self.paddle_graph.outputs = self.graph.output_nodes
+
+    def op_checker(self):
+        unsupported_ops = set()
+        for node_name in self.graph.topo_sort:
+            node = self.graph.get_node(node_name)
+            op = node.layer_type
+            if not hasattr(self.opset, op) and \
+                    op not in self.opset.default_op_mapping and \
+                    op not in self.opset.elementwise_ops:
+                unsupported_ops.add(op)
+        if len(unsupported_ops) == 0:
+            return True
+        else:
+            print("There are {} ops not supported yet, listed below:".format(
+                len(unsupported_ops)))
+            for op in unsupported_ops:
+                print(op)
+            return False
+
+    def create_opset(self, decoder):
+        run_op_set = self.default_op_set
+        opset = ''
+        if decoder.op_set in self.support_op_sets:
+            opset = 'OpSet' + str(decoder.op_set)
+        elif decoder.op_set < self.default_op_set:
+            opset = 'OpSet' + str(self.default_op_set)
+        else:
+            for op_set in self.support_op_sets:
+                if decoder.op_set > op_set:
+                    run_op_set = op_set
+                else:
+                    break
+            opset = 'OpSet' + str(run_op_set)
+        print(
+            'Now, onnx2paddle supports converting onnx models with opset version {}. '
+            'The opset version of your onnx model is {}; it is automatically treated as opset version {}.'
+            .format(self.support_op_sets, decoder.op_set, run_op_set))
+        return eval(opset)(decoder, self.paddle_graph)
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__init__.py b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__init__.py
new file mode 100644
index 0000000..0092e6e
--- /dev/null
+++ b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__init__.py
@@ -0,0 +1 @@
+from .opset import OpSet9
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..39915faf0e01db4b474c9ac29a52f7ea9a6f3725
GIT binary patch
literal 226
[base85-encoded .pyc bytecode omitted]
literal 0
HcmV?d00001

diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__pycache__/opset.cpython-37.pyc b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/__pycache__/opset.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c606a86ad2fd51ca00ac4906be44395e44b9031
GIT binary patch
literal 37011
[base85-encoded .pyc bytecode omitted]
literal 0
HcmV?d00001
zK7^qK0ImuKWDTKunvTW#uLh z_yvXQS$=!wqF(q;k1U0?M?kqZ#6G0yOXyH&?rVI(0Tw^v0=1pA(=*WjYK% zLmhN>RhBN*hDatzy^8SBMNn@G_BJoy#^A@RT zCfqqiNHWrpr6?LsxUNW)Gsk(0JW4e69a|&H8Y2X8kZWK!Tz=|Euwq&^L;%;KMO?O- z02;;BQ6E}9;zPs65#V42Tnk>cRfNr=XgJvy5k3<4za*Rtem3D`X@nZ{j`)LTEj;2E zK}%``;m1X^g|Ay~V*^DA8O2vQkslSydQk#D$E2hjDZyT>s52&D2KP5$W&!|2?zM&G z$grBnyC@O3vI}q}Pq=a=?w>(U1C~z$R7|QYfQZ1BF4_|~G9~5wnrA^aiyUJLm@?g+ z>P`cu>;gn7yQ@3iEw{_s-VflC-J0oU5$yR}D9h0Y(4A4(g1(tWP>$rl z(Z^QDyQ6?L)A+Jzs*Uf8qW*Ubtl1Uvc-cT=CIV}A1J*!Yd~OJXC%M4K_AGuhcPBd0 zBAQ@F08HoY0!EIY1p`8u1qg{k9lIL@5DiZQ&hT5ht&Mdp;XlCE7-t#*D_I6YRzp9d zY@KS425=0vT{oyJNK31}7qGEEL~?wmUN?1ZeDh6Sw#?-Bmf~+>IWLL1C=2>j2DIUij917Bl=r?@-vn*d$!e$n>XXIN; zk*<;`QG9F|J9nDS5ERwxV}VVt@~y6qost}?U1^9kO3R)|!@@>r_5e?2!RtQ2B>*Nw z^DwGs3Xp0XYZ!VEifT>oKh>PFC_%}%0IE>g;oU5!;&N$=rXB^S0(}pdk!*?8f>r-F z{jiCE_n58Kv!YUE>4nR2x9&KNM^-fwC8(S6yHJg=Lo`;O2wN)HmhfJtCCN!k$e|^q11+IMT1vu)iS+{BbKB(PymP=x%?$f-* zBoi^8J;R4&poOr#3W0uK=X)Pwg({N|GZ9e!EOWh1krx$7@+ul4YxSy|I)ZJOdN{&S zv19T$c^xgL+Ek;r44@U0AU+G|9_4dIiKovdA4@zoJ_GI-ayY7y{4Ow~-sIzIA;|B$ z6*kA4i=UZ$kRO+q9onivzRKkQ8~Eg272kWS(if-CcrW0IJjN_|4JId$1gWJ)tJX&R z_(d3@)@qyb@xD4wzn=*?wl~k@V@!UO$xkr3!sKNpzr^H=On!sOmzaEp$-iOpZ{y)_>;{e z@LNh0bJ=w9Kyjp4hV^@Qb|iaSalANPoWRlj#o1!9cv~@v|NBt((b#zLL~*qE0p#}= zXNvctW~TU_;#e_XysNke|8GX_ZE{?|b^D6h;*G_*;uQYBTb_R|etd3!aO37d!#nK6FO#s30oVVO?2m%z1sSU2Q?!W%=Dj;#XImlp+QUv3@Pi3 z_t)8g-PQYZo~2HKkBx4u;lnpyVLr)+sEje4mtuT-t${EL2_9R&XpSmHslZQobDw=$ zP=XA1-^URqMwlxswi>X5id9WdgIU&eTE2#+bq;&lU*YovIedxnaoND%b4e5b4pqGw zCO0y 1: + return False + if error_dims > 0: + return False + return True + + +def _get_same_padding(in_size, kernel_size, stride): + new_size = int(math.ceil(in_size * 1.0 / stride)) + pad_size = (new_size - 1) * stride + kernel_size - in_size + pad0 = int(pad_size / 2) + pad1 = pad_size - pad0 + return [pad0, pad1] + + +def print_mapping_info(func): + def run_mapping(*args, **kwargs): + node = args[1] + try: + res = func(*args, **kwargs) + except: + print("convert failed node:{}, op_type is {}".format( + node.layer_name[9:], node.layer_type)) + raise + else: + return res + + return run_mapping + + +class OpSet9(): + elementwise_ops = { + 'Add': 'paddle.add', + 'Div': 'paddle.divide', + 'Sub': 'fluid.layers.elementwise_sub', + 'Mul': 'paddle.multiply', + 'Pow': 'paddle.pow', + } + + default_op_mapping_field_values = OrderedDict() + default_op_mapping_field_values['PADDLE_OP'] = '' + default_op_mapping_field_values['PADDLE_INPUT_ARGS'] = None + default_op_mapping_field_values['ATTR_MAPPING'] = dict() + default_op_mapping_field_values['DEFAULTS'] = dict() + + default_op_mapping = { + 'Shape': ['paddle.shape', ['input']], + 'Ceil': ['paddle.ceil', ['x']], + 'ReduceMean': [ + 'paddle.mean', ['x'], dict( + axes='axis', keepdims='keepdim'), dict(keepdim=1) + ], + 'ReduceSum': [ + 'paddle.sum', ['x'], dict( + axes='axis', keepdims='keepdim'), dict(keepdim=1) + ], + 'ReduceMin': [ + 'paddle.min', ['x'], dict( + axes='axis', keepdims='keepdim'), dict(keepdim=1) + ], + 'ReduceMax': [ + 'paddle.max', ['x'], dict( + axes='axis', keepdims='keepdim'), dict(keepdim=1) + ], + #active function + 'Relu': ['paddle.nn.ReLU', ['x']], + 'LeakyRelu': ['paddle.nn.LeakyReLU', ['x'], dict(alpha='negative_slope'), + dict(negative_slope=.01)], + 'Elu': ['paddle.nn.functional.elu', ['x'], dict(), dict(alpha=1.)], + 'ThresholdedRelu': [ + 'paddle.nn.functional.thresholded_relu', ['x'], dict(alpha='threshold'), + dict(alpha=1.) 
+        ],
+        'Tanh': ['paddle.nn.Tanh', ['x']],
+        'Sigmoid': ['paddle.nn.Sigmoid', ['x']],
+        'Softsign': ['paddle.nn.Softsign', ['x']],
+        'Softplus': ['paddle.nn.Softplus', ['x'], dict(), dict(threshold=float(sys.maxsize))],
+        'Exp': ['paddle.exp', ['x']],
+        'Softmax': ['paddle.nn.Softmax', ['x'], dict(), dict(axis=1)],
+        'Sqrt': ['paddle.sqrt', ['x']],
+        'Floor': ['paddle.floor', ['x']],
+        'Abs': ['paddle.abs', ['x']],
+        'Erf': ['paddle.erf', ['x']],
+    }
+
+    def __init__(self, decoder, paddle_graph):
+        super(OpSet9, self).__init__()
+        self.graph = decoder.graph
+        self.paddle_graph = paddle_graph
+        self.input_index = 0
+        self.inputs_info = dict()
+        self.weights = dict()
+        self.nn_name2id = dict()
+
+    def get_node_name(self, node):
+        if hasattr(node, "index"):
+            return "{}_{}".format(node.layer_name, node.index)
+        else:
+            return node.layer_name
+
+    @print_mapping_info
+    def directly_map(self, node, *args, **kwargs):
+        inputs = node.layer.input
+        op_type = node.layer_type
+        attrs = node.attr_map
+        info = self.default_op_mapping[op_type]
+        info.extend(
+            list(self.default_op_mapping_field_values.values())[len(info):])
+        (paddle_op,
+         paddle_input_args,
+         attr_mapping,
+         default_attrs) = info
+        mapped_attrs = {
+            attr_mapping.get(key, key): value
+            for key, value in attrs.items()
+        }
+        if '' in mapped_attrs:
+            mapped_attrs.pop('')
+        if '_' in mapped_attrs:
+            mapped_attrs.pop('_')
+        layer_attrs = default_attrs.copy()
+        layer_attrs.update(mapped_attrs)
+        assert len(inputs) == 1, 'directly_map error with multi inputs'
+        input = self.graph.get_input_node(node, idx=0, copy=True)
+        if paddle_op.startswith("paddle.nn"):
+            op_name = paddle_op[10:].lower()
+            op_name = name_generator(op_name, self.nn_name2id)
+            output_name = node.layer_name
+            layer_outputs = [op_name, output_name]
+            self.paddle_graph.add_layer(
+                kernel=paddle_op,
+                inputs={paddle_input_args[0]: self.get_node_name(input)},
+                outputs=layer_outputs,
+                **layer_attrs)
+        else:
+            self.paddle_graph.add_layer(
+                kernel=paddle_op,
+                inputs={paddle_input_args[0]: self.get_node_name(input)},
+                outputs=[node.layer_name],
+                **layer_attrs)
+        if paddle_op == 'paddle.shape':
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={"x": node.layer_name},
+                outputs=[node.layer_name],
+                dtype=string('int64'))
+
+    @print_mapping_info
+    def elementwise_map(self, node):
+        assert node.layer_type in self.elementwise_ops
+        op_type = self.elementwise_ops[node.layer_type]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_input_node(node, idx=1, copy=True)
+        inputs_dict = {'x': self.get_node_name(val_x),
+                       'y': self.get_node_name(val_y)}
+        self.paddle_graph.add_layer(
+            op_type,
+            inputs=inputs_dict,
+            outputs=[node.layer_name])
+
+    @print_mapping_info
+    def place_holder(self, node):
+        shape = node.out_shapes[0]
+        for i, dim_shape in enumerate(shape):
+            if dim_shape == 0 and i == 0:
+                shape[i] = 1
+            if dim_shape == 0 and i != 0:
+                assert False, 'shape of input is not assigned'
+        self.paddle_graph.add_layer(
+            kernel="paddle.to_tensor",
+            inputs={},
+            outputs=[node.layer_name],
+            data="x{}".format(self.input_index))
+        self.inputs_info["x{}".format(self.input_index)] = [shape, node.dtype]
+        self.input_index += 1
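+
+    # ONNX initializers and constant weights become Paddle parameters: a 0-d
+    # weight is emitted as paddle.full, anything else is registered in
+    # self.weights and re-created with self.create_parameter in the model.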
+    @print_mapping_info
+    def create_parameter(self, node, parameter=None):
+        if parameter is not None:
+            node = parameter
+        dtype = node.dtype
+        shape = node.out_shapes[0]
+        if len(node.weight.shape) == 0:
+            self.paddle_graph.add_layer(
+                "paddle.full",
+                inputs={},
+                outputs=[node.layer_name],
+                dtype=string(dtype),
+                shape=[1],
+                fill_value=node.weight)
+        else:
+            self.weights[node.layer_name] = node.weight
+            self.paddle_graph.add_layer(
+                "self.create_parameter",
+                inputs={},
+                outputs=[node.layer_name],
+                shape=shape,
+                attr=string(node.layer_name),
+                dtype=string(dtype),
+                default_initializer="paddle.nn.initializer.Constant(value=0.0)")
+
+    def _pad_if_asymmetric(self, node, pads, val_name):  # pads: SSEE
+        assert len(pads) & 1 == 0
+        symmetric = True
+        ndims = len(pads) // 2
+        for idx_dim in range(ndims):
+            if pads[idx_dim] != pads[ndims + idx_dim]:
+                symmetric = False
+                break
+        if symmetric:
+            return pads[:ndims], val_name
+        val_padded = self.Pad(node, op_independent=False)
+        return [0] * ndims, val_padded
+
+    def _interpolate(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        inputs = {'x': self.get_node_name(val_x)}
+        if node.layer_type == 'Resize':
+            if len(node.layer.input) == 2:
+                # opset 10
+                val_scales = self.graph.get_input_node(node, idx=1, copy=True)
+                inputs['scale_factor'] = self.get_node_name(val_scales)
+            elif len(node.layer.input) == 3:
+                # opset 11
+                val_scales = self.graph.get_input_node(node, idx=2, copy=True)
+                inputs['scale_factor'] = self.get_node_name(val_scales)
+            elif len(node.layer.input) == 4:
+                # opset 11
+                val_sizes = self.graph.get_input_node(node, idx=3, copy=True)
+                var_nc, var_hw = val_sizes.layer_name + '_nc', val_sizes.layer_name + '_hw'
+                self.paddle_graph.add_layer(
+                    'paddle.split',
+                    inputs={"x": self.get_node_name(val_sizes)},
+                    outputs=[var_nc, var_hw],
+                    num_or_sections=[2, 2],
+                    axis=0)
+                self.paddle_graph.add_layer(
+                    "paddle.cast",
+                    inputs={"x": var_hw},
+                    outputs=[var_hw],
+                    dtype=string('int32'))
+                inputs['size'] = var_hw
+        elif node.layer_type == 'Upsample':
+            val_scales = self.graph.get_input_node(node, idx=1, copy=True)
+            inputs['scale_factor'] = self.get_node_name(val_scales)
+
+        mode = node.get_attr('mode', 'nearest')
+        attrs = {"align_corners": False,
+                 "mode": string(mode),
+                 "align_mode": 1}
+        self.paddle_graph.add_layer(
+            kernel="paddle.nn.functional.interpolate",
+            inputs=inputs,
+            outputs=[node.layer_name],
+            **attrs)
+
+    @print_mapping_info
+    def HardSigmoid(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        alpha = node.get_attr('alpha', 0.2)
+        beta = node.get_attr('beta', 0.5)
+        # HardSigmoid(x) = clip(alpha * x + beta, 0, 1)
+        self.paddle_graph.add_layer(
+            kernel="paddle.scale",
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=[node.layer_name + "_val"],
+            scale=alpha,
+            bias=beta)
+        self.paddle_graph.add_layer(
+            kernel="paddle.clip",
+            inputs={"x": node.layer_name + "_val"},
+            outputs=[node.layer_name],
+            min=0.0,
+            max=1.0)
+
+    @print_mapping_info
+    def RoiAlign(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_rois = self.graph.get_input_node(node, idx=1, copy=True)
+
+        pooled_height = node.get_attr('output_height')
+        pooled_width = node.get_attr('output_width')
+        spatial_scale = node.get_attr('spatial_scale')
+        sampling_ratio = node.get_attr('sampling_ratio')
+        layer_attrs = {
+            'pooled_height': pooled_height,
+            'pooled_width': pooled_width,
+            'spatial_scale': spatial_scale,
+            'sampling_ratio': sampling_ratio,
+        }
+        self.paddle_graph.add_layer(
+            'fluid.layers.roi_align',
+            inputs={'input': self.get_node_name(val_x),
+                    'rois': self.get_node_name(val_rois)},
+            outputs=[node.layer_name],
+            **layer_attrs)
+
+    @print_mapping_info
+    def MaxRoiPool(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_rois = self.graph.get_input_node(node, idx=1, copy=True)
+
+        spatial_scale = node.get_attr('spatial_scale')
+        pooled_height, pooled_width = node.get_attr('pooled_shape')
+        layer_attrs = {
+            'pooled_height': pooled_height,
+            'pooled_width': pooled_width,
+            'spatial_scale': spatial_scale,
+        }
+        self.paddle_graph.add_layer(
+            'fluid.layers.roi_pool',
+            inputs={'input': self.get_node_name(val_x),
+                    'rois': self.get_node_name(val_rois)},
+            outputs=[node.layer_name],
+            **layer_attrs)
+
+    @print_mapping_info
+    def Pad(self, node, op_independent=True):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        pads = node.get_attr('pads')
+        mode = node.get_attr('mode', 'constant')
+        value = node.get_attr('value', 0.)
+        data_shape = val_x.out_shapes[0]
+        output_shape = node.out_shapes[0]
+        assume_pad2d = False
+        layer_attrs = {}
+        layer_attrs['mode'] = string(mode)
+        paddings = []
+        if len(pads) == 4:
+            assume_pad2d |= mode != 'constant'
+            if data_shape:
+                assume_pad2d |= data_shape and len(data_shape) == 4  # NCHW
+            if output_shape:
+                assume_pad2d |= output_shape and len(output_shape) == 4  # NCHW
+        if assume_pad2d:
+            paddle_op = 'paddle.nn.Pad2D'
+            layer_attrs['data_format'] = string('NCHW')
+            layer_attrs['value'] = value
+        else:
+            paddle_op = 'fluid.layers.pad'
+            layer_attrs["pad_value"] = value
+        if len(pads) == 4:
+            paddings = np.array(pads).reshape(
+                (-1, 2)).transpose().flatten().tolist()  # SSEE -> SESE
+        elif len(pads) == 8:
+            paddings = np.array(pads).reshape(
+                (-1, 4)).transpose().flatten().tolist()  # SSEE -> SESE
+            if sum(paddings[:4]) == 0:
+                paddle_op = 'paddle.nn.Pad2D'
+                paddings = paddings[4:]
+                layer_attrs['value'] = value
+                if 'pad_value' in layer_attrs:
+                    layer_attrs.pop('pad_value')
+                tmp_paddings = copy.deepcopy(paddings)
+                paddings[0] = tmp_paddings[2]
+                paddings[1] = tmp_paddings[3]
+                paddings[2] = tmp_paddings[0]
+                paddings[3] = tmp_paddings[1]
+        if paddle_op == 'paddle.nn.Pad2D':
+            layer_attrs['padding'] = paddings
+            nn_op_name = name_generator("pad2d", self.nn_name2id)
+        else:
+            layer_attrs['paddings'] = paddings
+        if op_independent:
+            self.paddle_graph.add_layer(
+                paddle_op,
+                inputs={'x': self.get_node_name(val_x)},
+                outputs=[nn_op_name, node.layer_name] if paddle_op == 'paddle.nn.Pad2D' else [node.layer_name],
+                **layer_attrs)
+        else:
+            self.paddle_graph.add_layer(
+                paddle_op,
+                inputs={'x': self.get_node_name(val_x)},
+                outputs=[nn_op_name, node.layer_name + '_paded'] if paddle_op == 'paddle.nn.Pad2D' \
+                    else [node.layer_name + '_paded'],
+                **layer_attrs)
+            return node.layer_name + '_paded'
+
+    @print_mapping_info
+    def Unsqueeze(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        axes = node.get_attr('axes')
+        layer_attrs = {'axis': axes}
+        if len(val_x.out_shapes[0]) == 0:
+            if node.layer_name:
+                self.paddle_graph.add_layer(
+                    'paddle.reshape',
+                    inputs={"x": self.get_node_name(val_x)},
+                    outputs=[node.layer_name],
+                    shape=[1])
+        else:
+            if str(val_x.dtype) == 'bool':
+                val_x_cast = val_x.layer_name + '_cast'
+                self.paddle_graph.add_layer(
+                    'paddle.cast',
+                    inputs={"x": self.get_node_name(val_x)},
+                    outputs=[val_x_cast],
+                    dtype=string('int64'))
+                self.paddle_graph.add_layer(
+                    'paddle.unsqueeze',
+                    inputs={"x": val_x_cast},
+                    outputs=[node.layer_name],
+                    **layer_attrs)
+            else:
+                self.paddle_graph.add_layer(
+                    'paddle.unsqueeze',
+                    inputs={"x": self.get_node_name(val_x)},
+                    outputs=[node.layer_name],
+                    **layer_attrs)
+
+    @print_mapping_info
+    def Shrink(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        bias = node.get_attr('bias')
+        lambd = node.get_attr('lambd')
+        assert bias == 0.0, 'not support bias!=0'
+        self.paddle_graph.add_layer(
+            'paddle.nn.functional.hardshrink',
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=[node.layer_name],
+            threshold=lambd)
+
+    @print_mapping_info
+    def Constant(self, node):
+        val_output = self.graph.get_node(node.layer.output[0], copy=True)
+
+        value = node.get_attr('value')
+        dtype = np.dtype(value.dtype)
+        output_dtype = val_output.dtype
+        if output_dtype:
+            assert dtype == output_dtype, 'tensor dtype does not match storage dtype'
+
+        shape = node.get_attr('shape', None)
+
+        if shape is None:
+            shape = val_output.out_shapes[0]
+        if shape is None:
+            shape = list(value.shape)
+            _logger.warning('in (Constant -> %s): '
+                            'attribute "shape" of %s not inferred, '
+                            'using value as 1-D tensor may lead to fails',
+                            val_output.layer_name, val_output.layer_name)
+        if len(value) == 1:
+            value = value.tolist()
+            value = value[0]
+            self.paddle_graph.add_layer(
+                "paddle.full",
+                inputs={},
+                outputs=[node.layer_name],
+                dtype=string(dtype),
+                shape=[1],
+                fill_value=value)
+        else:
+            value = np.reshape(value, shape)
+            self.weights[node.layer_name] = value
+            self.paddle_graph.add_layer(
+                "self.create_parameter",
+                inputs={},
+                outputs=[node.layer_name],
+                shape=shape,
+                attr=string(node.layer_name),
+                dtype=string(dtype),
+                default_initializer="paddle.nn.initializer.Constant(value=0.0)")
+
+    @print_mapping_info
+    def Resize(self, node):
+        self._interpolate(node)
+
+    @print_mapping_info
+    def Upsample(self, node):
+        self._interpolate(node)
+
+    @print_mapping_info
+    def InstanceNormalization(self, node):
+        op_name = name_generator("instance_norm", self.nn_name2id)
+        output_name = node.layer_name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_scale = self.graph.get_input_node(node, idx=1, copy=True)
+        val_b = self.graph.get_input_node(node, idx=2, copy=True)
+        epsilon = node.get_attr('epsilon', 1e-5)
+        layer_attrs = {
+            'num_features': node.out_shapes[0][1],
+            'epsilon': epsilon,
+            'weight_attr': string(self.get_node_name(val_scale)),
+            'bias_attr': string(self.get_node_name(val_b))
+        }
+        dim = len(val_x.out_shapes[0])
+        if dim == 2 or dim == 3:
+            paddle_op = "paddle.nn.InstanceNorm1D"
+        elif dim == 4:
+            paddle_op = "paddle.nn.InstanceNorm2D"
+        elif dim == 5:
+            paddle_op = "paddle.nn.InstanceNorm3D"
+        else:
+            raise Exception("Paddle only supports 2D, 3D, 4D or 5D input for InstanceNormalization.")
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=layer_outputs,
+            **layer_attrs)
+
+    @print_mapping_info
+    def Expand(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_shape = self.graph.get_input_node(node, idx=1, copy=True)
+        val_x_dtype = val_x.dtype
+        name_ones = node.layer_name + '_ones'
+        attr_ones = {
+            'shape': val_shape.layer_name,
+            'dtype': string(val_x_dtype),
+            'fill_value': 1
+        }
+        self.paddle_graph.add_layer(
+            'paddle.full',
+            inputs={},
+            outputs=[name_ones],
+            **attr_ones)
+        inputs_dict = {'x': name_ones,
+                       'y': self.get_node_name(val_x)}
+        self.paddle_graph.add_layer(
+            'paddle.multiply',
+            inputs=inputs_dict,
+            outputs=[node.layer_name])
+
+    @print_mapping_info
+    def Gather(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        indices = self.graph.get_input_node(node, idx=1, copy=True)
+        indices_shape = indices.out_shapes[0]
+        axis = node.get_attr('axis', 0)
+        #assert len(
+        #    indices_shape) <= 2, "Gather op don't support dim of indice >2 "
+        if axis == 0 and len(indices_shape) <= 1:
+            if len(val_x.out_shapes[0]) <= 1:
+                self.paddle_graph.add_layer(
+                    'paddle.gather',
+                    inputs={'x': self.get_node_name(val_x),
+                            'index': self.get_node_name(indices)},
+                    outputs=[node.layer_name])
+            elif len(val_x.out_shapes[0]) > 1:
+                if len(indices_shape) == 0:
+                    gather_ = node.layer_name + '_1'
+                    self.paddle_graph.add_layer(
+                        'paddle.gather',
+                        inputs={'x': self.get_node_name(val_x),
+                                'index': self.get_node_name(indices)},
+                        outputs=[gather_])
+                    self.paddle_graph.add_layer(
+                        'paddle.squeeze',
+                        inputs={'x': gather_},
+                        outputs=[node.layer_name],
+                        axis=[0])
+                else:
+                    self.paddle_graph.add_layer(
+                        'paddle.gather',
+                        inputs={'x': self.get_node_name(val_x),
+                                'index': self.get_node_name(indices)},
+                        outputs=[node.layer_name])
+        elif axis > 0 and len(indices_shape) <= 1:
+            perm = list(range(len(val_x.out_shapes[0])))
+            perm = [axis] + perm[:axis] + perm[axis + 1:]
+            name_trans = val_x.layer_name + '_trans'
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[name_trans],
+                perm=perm)
+            self.paddle_graph.add_layer(
+                'paddle.gather',
+                inputs={'x': name_trans,
+                        'index': self.get_node_name(indices)},
+                outputs=[node.layer_name])
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": node.layer_name},
+                outputs=[node.layer_name],
+                perm=perm)
+            if len(indices_shape) < 1:
+                self.paddle_graph.add_layer(
+                    'paddle.squeeze',
+                    inputs={'x': node.layer_name},
+                    outputs=[node.layer_name],
+                    axis=[axis])
+        elif axis == 0 and len(indices_shape) > 1:
+            if val_x.out_shapes[0] is not None and isinstance(
+                    val_x, ONNXGraphDataNode):
+                indices_cast = indices.layer_name + '_cast'
+                self.paddle_graph.add_layer(
+                    'paddle.cast',
+                    inputs={"x": self.get_node_name(indices)},
+                    outputs=[indices_cast],
+                    dtype=string('int64'))
+                op_name = name_generator("embedding", self.nn_name2id)
+                output_name = node.layer_name
+                layer_outputs = [op_name, output_name]
+                self.paddle_graph.add_layer(
+                    'paddle.nn.Embedding',
+                    inputs={"x": indices_cast},
+                    outputs=layer_outputs,
+                    param_attr=string(val_x.layer_name),
+                    size=val_x.out_shapes[0])
+            else:
+                from functools import reduce
+                reshape_shape = reduce(lambda x, y: x * y, indices_shape)
+                indices_reshape = indices.layer_name + '_shape'
+                self.paddle_graph.add_layer(
+                    'paddle.reshape',
+                    inputs={"x": self.get_node_name(indices)},
+                    outputs=[indices_reshape],
+                    shape=[reshape_shape, ])
+
+                perm = list(range(len(val_x.out_shapes[0])))
+                self.paddle_graph.add_layer(
+                    'paddle.gather',
+                    inputs={'x': self.get_node_name(val_x),
+                            'index': indices_reshape},
+                    outputs=[node.layer_name])
+                val_x_shape = val_x.out_shapes[0]
+                reshaped_shape = []
+                for i in perm:
+                    reshaped_shape.append(indices_shape[i])
+                for i in val_x_shape[:axis] + val_x_shape[axis + 1:]:
+                    reshaped_shape.append(i)
+                self.paddle_graph.add_layer(
+                    'paddle.reshape',
+                    inputs={"x": node.layer_name},
+                    outputs=[node.layer_name],
+                    shape=reshaped_shape)
+        elif axis > 0 and len(indices_shape) > 1:
+            from functools import reduce
+            reshape_shape = reduce(lambda x, y: x * y, indices_shape)
+            indices_reshape = indices.layer_name + '_shape'
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": self.get_node_name(indices)},
+                outputs=[indices_reshape],
+                shape=[reshape_shape, ])
+
+            perm = list(range(len(val_x.out_shapes[0])))
+            perm = [axis] + perm[:axis] + perm[axis + 1:]
+            name_trans = val_x.layer_name + '_transpose'
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[name_trans],
+                perm=perm)
+            self.paddle_graph.add_layer(
+                'paddle.gather',
+                inputs={'x': name_trans,
+                        'index': indices_reshape},
+                outputs=[node.layer_name])
+            input_transpose = node.layer_name + '_transpose'
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": node.layer_name},
+                outputs=[input_transpose],
+                perm=perm)
+            val_x_shape = val_x.out_shapes[0]
+            reshaped_shape = []
+            for i in perm:
+                reshaped_shape.append(indices_shape[i])
+            for i in val_x_shape[:axis] + val_x_shape[axis + 1:]:
+                reshaped_shape.append(i)
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": input_transpose},
+                outputs=[node.layer_name],
+                shape=reshaped_shape)
+
+    @print_mapping_info
+    def ScatterND(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        indices = self.graph.get_input_node(node, idx=1, copy=True)
+        updates = self.graph.get_input_node(node, idx=2, copy=True)
+        if len(indices.out_shapes[0]) == 1:
+            self.paddle_graph.add_layer(
+                'paddle.scatter',
+                inputs={'x': self.get_node_name(val_x),
+                        'index': self.get_node_name(indices),
+                        'updates': self.get_node_name(updates)},
+                outputs=[node.layer_name])
+        else:
+            input_inner_indices = node.layer_name + '_input_inner_indices'
+            shape = val_x.out_shapes[0]
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": self.get_node_name(indices)},
+                outputs=[self.get_node_name(indices)],
+                shape=indices.out_shapes[0])
+
+            zeros_like_val_x = val_x.layer_name + '_zeros'
+            self.paddle_graph.add_layer(
+                'paddle.zeros_like',
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[zeros_like_val_x])
+            self.paddle_graph.add_layer(
+                'paddle.scatter_nd_add',
+                inputs={
+                    'x': zeros_like_val_x,
+                    'index': self.get_node_name(indices),
+                    'updates': self.get_node_name(updates)
+                },
+                outputs=[input_inner_indices])
+            indices_mask = node.layer_name + '_indices_mask'
+            constant_minus_one = node.layer_name + '_constant_minus_one'
+            # full_like creates a tensor with the same shape as the input tensor
+            self.paddle_graph.add_layer(
+                'paddle.full_like',
+                inputs={"x": self.get_node_name(updates)},
+                outputs=[constant_minus_one],
+                dtype=string(updates.dtype),
+                fill_value=-1)
+            self.paddle_graph.add_layer(
+                'paddle.scatter_nd_add',
+                inputs={
+                    'x': zeros_like_val_x,
+                    'index': self.get_node_name(indices),
+                    'updates': constant_minus_one
+                },
+                outputs=[indices_mask])
+            constant_one = node.layer_name + '_constant_1'
+            # full_like creates a tensor with the same shape as the input tensor
+            self.paddle_graph.add_layer(
+                'paddle.full_like',
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[constant_one],
+                dtype=string(val_x.dtype),
+                fill_value=1)
+            input_out_indices_mask = node.layer_name + '_input_out_indices_mask'
+            self.paddle_graph.add_layer(
+                "paddle.add",
+                inputs={"x": indices_mask,
+                        "y": constant_one},
+                outputs=[input_out_indices_mask])
+
+            input_out_indices = node.layer_name + '_input_out_indices'
+            self.paddle_graph.add_layer(
+                "paddle.multiply",
+                inputs={"x": self.get_node_name(val_x),
+                        "y": input_out_indices_mask},
+                outputs=[input_out_indices])
+
+            self.paddle_graph.add_layer(
+                "paddle.add",
+                inputs={"x": input_inner_indices,
+                        "y": input_out_indices},
+                outputs=[node.layer_name])
+
+    @print_mapping_info
+    def Range(self, node):
+        val_start = self.graph.get_input_node(node, idx=0, copy=True)
+        val_limit = self.graph.get_input_node(node, idx=1, copy=True)
+        val_delta = self.graph.get_input_node(node, idx=2, copy=True)
+        dtype = val_start.dtype
+        inputs = {'start': self.get_node_name(val_start),
+                  'end': self.get_node_name(val_limit),
+                  'step': self.get_node_name(val_delta)}
+        self.paddle_graph.add_layer(
+            'paddle.arange',
+            inputs=inputs,
+            outputs=[node.layer_name],
+            dtype=string(dtype))
+
+    @print_mapping_info
+    def Slice(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        starts, ends, axes, steps = None, None, None, None
+        layer_attrs = {}
+        if len(node.inputs) > 1:
+            starts = self.graph.get_input_node(node, idx=1, copy=True)
+            ends = self.graph.get_input_node(node, idx=2, copy=True)
+            starts_value = _const_weight_or_none(starts)
+            ends_value = _const_weight_or_none(ends)
+
+            if len(node.inputs) > 3:
+                axes = self.graph.get_input_node(node, idx=3, copy=True)
+                axes = _const_weight_or_none(axes, necessary=True)
+            if len(node.inputs) > 4:
+                steps = self.graph.get_input_node(node, idx=4, copy=True)
+                steps = _const_weight_or_none(steps)
+            layer_attrs = {
+                "axes": axes,
+                "starts": starts.layer_name,
+                "ends": ends.layer_name
+            }
+            if starts_value is not None and ends_value is not None:
+                starts_value = starts_value.copy()
+                ends_value = ends_value.copy()
+                for idx in range(len(ends_value)):
+                    if starts_value[idx] >= val_x.out_shapes[0][axes[idx]]:
+                        starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
+                        ends_value[idx] = val_x.out_shapes[0][axes[idx]]
+                    elif ends_value[idx] > 2**31 - 1:
+                        ends_value[idx] = 2**31 - 1
+                layer_attrs = {
+                    "axes": axes,
+                    "starts": starts_value,
+                    "ends": ends_value
+                }
+            else:
+                if starts.dtype != 'int32':
+                    starts_cast = starts.layer_name + '_cast'
+                    self.paddle_graph.add_layer(
+                        'paddle.cast',
+                        inputs={"x": self.get_node_name(starts)},
+                        outputs=[starts_cast],
+                        dtype=string('int32'))
+                    layer_attrs['starts'] = starts_cast
+                if ends.dtype != 'int32':
+                    ends_cast = ends.layer_name + '_cast'
+                    self.paddle_graph.add_layer(
+                        'paddle.cast',
+                        inputs={"x": self.get_node_name(ends)},
+                        outputs=[ends_cast],
+                        dtype=string('int32'))
+                    layer_attrs['ends'] = ends_cast
+        else:
+            starts = node.get_attr('starts')
+            ends = node.get_attr('ends')
+            axes = node.get_attr('axes')
+            for idx in range(len(ends)):
+                if ends[idx] > 2**31 - 1:
+                    ends[idx] = 2**31 - 1
+            layer_attrs = {"axes": axes, "starts": starts, "ends": ends}
+
+        if steps is not None:
+            layer_attrs['strides'] = steps
+            self.paddle_graph.add_layer(
+                'paddle.strided_slice',
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                **layer_attrs)
+        else:
+            self.paddle_graph.add_layer(
+                'paddle.slice',
+                inputs={"input": self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                **layer_attrs)
+
+    @print_mapping_info
+    def ConstantOfShape(self, node):
+        val_shape = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_node(node.layer.output[0], copy=True)
+
+        value = node.get_attr('value')
+        dtype = value.dtype
+        value = value.tolist()
+        assert len(value) == 1, ('given value is not a scalar (len(value) > 1), '
+                                 'this is not supported')
+        if len(value) == 1:
+            value = value[0]
+        layer_attrs = {
+            'shape': val_shape.layer_name,
+            'dtype': string(dtype),
+            'fill_value': value
+        }
+        self.paddle_graph.add_layer(
+            "paddle.full",
+            inputs={},
+            outputs=[node.layer_name],
+            **layer_attrs)
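+
+    # ONNX Clip carries min/max as attributes up to opset 10 and as extra
+    # inputs from opset 11 on; both forms are handled below.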
+    @print_mapping_info
+    def Clip(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_node(node.layer.output[0], copy=True)
+        max_value, min_value = None, None
+        if len(node.inputs) == 1:
+            max_value = node.get_attr('max')
+            min_value = node.get_attr('min')
+            layer_attrs = {
+                'max': max_value,
+                'min': min_value,
+            }
+            self.paddle_graph.add_layer(
+                'paddle.clip',
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                **layer_attrs)
+        else:
+            max_ipt = self.graph.get_input_node(node, idx=1, copy=True)
+            min_ipt = self.graph.get_input_node(node, idx=2, copy=True)
+            max_value = _const_weight_or_none(max_ipt)
+            min_value = _const_weight_or_none(min_ipt)
+            if max_value is not None and min_value is not None:
+                if max_value.shape == (1, ):
+                    max_value = max_value[0]
+                if min_value.shape == (1, ):
+                    min_value = min_value[0]
+                layer_attrs = {'max': max_value, 'min': min_value}
+                self.paddle_graph.add_layer(
+                    'paddle.clip',
+                    inputs={"x": self.get_node_name(val_x)},
+                    outputs=[node.layer_name],
+                    **layer_attrs)
+            else:
+                raise Exception("min and max of Clip must be constant tensors.")
+
+    @print_mapping_info
+    def Split(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        split = node.get_attr('split')
+        axis = node.get_attr('axis', 0)
+        layer_attrs = {
+            'num_or_sections': split,
+            'axis': axis,
+        }
+        outputs_list = list()
+        if isinstance(split, list) or isinstance(split, tuple):
+            for i, s in enumerate(split):
+                outputs_list.append("{}_{}".format(node.layer_name, i))
+        else:
+            outputs_list.append(node.layer_name)
+
+        self.paddle_graph.add_layer(
+            'paddle.split',
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=outputs_list,
+            **layer_attrs)
+
+    @print_mapping_info
+    def Reshape(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_shape = self.graph.get_input_node(node, idx=1, copy=True)
+        val_reshaped = self.graph.get_node(node.layer.output[0], copy=True)
+        shape_value = _const_weight_or_none(val_shape)
+        shape_dims = len(val_shape.out_shapes[0])
+
+        if shape_value is not None:
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={'x': self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                shape=shape_value.tolist())
+        elif len(node.out_shapes[0]) > 0 and _is_static_shape(node.out_shapes[0]):
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={'x': self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                shape=node.out_shapes[0])
+        elif val_shape.dtype == 'int64':
+            val_shape_cast = val_shape.layer_name + '_cast'
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={'x': self.get_node_name(val_shape)},
+                outputs=[val_shape_cast],
+                dtype=string('int32'))
+            # shape may be [], coming from Gather by scalar indices
+            if len(val_shape.out_shapes[0]) > 0:
+                self.paddle_graph.add_layer(
+                    'paddle.reshape',
+                    inputs={'x': val_shape_cast},
+                    outputs=[val_shape_cast],
+                    shape=val_shape.out_shapes[0])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={'x': self.get_node_name(val_x),
+                        'shape': val_shape_cast},
+                outputs=[node.layer_name])
+        else:
+            # shape may be [], coming from Gather by scalar indices
+            if len(val_shape.out_shapes[0]) > 0:
+                self.paddle_graph.add_layer(
+                    'paddle.reshape',
+                    inputs={'x': self.get_node_name(val_shape)},
+                    outputs=[self.get_node_name(val_shape)],
+                    shape=val_shape.out_shapes[0])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={'x': self.get_node_name(val_x),
+                        'shape': self.get_node_name(val_shape)},
+                outputs=[node.layer_name])
+            val_shape_cast = val_shape.layer_name + '_cast'
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={'x': self.get_node_name(val_shape)},
+                outputs=[val_shape_cast],
+                dtype=string('int32'))
+            # shape may be [] when it comes from a Gather with scalar indices
+            if len(val_shape.out_shapes[0]) > 0:
+                self.paddle_graph.add_layer(
+                    'paddle.reshape',
+                    inputs={'x': val_shape_cast},
+                    outputs=[val_shape_cast],
+                    shape=val_shape.out_shapes[0])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={'x': self.get_node_name(val_x),
+                        'shape': val_shape_cast},
+                outputs=[node.layer_name])
+        else:
+            # shape may be [] when it comes from a Gather with scalar indices
+            if len(val_shape.out_shapes[0]) > 0:
+                self.paddle_graph.add_layer(
+                    'paddle.reshape',
+                    inputs={'x': self.get_node_name(val_shape)},
+                    outputs=[self.get_node_name(val_shape)],
+                    shape=val_shape.out_shapes[0])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={'x': self.get_node_name(val_x),
+                        'shape': self.get_node_name(val_shape)},
+                outputs=[node.layer_name])
+
+    @print_mapping_info
+    def Cast(self, node):
+        val_input = self.graph.get_input_node(node, idx=0, copy=True)
+        val_output = self.graph.get_node(node.layer.output[0], copy=True)
+
+        dtype = node.get_attr('to')
+        if not isinstance(dtype, np.dtype):
+            dtype = TENSOR_TYPE_TO_NP_TYPE[dtype]
+
+        output_dtype = val_output.dtype
+        if output_dtype:
+            assert dtype == output_dtype, "dtype of the 'to' attribute does not match the output dtype"
+        self.paddle_graph.add_layer(
+            'paddle.cast',
+            inputs={'x': self.get_node_name(val_input)},
+            outputs=[node.layer_name],
+            dtype=string(dtype))
+
+    @print_mapping_info
+    def Not(self, node):
+        val_input = self.graph.get_input_node(node, idx=0, copy=True)
+        self.paddle_graph.add_layer(
+            'paddle.logical_not',
+            inputs={'x': self.get_node_name(val_input)},
+            outputs=[node.layer_name])
+
+    @print_mapping_info
+    def AveragePool(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+
+        auto_pad = node.get_attr('auto_pad', 'NOTSET')
+        kernel_shape = node.get_attr("kernel_shape")
+        poolnd = len(kernel_shape)
+        strides = node.get_attr("strides")
+        ceil_mode = bool(node.get_attr('ceil_mode', 0))
+        pads = node.get_attr('pads', [0] * (poolnd * 2))
+
+        paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
+
+        if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
+            input_shape = val_x.out_shapes[0]
+            pad_h = _get_same_padding(input_shape[2], kernel_shape[0],
+                                      strides[0])
+            pad_w = _get_same_padding(input_shape[3], kernel_shape[1],
+                                      strides[1])
+            paddings = pad_h + pad_w
+
+        paddle_op = 'fluid.layers.pool{}d'.format(poolnd)
+        assert 2 <= poolnd <= 3, 'only pool2d and pool3d are supported'
+        layer_attrs = {
+            "pool_size": kernel_shape,
+            "pool_type": string('avg'),
+            "pool_stride": strides,
+            "pool_padding": paddings,
+            "ceil_mode": ceil_mode,
+            "exclusive": 'True',
+            "name": string(node.layer_name)
+        }
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={'input': val_x if isinstance(val_x, str) else self.get_node_name(val_x)},
+            outputs=[node.layer_name],
+            **layer_attrs)
+        # TODO(syf): op has diff
+#         op_name = name_generator("pool", self.nn_name2id)
+#         output_name = node.layer_name
+#         layer_outputs = [op_name, output_name]
+#         paddle_op = 'paddle.nn.Pool{}D'.format(poolnd)
+#         assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
+#         layer_attrs = {
+#             "kernel_size": kernel_shape,
+#             "stride": strides,
+#             "padding": paddings,
+#             "ceil_mode": ceil_mode,
+#             "exclusive": 'True',
+#         }
+#         self.paddle_graph.add_layer(
+#             paddle_op,
+#             inputs={'x': self.get_node_name(val_x)},
+#             outputs=layer_outputs,
+#             **layer_attrs)
+
+    @print_mapping_info
+    def Concat(self, node):
+        inputs_list = []
+        dtypes = set()
+        for i in range(len(node.layer.input)):
+            ipt = self.graph.get_input_node(node, idx=i, copy=True)
+            inputs_list.append(self.get_node_name(ipt))
+            dtypes.add(ipt.dtype)
+        if len(dtypes) > 1:
+            assert False, 'Unsupported situation happened, please create issue on https://github.com/PaddlePaddle/X2Paddle/issues.'
+        axis = node.get_attr('axis')
+        self.paddle_graph.add_layer(
+            'paddle.concat',
+            inputs={"x": inputs_list},
+            outputs=[node.layer_name],
+            axis=axis)
+
+    @print_mapping_info
+    def Flatten(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        output_shape = node.out_shapes[0]
+        axis = node.get_attr('axis', 1)
+        shape_list = [1, 1]
+        if axis == 0:
+            for s in output_shape:
+                shape_list[1] *= s
+        else:
+            for s in output_shape[:axis]:
+                shape_list[0] *= s
+            for s in output_shape[axis:]:
+                shape_list[1] *= s
+        self.paddle_graph.add_layer(
+            'paddle.reshape',
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=[node.layer_name],
+            shape=shape_list)
+
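+    # ONNX Gemm computes alpha * op(A) @ op(B) + beta * C; it is decomposed below
+    # into paddle.matmul (with transpose flags), paddle.scale and paddle.add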
+    @print_mapping_info
+    def Gemm(self, node):
+        val_a = self.graph.get_input_node(node, idx=0, copy=True)
+        val_b = self.graph.get_input_node(node, idx=1, copy=True)
+        val_c = self.graph.get_input_node(node, idx=2, copy=True)
+
+        alpha = node.get_attr('alpha', 1.)  # optional
+        beta = node.get_attr('beta', 1.)  # optional
+        trans_a = bool(node.get_attr('transA', 0))  # optional
+        trans_b = bool(node.get_attr('transB', 0))  # optional
+        val_mm = node.layer_name + '_mm'
+        matmul_inputs = {"x": self.get_node_name(val_a),
+                         "y": self.get_node_name(val_b)}
+        attr_matmul = {
+            "transpose_x": trans_a,
+            "transpose_y": trans_b,
+        }
+        self.paddle_graph.add_layer(
+            'paddle.matmul',
+            inputs=matmul_inputs,
+            outputs=[val_mm],
+            **attr_matmul)
+        self.paddle_graph.add_layer(
+            "paddle.scale",
+            inputs={"x": val_mm},
+            outputs=[val_mm],
+            scale=alpha)
+
+        if beta != 0:
+            if beta == 1.:
+                add_inputs = {"x": val_mm,
+                              "y": self.get_node_name(val_c)}
+                self.paddle_graph.add_layer(
+                    "paddle.add",
+                    inputs=add_inputs,
+                    outputs=[node.layer_name])
+            else:
+                var_beta = node.layer_name + '_beta'
+                self.paddle_graph.add_layer(
+                    "paddle.scale",
+                    inputs={"x": self.get_node_name(val_c)},
+                    outputs=[var_beta],
+                    scale=beta)
+                add_inputs = {"x": val_mm, "y": var_beta}
+                self.paddle_graph.add_layer(
+                    "paddle.add",
+                    inputs=add_inputs,
+                    outputs=[node.layer_name])
+
+    @print_mapping_info
+    def Sum(self, node):
+        val_inps = node.layer.input
+        inputs_dict = {
+            "x": self.get_node_name(
+                self.graph.get_input_node(
+                    node, idx=0, copy=True)),
+            "y": self.get_node_name(
+                self.graph.get_input_node(
+                    node, idx=1, copy=True)),
+        }
+        self.paddle_graph.add_layer("paddle.add",
+                                    inputs=inputs_dict,
+                                    outputs=[node.layer_name])
+
+        for idx in range(2, len(val_inps)):
+            y = self.graph.get_input_node(node, idx=idx, copy=True)
+            inputs_dict = {
+                "x": node.layer_name,
+                "y": self.get_node_name(y),
+            }
+            self.paddle_graph.add_layer(
+                "paddle.add",
+                inputs=inputs_dict,
+                outputs=[node.layer_name])
+
+    @print_mapping_info
+    def MatMul(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_input_node(node, idx=1, copy=True)
+        x_shape = val_x.out_shapes[0]
+        y_shape = val_y.out_shapes[0]
+        inputs_dict = {"x": self.get_node_name(val_x),
+                       "y": self.get_node_name(val_y)}
+        if y_shape[0] == 1 and x_shape[-1] != 1 and x_shape[0] != 1:
+            y_squeeze = val_y.layer_name + '_squeeze'
+            self.paddle_graph.add_layer(
+                "paddle.squeeze",
+                inputs={"x": self.get_node_name(val_y)},
+                outputs=[y_squeeze],
+                axis=[0])
+            inputs_dict['y'] = y_squeeze
+            self.paddle_graph.add_layer(
+                "paddle.matmul",
+                inputs=inputs_dict,
+                outputs=[node.layer_name])
+        else:
+            self.paddle_graph.add_layer(
+                "paddle.matmul",
+                inputs=inputs_dict,
+                outputs=[node.layer_name])
+
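+    # BatchNormalization is converted for inference (is_test=True): the ONNX
+    # scale/B inputs become param_attr/bias_attr, and mean/var become the
+    # moving statistics of paddle.nn.BatchNorm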
+    @print_mapping_info
+    def BatchNormalization(self, node):
+        op_name = name_generator("batchnorm", self.nn_name2id)
+        output_name = node.layer_name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_scale = self.graph.get_input_node(node, idx=1, copy=True)
+        val_b = self.graph.get_input_node(node, idx=2, copy=True)
+        val_mean = self.graph.get_input_node(node, idx=3, copy=True)
+        val_var = self.graph.get_input_node(node, idx=4, copy=True)
+
+        momentum = node.get_attr('momentum', .9)
+        epsilon = node.get_attr('epsilon', 1e-5)
+        c = val_x.out_shapes[0][1]
+
+        # Attribute: spatial is used in BatchNormalization-1,6,7
+        spatial = bool(node.get_attr('spatial'))
+        layer_attrs = {
+            "num_channels": c,
+            "momentum": momentum,
+            "epsilon": epsilon,
+            "is_test": True,
+            "param_attr": string(self.get_node_name(val_scale)),
+            "bias_attr": string(self.get_node_name(val_b)),
+            "moving_mean_name": string(self.get_node_name(val_mean)),
+            "moving_variance_name": string(self.get_node_name(val_var)),
+            "use_global_stats": False,
+        }
+        self.paddle_graph.add_layer(
+            "paddle.nn.BatchNorm",
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=layer_outputs,
+            **layer_attrs)
+
+    @print_mapping_info
+    def Transpose(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        perm = node.get_attr('perm')
+        self.paddle_graph.add_layer(
+            "paddle.transpose",
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=[node.layer_name],
+            perm=perm)
+
+    @print_mapping_info
+    def PRelu(self, node):
+        op_name = name_generator("prelu", self.nn_name2id)
+        output_name = node.layer_name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_slope = self.graph.get_input_node(node, idx=1, copy=True)
+
+        mode = 'channel'
+        shape_slope = val_slope.out_shapes[0]
+        if shape_slope == [1]:
+            mode = 'all'
+        elif len(shape_slope) > 2:
+            mode = 'element'
+
+        if mode == 'channel' and len(shape_slope) == 1:
+            # Paddle expects the slope parameter shape to be [1, channel]
+            slope_data = _const_weight_or_none(val_slope)
+            slope_data = np.reshape(slope_data, [1] + shape_slope)
+            self.weights[val_slope.layer_name] = slope_data
+
+        layer_attrs = {
+            "param_attr": string(val_slope.layer_name),
+            'mode': string(mode),
+            "channel": val_x.out_shapes[0][1] if mode == "channel" else None,
+            "input_shape": val_x.out_shapes[0] if mode == "element" else None,
+        }
+        self.paddle_graph.add_layer(
+            "paddle.nn.PReLU",
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=layer_outputs,
+            **layer_attrs)
+
+    @print_mapping_info
+    def Squeeze(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        axes = node.get_attr('axes')
+        if len(val_x.out_shapes[0]) == 1:
+            self.paddle_graph.add_layer(
+                "paddle.cast",
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                dtype=string(val_x.dtype))
+        else:
+            self.paddle_graph.add_layer(
+                "paddle.squeeze",
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                axis=axes)
+
+    @print_mapping_info
+    def Equal(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_input_node(node, idx=1, copy=True)
+        self.paddle_graph.add_layer(
+            "paddle.equal",
+            inputs={'x': self.get_node_name(val_x),
+                    'y': self.get_node_name(val_y)},
+            outputs=[node.layer_name])
+
+    @print_mapping_info
+    def Greater(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_input_node(node, idx=1, copy=True)
+        self.paddle_graph.add_layer(
+            "paddle.greater_than",
+            inputs={'x': self.get_node_name(val_x),
+                    'y': self.get_node_name(val_y)},
+            outputs=[node.layer_name])
+
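+    # Where(condition, x, y) is emulated without a select op: the mask and its
+    # logical negation are cast to x's dtype, multiplied with x and y, then added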
+    @print_mapping_info
+    def Where(self, node):
+        condition = self.graph.get_input_node(node, idx=0, copy=True)
+        val_x = self.graph.get_input_node(node, idx=1, copy=True)
+        val_y = self.graph.get_input_node(node, idx=2, copy=True)
+
+        not_condition = condition.layer_name + '_not'
+        self.paddle_graph.add_layer(
+            "paddle.logical_not",
+            inputs={"x": self.get_node_name(condition)},
+            outputs=[not_condition])
+        cast_not_condition = not_condition + '_cast'
+        self.paddle_graph.add_layer(
+            "paddle.cast",
+            inputs={"x": not_condition},
+            outputs=[cast_not_condition],
+            dtype=string(val_x.dtype))
+        cast_condition = condition.layer_name + '_cast'
+        self.paddle_graph.add_layer(
+            "paddle.cast",
+            inputs={"x": self.get_node_name(condition)},
+            outputs=[cast_condition],
+            dtype=string(val_x.dtype))
+        mul_val_x = val_x.layer_name + '_mul'
+        self.paddle_graph.add_layer(
+            "paddle.multiply",
+            inputs={'x': self.get_node_name(val_x),
+                    'y': cast_condition},
+            outputs=[mul_val_x])
+        mul_val_y = val_y.layer_name + '_mul'
+        self.paddle_graph.add_layer(
+            "paddle.multiply",
+            inputs={'x': self.get_node_name(val_y),
+                    'y': cast_not_condition},
+            outputs=[mul_val_y])
+
+        self.paddle_graph.add_layer(
+            "paddle.add",
+            inputs={'x': mul_val_x,
+                    'y': mul_val_y},
+            outputs=[node.layer_name])
+
+    @print_mapping_info
+    def NonZero(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_x_dim = len(val_x.out_shapes[0])
+        if val_x_dim == 1:
+            self.paddle_graph.add_layer(
+                "paddle.nonzero",
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[self.get_node_name(val_x)])
+            self.paddle_graph.add_layer(
+                "paddle.transpose",
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[node.layer_name],
+                perm=[1, 0])
+        if val_x_dim > 1:
+            self.paddle_graph.add_layer(
+                "paddle.nonzero",
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[self.get_node_name(val_x)])
+            self.paddle_graph.add_layer(
+                "paddle.split",
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[self.get_node_name(val_x)],
+                num_or_sections=1,
+                axis=val_x_dim)
+            self.paddle_graph.add_layer(
+                "paddle.concat",
+                inputs={"x": self.get_node_name(val_x)},
+                outputs=[node.layer_name])
+
+    @print_mapping_info
+    def Identity(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        self.paddle_graph.add_layer(
+            "paddle.assign",
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=[node.layer_name])
+
+    @print_mapping_info
+    def Tile(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_repeats = self.graph.get_input_node(node, idx=1, copy=True)
+        repeats = _const_weight_or_none(val_repeats)
+
+        if repeats is None:
+            repeats = val_repeats.layer_name
+            if val_repeats.dtype != 'int32':
+                self.paddle_graph.add_layer(
+                    "paddle.cast",
+                    inputs={"x": repeats},
+                    outputs=["{}.tmp".format(repeats)],
+                    dtype=string("int32"))
+                repeats = "{}.tmp".format(repeats)
+        elif isinstance(repeats, int):
+            repeats = [repeats]
+
+        self.paddle_graph.add_layer(
+            "paddle.tile",
+            inputs={"x": self.get_node_name(val_x)},
+            outputs=[node.layer_name],
+            repeat_times=repeats)
+
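+    # MaxPool maps onto paddle.nn.MaxPool{1,2,3}D; asymmetric explicit pads are
+    # resolved beforehand by _pad_if_asymmetric, and SAME_UPPER/SAME_LOWER
+    # auto_pad is handled with _get_same_padding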
+    @print_mapping_info
+    def MaxPool(self, node):
+        op_name = name_generator("pool", self.nn_name2id)
+        output_name = node.layer_name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        auto_pad = node.get_attr('auto_pad', 'NOTSET')
+        assert node.get_attr(
+            "dilations") is None, 'only the default dilations (1) are supported'  # optional
+
+        kernel_shape = node.get_attr("kernel_shape")
+        poolnd = len(kernel_shape)
+        strides = node.get_attr("strides")
+        ceil_mode = bool(node.get_attr('ceil_mode', 0))  # optional
+        pads = node.get_attr('pads', [0] * (poolnd * 2))  # optional
+        paddle_op = 'paddle.nn.MaxPool{}D'.format(poolnd)
+        assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
+
+        paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
+
+        if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
+            input_shape = val_x.out_shapes[0]
+            pad_h = _get_same_padding(input_shape[2], kernel_shape[0],
+                                      strides[0])
+            pad_w = _get_same_padding(input_shape[3], kernel_shape[1],
+                                      strides[1])
+            paddings = pad_h + pad_w
+
+        layer_attrs = {
+            "kernel_size": kernel_shape,
+            "stride": strides,
+            "padding": paddings,
+            "ceil_mode": ceil_mode,
+        }
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={'x': val_x if isinstance(val_x, str) else self.get_node_name(val_x)},
+            outputs=layer_outputs,
+            **layer_attrs)
+
+    @print_mapping_info
+    def GlobalMaxPool(self, node):
+        op_name = name_generator("pool", self.nn_name2id)
+        output_name = node.layer_name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        input_shape = val_x.out_shapes[0]
+        if len(input_shape) == 4:
+            poolnd = 2
+        elif len(input_shape) == 5:
+            poolnd = 3
+        elif len(input_shape) == 3:
+            poolnd = 1
+        paddle_op = 'paddle.nn.AdaptiveMaxPool{}D'.format(poolnd)
+        assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
+        output_shape = node.out_shapes[0]
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={'x': self.get_node_name(val_x)},
+            outputs=layer_outputs,
+            output_size=output_shape[2:])
+
+    @print_mapping_info
+    def GlobalAveragePool(self, node):
+        op_name = name_generator("pool", self.nn_name2id)
+        output_name = node.layer_name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        input_shape = val_x.out_shapes[0]
+        if len(input_shape) == 4:
+            poolnd = 2
+        elif len(input_shape) == 5:
+            poolnd = 3
+        elif len(input_shape) == 3:
+            poolnd = 1
+        paddle_op = 'paddle.nn.AdaptiveAvgPool{}D'.format(poolnd)
+        assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
+        output_shape = node.out_shapes[0]
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={'x': self.get_node_name(val_x)},
+            outputs=layer_outputs,
+            output_size=output_shape[2:])
+
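+    # ONNX Conv weights are laid out as [out_channels, in_channels // groups,
+    # *kernel_shape], so in_channels is recovered as weight.shape[1] * groups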
+    @print_mapping_info
+    def Conv(self, node):
+        op_name = name_generator("conv", self.nn_name2id)
+        output_name = node.layer_name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_w = self.graph.get_input_node(node, idx=1, copy=True)
+        val_y = self.graph.get_node(node.layer.output[0], copy=True)
+        has_bias = len(node.layer.input) == 3
+        if has_bias:
+            val_b = self.graph.get_input_node(node, idx=2, copy=True)
+        auto_pad = node.get_attr('auto_pad', 'NOTSET')
+
+        kernel_shape = node.get_attr('kernel_shape')
+        convnd = len(kernel_shape)
+        assert 2 <= convnd <= 3, 'only Conv2D and Conv3D are supported'
+        num_out_channels = val_w.out_shapes[0][0]
+        num_in_channels = val_w.out_shapes[0][1]
+        paddle_op = 'paddle.nn.Conv{}D'.format(convnd)
+
+        num_groups = node.get_attr('group', 1)
+        strides = node.get_attr('strides', [1] * convnd)
+        dilations = node.get_attr('dilations', [1] * convnd)
+        pads = node.get_attr('pads', [0] * (convnd * 2))
+
+        input_shape = val_x.out_shapes[0]
+        paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
+
+        if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
+            pad_h = _get_same_padding(input_shape[2], kernel_shape[0],
+                                      strides[0])
+            pad_w = _get_same_padding(input_shape[3], kernel_shape[1],
+                                      strides[1])
+            paddings = pad_h + pad_w
+
+        layer_attrs = {
+            "in_channels": num_in_channels * num_groups,
+            "out_channels": num_out_channels,
+            "kernel_size": kernel_shape,
+            "stride": strides,
+            "padding": paddings,
+            "dilation": dilations,
+            "groups": num_groups,
+            'weight_attr': string(val_w.layer_name),
+        }
+        if has_bias:
+            layer_attrs["bias_attr"] = string(val_b.layer_name)
+        else:
+            layer_attrs["bias_attr"] = False
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={'x': val_x if isinstance(val_x, str) else self.get_node_name(val_x)},
+            outputs=layer_outputs,
+            **layer_attrs)
+
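+    # ConvTranspose is mapped onto the functional conv{2,3}d_transpose API; the
+    # explicit output_size passed to Paddle is taken from the inferred output shape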
+    @print_mapping_info
+    def ConvTranspose(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_w = self.graph.get_input_node(node, idx=1, copy=True)
+        val_b = None
+        if len(node.layer.input) > 2:
+            val_b = self.graph.get_input_node(node, idx=2, copy=True)
+        auto_pad = node.get_attr('auto_pad', 'NOTSET')
+        out_padding = node.get_attr('output_padding', [0, 0])
+        kernel_shape = node.get_attr('kernel_shape')
+        assert kernel_shape, 'kernel_shape not inferred'
+        convnd = len(kernel_shape)
+        assert 2 <= convnd <= 3, 'only Conv2DTranspose and Conv3DTranspose are supported'
+        num_in_channels = val_w.out_shapes[0][0]
+        num_out_channels = val_w.out_shapes[0][1]
+        paddle_op = 'paddle.nn.functional.conv{}d_transpose'.format(convnd)
+
+        num_groups = node.get_attr('group', 1)
+        strides = node.get_attr('strides', [1] * convnd)
+        dilations = node.get_attr('dilations', [1] * convnd)
+        output_size = node.get_attr('output_shape', [])
+        pads = node.get_attr('pads', [0] * (convnd * 2))
+
+        input_shape = val_x.out_shapes[0]
+        paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
+
+        output_size = [0, 0]
+        output_size[0] = (input_shape[2] - 1) * strides[0] - 2 * paddings[0] + \
+            dilations[0] * (kernel_shape[0] - 1) + 1 + out_padding[0]
+        output_size[1] = (input_shape[3] - 1) * strides[1] - 2 * paddings[1] + \
+            dilations[1] * (kernel_shape[1] - 1) + 1 + out_padding[1]
+#         layer_attrs = {
+#             'in_channels': num_in_channels,
+#             'out_channels': num_out_channels,
+#             'output_size': output_size or None,
+#             'kernel_size': kernel_shape,
+#             'padding': paddings,
+#             'stride': strides,
+#             'dilation': dilations,
+#             'groups': num_groups,
+#             'weight_attr': string(val_w.layer_name),
+#             'bias_attr': None if val_b is None else string(val_b.layer_name),
+#         }
+#         self.paddle_graph.add_layer(
+#             paddle_op,
+#             inputs={"x": self.get_node_name(val_x)},
+#             outputs=layer_outputs,
+#             **layer_attrs)
+        inputs_dict = {'x': val_x if isinstance(val_x, str) else self.get_node_name(val_x),
+                       "weight": val_w.layer_name}
+        layer_attrs = {
+            "stride": strides,
+            "dilation": dilations,
+            "padding": paddings,
+            "groups": num_groups,
+            "output_size": node.out_shapes[0][2:]}
+        if val_b is not None:
+            inputs_dict["bias"] = val_b.layer_name
+        else:
+            layer_attrs["bias"] = None
+        self.paddle_graph.add_layer(
+            kernel=paddle_op,
+            inputs=inputs_dict,
+            outputs=[node.layer_name],
+            **layer_attrs)
diff --git a/x2paddle/op_mapper/static/onnx2paddle/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/static/onnx2paddle/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ad66778230fe4955e8877697cb3c335fc46ae07
Binary files /dev/null and b/x2paddle/op_mapper/static/onnx2paddle/__pycache__/__init__.cpython-37.pyc differ
diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_op_mapper.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_op_mapper.py
index bbb9608..18dbab6 100644
--- a/x2paddle/op_mapper/static/onnx2paddle/onnx_op_mapper.py
+++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_op_mapper.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from x2paddle.op_mapper.onnx2paddle.opset9 import OpSet9, custom_layers
+from x2paddle.op_mapper.static.onnx2paddle.opset9 import OpSet9, custom_layers
 from x2paddle.core.op_mapper import OpMapper
 from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode
diff --git a/x2paddle/op_mapper/static/onnx2paddle/opset9/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/static/onnx2paddle/opset9/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0201a228d6ef297a53c2afe928af6c3d95dbf8b
Binary files /dev/null and b/x2paddle/op_mapper/static/onnx2paddle/opset9/__pycache__/__init__.cpython-37.pyc differ
diff --git a/x2paddle/op_mapper/static/onnx2paddle/opset9/custom_layer/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/static/onnx2paddle/opset9/custom_layer/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..843c68ba69073354423756a2b3af11fdc472dc14
Binary files /dev/null and b/x2paddle/op_mapper/static/onnx2paddle/opset9/custom_layer/__pycache__/__init__.cpython-37.pyc differ
diff --git a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
index ead2370..9d18577 100644
--- a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
@@ -17,7 +17,7 @@ from x2paddle.core.graph import GraphNode
 from x2paddle.core.fluid_code import Layer
 from x2paddle.core.fluid_code import FluidCode
 from x2paddle.core.util import string
-from x2paddle.op_mapper.onnx2paddle.opset9.custom_layer import *
+from x2paddle.op_mapper.static.onnx2paddle.opset9.custom_layer import *
 from functools import reduce
 import numpy as np
 import onnx
-- 
GitLab