Unverified commit 4a2df7e1 authored by WJJ1995, committed by GitHub

Add MMdetection FCOS && Yolov3 support (#595)

* add BatchToSpaceND and SpaceToBatchND op convert

* add Less and fixed Resize RoiAlign Greater Tile op

* add nms custom layer

* fix topk int64 bug

* update model_zoo

* deal with comments
Co-authored-by: channingss <chen_lingchi@163.com>
Parent 3903bade
......@@ -71,7 +71,9 @@
|Ultra-Light-Fast-Generic-Face-Detector-1MB| [onnx_model](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB/tree/master/models/onnx)|9 |
|BERT| [pytorch(huggingface)](https://github.com/huggingface/transformers/blob/master/notebooks/04-onnx-export.ipynb)|11|The input shape must be specified when converting; see [Q3 in the FAQ](../inference_model_convertor/FAQ.md)|
|GPT2| [pytorch(huggingface)](https://github.com/huggingface/transformers/blob/master/notebooks/04-onnx-export.ipynb)|11|The input shape must be specified when converting; see [Q3 in the FAQ](../inference_model_convertor/FAQ.md)|
|CifarNet | [tensorflow](https://github.com/tensorflow/models/blob/master/research/slim/nets/cifarnet.py)|9||
|Fcos | [pytorch(mmdetection)](https://github.com/open-mmlab/mmdetection/blob/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py)|11||
|Yolov3 | [pytorch(mmdetection)](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py)|11||
## PyTorch inference models
......
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .one_hot import OneHot
from .pad_two_input import PadWithTwoInput
from .pad_all_dim2 import PadAllDim2
from .pad_all_dim4 import PadAllDim4
from .pad_all_dim4_one_input import PadAllDim4WithOneInput
from .lrn import LocalResponseNorm
from .nms import NMS
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid import core
from paddle.fluid.framework import Variable, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
return_index=False,
return_rois_num=True,
rois_num=None,
name=None):
helper = LayerHelper('multiclass_nms3', **locals())
if in_dygraph_mode():
attrs = ('background_label', background_label, 'score_threshold',
score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,
'normalized', normalized)
output, index, nms_rois_num = core.ops.multiclass_nms3(bboxes, scores,
rois_num, *attrs)
if not return_index:
index = None
return output, nms_rois_num, index
else:
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
inputs = {'BBoxes': bboxes, 'Scores': scores}
outputs = {'Out': output, 'Index': index}
if rois_num is not None:
inputs['RoisNum'] = rois_num
if return_rois_num:
nms_rois_num = helper.create_variable_for_type_inference(
dtype='int32')
outputs['NmsRoisNum'] = nms_rois_num
helper.append_op(
type="multiclass_nms3",
inputs=inputs,
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
'normalized': normalized
},
outputs=outputs)
output.stop_gradient = True
index.stop_gradient = True
if not return_index:
index = None
if not return_rois_num:
nms_rois_num = None
return output, nms_rois_num, index
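# A minimal usage sketch (hypothetical shapes, not part of the converter):
#   out, rois_num, index = multiclass_nms(
#       paddle.rand([1, 100, 4]),    # bboxes: [N, M, 4]
#       paddle.rand([1, 80, 100]),   # scores: [N, C, M]
#       score_threshold=0.05, nms_top_k=1000, keep_top_k=100,
#       return_index=True)
#   # out: [K, 6] rows of [class, score, x1, y1, x2, y2];
#   # index: [K, 1] flat indices into the M candidate boxes.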
class NMS(object):
def __init__(self, score_threshold, nms_top_k, nms_threshold):
self.score_threshold = score_threshold
self.nms_top_k = nms_top_k
self.nms_threshold = nms_threshold
def __call__(self, bboxes, scores):
attrs = {
'background_label': -1,
'score_threshold': self.score_threshold,
'nms_top_k': self.nms_top_k,
'nms_threshold': self.nms_threshold,
'keep_top_k': -1,
'nms_eta': 1.0,
'normalized': False,
'return_index': True
}
output, nms_rois_num, index = multiclass_nms(bboxes, scores, **attrs)
clas = paddle.slice(output, axes=[1], starts=[0], ends=[1])
clas = paddle.cast(clas, dtype="int64")
index = paddle.cast(index, dtype="int64")
        if bboxes.shape[0] == 1:
            batch = paddle.zeros_like(clas, dtype="int64")
        else:
            # wrap the python int in an int64 tensor so the elementwise ops
            # below do integer (truncating) arithmetic on the flat indices
            bboxes_count = paddle.full([1], bboxes.shape[1], dtype="int64")
            batch = paddle.floor_divide(index, bboxes_count)
            index = paddle.mod(index, bboxes_count)
res = paddle.concat([batch, clas, index], axis=1)
return res
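# Output layout note: each row of the returned `res` is
# [batch_id, class_id, box_id], matching the selected_indices tensor of ONNX
# NonMaxSuppression, which this custom layer stands in for.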
......@@ -42,7 +42,10 @@ def _const_weight_or_none(node, necessary=False):
return None
def _rename_or_remove_weight(weights, origin_name, target_name=None, is_remove=True):
def _rename_or_remove_weight(weights,
origin_name,
target_name=None,
is_remove=True):
'''
Rename parameters by Paddle's naming rule of parameters.
......@@ -67,6 +70,7 @@ def _rename_or_remove_weight(weights, origin_name, target_name=None, is_remove=T
# rename weight
weights[target_name] = data
def _is_static_shape(shape):
negtive_dims = 0
error_dims = 0
......@@ -96,9 +100,8 @@ def print_mapping_info(func):
try:
res = func(*args, **kwargs)
except:
print("convert failed node:{}, op_type is {}".format(
raise Exception("convert failed node:{}, op_type is {}".format(
node.name[9:], node.layer_type))
raise
else:
return res
......@@ -112,51 +115,61 @@ class OpSet9():
'Sub': 'paddle.subtract',
'Mul': 'paddle.multiply',
'Pow': 'paddle.pow',
'Less': 'paddle.less_than',
}
directly_map_ops = {
'Ceil': ['paddle.ceil'],
# reduce function
'ReduceMean': ['paddle.mean',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdims=1)],
'ReduceSum': ['paddle.sum',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdims=1)],
'ReduceMin': ['paddle.min',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdim=1)],
'ReduceMax': ['paddle.max',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdim=1)],
'ReduceProd': ['paddle.prod',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdim=1)],
'ReduceMean': [
'paddle.mean', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdims=1)
],
'ReduceSum': [
'paddle.sum', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdims=1)
],
'ReduceMin': [
'paddle.min', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdim=1)
],
'ReduceMax': [
'paddle.max', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdim=1)
],
'ReduceProd': [
'paddle.prod', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdim=1)
],
# active function
'Relu': ['paddle.nn.ReLU'],
'LeakyRelu': ['paddle.nn.LeakyReLU',
dict(alpha='negative_slope'),
dict(negative_slope=.01)],
'Elu': ['paddle.nn.functional.elu',
dict(alpha='alpha'),
dict(alpha=1.)],
'ThresholdedRelu': ['paddle.nn.functional.thresholded_relu',
dict(alpha='threshold'),
dict(alpha=1.)],
'LeakyRelu': [
'paddle.nn.LeakyReLU', dict(alpha='negative_slope'),
dict(negative_slope=.01)
],
'Elu':
['paddle.nn.functional.elu', dict(alpha='alpha'), dict(alpha=1.)],
'ThresholdedRelu': [
'paddle.nn.functional.thresholded_relu', dict(alpha='threshold'),
dict(alpha=1.)
],
'Tanh': ['paddle.nn.Tanh'],
'Sigmoid': ['paddle.nn.Sigmoid'],
'Softsign': ['paddle.nn.Softsign'],
'Softplus': ['paddle.nn.Softplus',
dict(threshold='threshold'),
dict(threshold=float(sys.maxsize))],
'Softplus': [
'paddle.nn.Softplus', dict(threshold='threshold'),
dict(threshold=float(sys.maxsize))
],
'Exp': ['paddle.exp'],
'Log': ['paddle.log'],
'LogSoftmax': ['paddle.nn.functional.log_softmax',
dict(axis='axis'),
dict(axis=1)],
'Softmax': ['paddle.nn.Softmax',
dict(axis='axis'),
dict(axis=1)],
'LogSoftmax':
['paddle.nn.functional.log_softmax', dict(axis='axis'), dict(axis=1)],
'Softmax': ['paddle.nn.Softmax', dict(axis='axis'), dict(axis=1)],
'Sqrt': ['paddle.sqrt'],
'Floor': ['paddle.floor'],
'Abs': ['paddle.abs'],
......@@ -211,18 +224,14 @@ class OpSet9():
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def elementwise_map(self, node):
op_type = self.elementwise_ops[node.layer_type]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
inputs_dict = {'x': val_x.name,
'y': val_y.name}
inputs_dict = {'x': val_x.name, 'y': val_y.name}
self.paddle_graph.add_layer(
op_type,
inputs=inputs_dict,
outputs=[node.name])
op_type, inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def place_holder(self, node):
......@@ -288,13 +297,15 @@ class OpSet9():
val_scales = self.graph.get_input_node(node, idx=1, copy=True)
# TODO(syf): paddle.nn.functional.interpolate will support the length
# which is the same as the rank of input.
attrs['scale_factor'] = self.weights[val_scales.name].tolist()[2:]
attrs['scale_factor'] = self.weights[val_scales.name].tolist()[
2:]
elif len(node.layer.input) == 3:
# opset 11
val_scales = self.graph.get_input_node(node, idx=2, copy=True)
# TODO(syf): paddle.nn.functional.interpolate will support the length
# which is the same as the rank of input.
attrs['scale_factor'] = self.weights[val_scales.name].tolist()[2:]
attrs['scale_factor'] = self.weights[val_scales.name].tolist()[
2:]
elif len(node.layer.input) == 4:
# opset 11
val_sizes = self.graph.get_input_node(node, idx=3, copy=True)
......@@ -311,8 +322,17 @@ class OpSet9():
outputs=[var_hw],
dtype=string('int32'))
inputs['size'] = var_hw
attrs = {"align_corners": False,
"mode": string(node.get_attr('mode', 'nearest'))}
attrs = {
"align_corners": False,
"mode": string(node.get_attr('mode', 'nearest'))
}
mode = node.get_attr('mode', 'nearest')
if mode == "linear":
attrs["mode"] = string("bilinear")
if node.get_attr('coordinate_transformation_mode',
'half_pixel') == 'pytorch_half_pixel':
attrs["align_corners"] = False
attrs["align_mode"] = 0
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.interpolate",
inputs=inputs,
......@@ -331,12 +351,19 @@ class OpSet9():
inputs['scale_factor'] = val_scales.name
mode = node.get_attr('mode', 'nearest')
attrs.update({"align_corners": False,
attrs.update({
"align_corners": False,
"mode": string(mode),
"align_mode": 1})
"align_mode": 1
})
val_x_shape = val_x.out_shapes[0]
if mode == "linear" and len(val_x_shape) == 4:
attrs["mode"] = string("bilinear")
if node.get_attr('coordinate_transformation_mode',
'half_pixel') == 'pytorch_half_pixel':
attrs["align_corners"] = False
attrs["align_mode"] = 0
else:
attrs["align_corners"] = True
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.interpolate",
......@@ -384,11 +411,25 @@ class OpSet9():
pooled_width = node.get_attr('output_width')
spatial_scale = node.get_attr('spatial_scale')
sampling_ratio = node.get_attr('sampling_ratio')
        # rois_num is required by roi_align in dygraph mode
val_rois_shape = val_rois.name + '_shape'
self.paddle_graph.add_layer(
kernel="paddle.shape",
inputs={"input": val_rois.name},
outputs=[val_rois_shape])
val_rois_num = val_rois.name + '_num'
self.paddle_graph.add_layer(
'paddle.split',
inputs={"x": val_rois_shape},
outputs=[val_rois_num, '_', '_', '_'],
num_or_sections=[1, 1, 1, 1],
axis=0)
layer_attrs = {
'pooled_height': pooled_height,
'pooled_width': pooled_width,
'spatial_scale': spatial_scale,
'sampling_ratio': sampling_ratio,
'rois_num': val_rois_num,
}
self.paddle_graph.add_layer(
'paddle.fluid.layers.roi_align',
......@@ -397,7 +438,6 @@ class OpSet9():
outputs=[node.name],
**layer_attrs)
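        # Note: in dygraph mode roi_align requires an explicit rois_num
        # tensor, which is why the box count is carved out of the rois shape
        # with paddle.shape + paddle.split above.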
@print_mapping_info
def MaxRoiPool(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -446,12 +486,14 @@ class OpSet9():
if is_pads_attr:
paddings = []
if len(pads) == 10 and sum(pads) == 0:
pads = pads[0: 6]
pads = pads[0:6]
if len(pads) in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads) # NCHW
assume_pad |= data_shape and 2 * (len(data_shape) - 2
) == len(pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * (len(output_shape) - 2) == len(pads) # NCHW
assume_pad |= output_shape and 2 * (len(output_shape) - 2
) == len(pads) # NCHW
if assume_pad:
paddle_op = 'paddle.nn.Pad{}D'.format(len(output_shape) - 2)
paddings = np.array(pads).reshape(
......@@ -460,21 +502,27 @@ class OpSet9():
layer_attrs['padding'] = paddings
else:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW
assume_pad |= data_shape and 2 * len(data_shape) == len(
pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW
assume_pad |= output_shape and 2 * len(
output_shape) == len(pads) # NCHW
if assume_pad:
paddle_op = 'paddle.nn.functional.pad'
paddings = np.array(pads).reshape(
(2, -1)).transpose().astype("int32").flatten().tolist()
(2,
-1)).transpose().astype("int32").flatten().tolist()
layer_attrs['pad'] = paddings
else:
raise Exception("The padding value {} is wrong!".format(pads))
raise Exception("The padding value {} is wrong!".format(
pads))
elif len(pads) == 8:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW
assume_pad |= data_shape and 2 * len(data_shape) == len(
pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW
assume_pad |= output_shape and 2 * len(output_shape) == len(
pads) # NCHW
if assume_pad:
paddle_op = 'paddle.nn.Pad2D'
paddings = np.array(pads).reshape(
......@@ -491,7 +539,8 @@ class OpSet9():
self.paddle_graph.add_layer(
paddle_op,
inputs={'x': val_x.name},
outputs=layer_outputs[1:] if paddle_op == 'paddle.nn.functional.pad' else layer_outputs,
outputs=layer_outputs[1:]
if paddle_op == 'paddle.nn.functional.pad' else layer_outputs,
**layer_attrs)
if not op_independent:
return node.name + '_paded'
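        # Worked example of the pad reordering above (hypothetical NCHW pads):
        # ONNX [t_n, t_c, t_h, t_w, b_n, b_c, b_h, b_w] reshaped to (2, -1)
        # and transposed becomes per-dimension (begin, end) pairs
        # [t_n, b_n, t_c, b_c, t_h, b_h, t_w, b_w], the flat interleaved form
        # handed to paddle.nn.functional.pad.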
......@@ -499,9 +548,11 @@ class OpSet9():
pads_len = val_pad.out_shapes[0][0]
if pads_len in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == pads_len # NCHW
assume_pad |= data_shape and 2 * (len(data_shape) - 2
) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * (len(output_shape) - 2) == pads_len # NCHW
assume_pad |= output_shape and 2 * (len(output_shape) - 2
) == pads_len # NCHW
if assume_pad:
if pads_len == 2:
data_format = "NCL"
......@@ -511,21 +562,25 @@ class OpSet9():
data_format = "NCDHW"
self.paddle_graph.add_layer(
"custom_layer:PadWithTwoInput",
inputs={'x': val_x.name, 'pad': val_pad.name},
inputs={'x': val_x.name,
'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode),
data_format=string(data_format))
else:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW
assume_pad |= data_shape and 2 * len(
data_shape) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW
assume_pad |= output_shape and 2 * len(
output_shape) == pads_len # NCHW
if assume_pad:
if pads_len == 4:
self.paddle_graph.add_layer(
"custom_layer:PadAllDim2",
inputs={'x': val_x.name, 'pad': val_pad.name},
inputs={'x': val_x.name,
'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode))
......@@ -533,13 +588,16 @@ class OpSet9():
raise Exception("The padding value is wrong!")
elif pads_len == 8:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW
assume_pad |= data_shape and 2 * len(
data_shape) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW
assume_pad |= output_shape and 2 * len(
output_shape) == pads_len # NCHW
if assume_pad:
self.paddle_graph.add_layer(
"custom_layer:PadAllDim4",
inputs={'x': val_x.name, 'pad': val_pad.name},
inputs={'x': val_x.name,
'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode))
......@@ -638,8 +696,8 @@ class OpSet9():
val_scale = self.graph.get_input_node(node, idx=1, copy=True)
val_b = self.graph.get_input_node(node, idx=2, copy=True)
epsilon = node.get_attr('epsilon', 1e-5)
self.weights[op_name+'.scale'] = self.weights[val_scale.name]
self.weights[op_name+'.bias'] = self.weights[val_b.name]
self.weights[op_name + '.scale'] = self.weights[val_scale.name]
self.weights[op_name + '.bias'] = self.weights[val_b.name]
layer_attrs = {
'num_features': node.out_shapes[0][1],
'epsilon': epsilon,
......@@ -652,7 +710,9 @@ class OpSet9():
elif dim == 5:
paddle_op = "paddle.nn.InstanceNorm3D"
else:
raise Exception("The paddle only support 2D, 3D, 4D or 5D input in InstanceNormalization.")
raise Exception(
"The paddle only support 2D, 3D, 4D or 5D input in InstanceNormalization."
)
self.paddle_graph.add_layer(
paddle_op,
inputs={"x": val_x.name},
......@@ -671,16 +731,10 @@ class OpSet9():
'fill_value': 1
}
self.paddle_graph.add_layer(
'paddle.full',
inputs={},
outputs=[name_ones],
**attr_ones)
inputs_dict = {'x': name_ones,
'y': val_x.name}
'paddle.full', inputs={}, outputs=[name_ones], **attr_ones)
inputs_dict = {'x': name_ones, 'y': val_x.name}
self.paddle_graph.add_layer(
'paddle.multiply',
inputs=inputs_dict,
outputs=[node.name])
'paddle.multiply', inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def Gather(self, node):
......@@ -843,9 +897,11 @@ class OpSet9():
if len(indices.out_shapes[0]) == 1:
self.paddle_graph.add_layer(
'paddle.scatter',
inputs={'x': val_x.name,
inputs={
'x': val_x.name,
'index': indices.name,
'updates': updates.name},
'updates': updates.name
},
outputs=[node.name])
else:
input_inner_indices = node.name + '_input_inner_indices'
......@@ -920,9 +976,11 @@ class OpSet9():
val_limit = self.graph.get_input_node(node, idx=1, copy=True)
val_delta = self.graph.get_input_node(node, idx=2, copy=True)
dtype = val_start.dtype
inputs = {'start': val_start.name,
inputs = {
'start': val_start.name,
'end': val_limit.name,
'step': val_delta.name}
'step': val_delta.name
}
self.paddle_graph.add_layer(
'paddle.arange',
inputs=inputs,
......@@ -962,7 +1020,8 @@ class OpSet9():
starts_value = starts_value.copy()
ends_value = ends_value.copy()
for idx in range(len(ends_value)):
if starts_value[idx] >= val_x.out_shapes[0][axes[idx]] and val_x.out_shapes[0][axes[idx]] > 0:
if starts_value[idx] >= val_x.out_shapes[0][axes[
idx]] and val_x.out_shapes[0][axes[idx]] > 0:
starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
ends_value[idx] = val_x.out_shapes[0][axes[idx]]
elif ends_value[idx] > 2**31 - 1:
......@@ -1001,7 +1060,6 @@ class OpSet9():
ends[idx] = 2**31 - 1
layer_attrs = {"axes": axes, "starts": starts, "ends": ends}
if steps is not None:
layer_attrs['strides'] = steps
self.paddle_graph.add_layer(
......@@ -1028,10 +1086,7 @@ class OpSet9():
'this is not supported')
if len(value) == 1:
value = value[0]
layer_attrs = {
'dtype': string(dtype),
'fill_value': value
}
layer_attrs = {'dtype': string(dtype), 'fill_value': value}
self.paddle_graph.add_layer(
"paddle.full",
inputs={'shape': val_shape.name},
......@@ -1162,7 +1217,8 @@ class OpSet9():
@print_mapping_info
def Not(self, node):
val_input = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer('paddle.logical_not',
self.paddle_graph.add_layer(
'paddle.logical_not',
inputs={'x': val_input.name},
outputs=[node.name])
......@@ -1254,8 +1310,7 @@ class OpSet9():
trans_a = bool(node.get_attr('transA', 0)) # optional
trans_b = bool(node.get_attr('transB', 0)) # optional
val_mm = node.name + '_mm'
matmul_inputs = {"x": val_a.name,
"y": val_b.name}
matmul_inputs = {"x": val_a.name, "y": val_b.name}
attr_matmul = {
"transpose_x": trans_a,
"transpose_y": trans_b,
......@@ -1266,19 +1321,13 @@ class OpSet9():
outputs=[val_mm],
**attr_matmul)
self.paddle_graph.add_layer(
"paddle.scale",
inputs={"x": val_mm},
outputs=[val_mm],
scale=alpha)
"paddle.scale", inputs={"x": val_mm}, outputs=[val_mm], scale=alpha)
if beta != 0:
if beta == 1.:
add_inputs = {"x": val_mm,
"y": val_c.name}
add_inputs = {"x": val_mm, "y": val_c.name}
self.paddle_graph.add_layer(
"paddle.add",
inputs=add_inputs,
outputs=[node.name])
"paddle.add", inputs=add_inputs, outputs=[node.name])
else:
var_beta = node.name + '_beta'
self.paddle_graph.add_layer(
......@@ -1288,9 +1337,7 @@ class OpSet9():
scale=beta)
add_inputs = {"x": val_mm, "y": var_beta}
self.paddle_graph.add_layer(
"paddle.add",
inputs=add_inputs,
outputs=[node.name])
"paddle.add", inputs=add_inputs, outputs=[node.name])
@print_mapping_info
def Sum(self, node):
......@@ -1301,9 +1348,8 @@ class OpSet9():
"y": self.graph.get_input_node(
node, idx=1, copy=True).name,
}
self.paddle_graph.add_layer("paddle.add",
inputs=inputs_dict,
outputs=[node.name])
self.paddle_graph.add_layer(
"paddle.add", inputs=inputs_dict, outputs=[node.name])
for idx, ipt in enumerate(val_inps[2:]):
            # offset by 2: inputs 0 and 1 were already summed above
            y = self.graph.get_input_node(node, idx=idx + 2, copy=True)
......@@ -1312,9 +1358,7 @@ class OpSet9():
"y": y.name,
}
self.paddle_graph.add_layer(
"paddle.add",
inputs=inputs_dict,
outputs=[node.name])
"paddle.add", inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def MatMul(self, node):
......@@ -1322,8 +1366,7 @@ class OpSet9():
val_y = self.graph.get_input_node(node, idx=1, copy=True)
x_shape = val_x.out_shapes[0]
y_shape = val_y.out_shapes[0]
inputs_dict = {"x": val_x.name,
"y": val_y.name}
inputs_dict = {"x": val_x.name, "y": val_y.name}
if y_shape[0] == 1 and x_shape[-1] != 1 and x_shape[0] != 1:
y_squeeze = val_y.name + '_squeeze'
self.paddle_graph.add_layer(
......@@ -1333,14 +1376,10 @@ class OpSet9():
axis=[0])
inputs_dict['y'] = y_squeeze
self.paddle_graph.add_layer(
"paddle.matmul",
inputs=inputs_dict,
outputs=[node.name])
"paddle.matmul", inputs=inputs_dict, outputs=[node.name])
else:
self.paddle_graph.add_layer(
"paddle.matmul",
inputs=inputs_dict,
outputs=[node.name])
"paddle.matmul", inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def BatchNormalization(self, node):
......@@ -1357,10 +1396,13 @@ class OpSet9():
epsilon = node.get_attr('epsilon', 1e-5)
c = val_x.out_shapes[0][1]
_rename_or_remove_weight(self.weights, val_scale.name, op_name+'.weight')
_rename_or_remove_weight(self.weights, val_b.name, op_name+'.bias')
_rename_or_remove_weight(self.weights, val_var.name, op_name+'._variance')
_rename_or_remove_weight(self.weights, val_mean.name, op_name+'._mean')
_rename_or_remove_weight(self.weights, val_scale.name,
op_name + '.weight')
_rename_or_remove_weight(self.weights, val_b.name, op_name + '.bias')
_rename_or_remove_weight(self.weights, val_var.name,
op_name + '._variance')
_rename_or_remove_weight(self.weights, val_mean.name,
op_name + '._mean')
# Attribute: spatial is used in BatchNormalization-1,6,7
spatial = bool(node.get_attr('spatial'))
......@@ -1427,8 +1469,10 @@ class OpSet9():
outputs=[output_name + "__mul"])
self.paddle_graph.add_layer(
"paddle.add",
inputs={"x": output_name + "__max",
"y": output_name + "__mul"},
inputs={
"x": output_name + "__max",
"y": output_name + "__mul"
},
outputs=[output_name])
else:
if mode == 'channel':
......@@ -1447,12 +1491,14 @@ class OpSet9():
return
_rename_or_remove_weight(self.weights, val_slope.name)
if len(shape_slope) > 1:
self.weights[op_name+'._weight'] = np.reshape(slope_data, shape_slope[0])
self.weights[op_name + '._weight'] = np.reshape(
slope_data, shape_slope[0])
num_parameters = val_x.out_shapes[0][1]
else:
num_parameters = 1
_rename_or_remove_weight(self.weights, val_slope.name)
self.weights[op_name+'._weight'] = np.reshape(self.weights[val_slope.name], [1])
self.weights[op_name + '._weight'] = np.reshape(
self.weights[val_slope.name], [1])
self.paddle_graph.add_layer(
"paddle.nn.PReLU",
inputs={"x": val_x.name},
......@@ -1494,8 +1540,7 @@ class OpSet9():
"paddle.greater_than",
inputs={'x': val_x.name,
'y': val_y.name},
outputs=[node.name],
param_attr=None)
outputs=[node.name])
@print_mapping_info
def Where(self, node):
......@@ -1565,17 +1610,13 @@ class OpSet9():
num_or_sections=1,
axis=val_x_dim)
self.paddle_graph.add_layer(
"paddle.concat",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.concat", inputs={"x": val_x.name}, outputs=[node.name])
@print_mapping_info
def Identity(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.assign",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.assign", inputs={"x": val_x.name}, outputs=[node.name])
@print_mapping_info
def Tile(self, node):
......@@ -1589,13 +1630,16 @@ class OpSet9():
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": repeats},
outputs=["{}.tmp".format(repeats)],
outputs=["{}_tmp".format(repeats)],
dtype=string("int32"))
repeats = "{}.tmp".format(repeats)
repeats = "{}_tmp".format(repeats)
elif isinstance(repeats, int):
repeats = [repeats]
elif type(repeats) is np.ndarray:
repeats = repeats.tolist()
attr = {
'expand_times': repeats,
"name": string(node.name),
......@@ -1738,15 +1782,18 @@ class OpSet9():
remove_weight = True if val_w.name in self.done_weight_list else False
if remove_weight:
self.done_weight_list.append(val_w.name)
_rename_or_remove_weight(self.weights, val_w.name, op_name+'.weight', remove_weight)
_rename_or_remove_weight(self.weights, val_w.name, op_name + '.weight',
remove_weight)
if has_bias:
remove_bias = True if val_b.name in self.done_weight_list else False
if remove_bias:
self.done_weight_list.append(val_b_name)
_rename_or_remove_weight(self.weights, val_b.name, op_name+'.bias', remove_bias)
_rename_or_remove_weight(self.weights, val_b.name,
op_name + '.bias', remove_bias)
else:
layer_attrs["bias_attr"] = False
if reduce(lambda x,y:x*y, input_shape) in [1, -1] and 1 not in input_shape:
if reduce(lambda x, y: x * y,
input_shape) in [1, -1] and 1 not in input_shape:
input_shape[1] = num_in_channels * num_groups
input_shape[0] = 0
input_shape[2] = 0
......@@ -1808,11 +1855,16 @@ class OpSet9():
"dilation": dilations,
"padding": paddings,
"groups": num_groups,
"output_padding":out_padding}
"output_padding": out_padding
}
_rename_or_remove_weight(self.weights, val_w.name, op_name+'.weight',)
_rename_or_remove_weight(
self.weights,
val_w.name,
op_name + '.weight', )
if val_b is not None:
_rename_or_remove_weight(self.weights, val_b.name, op_name+'.bias')
_rename_or_remove_weight(self.weights, val_b.name,
op_name + '.bias')
self.paddle_graph.add_layer(
kernel=paddle_op,
inputs=inputs_dict,
......@@ -1824,31 +1876,25 @@ class OpSet9():
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axis = node.get_attr('axis')
keepdims = False if node.get_attr('keepdims') == 0 else True
layer_attrs = {'axis': axis,
'keepdim': keepdims}
layer_attrs = {'axis': axis, 'keepdim': keepdims}
self.paddle_graph.add_layer(
'paddle.argmax',
inputs={"x": val_x.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def Size(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.shape",
inputs={"input": val_x.name},
outputs=[node.name])
"paddle.shape", inputs={"input": val_x.name}, outputs=[node.name])
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": node.name},
outputs=[node.name],
dtype=string('int64'))
self.paddle_graph.add_layer(
"paddle.prod",
inputs={"x": node.name},
outputs=[node.name])
"paddle.prod", inputs={"x": node.name}, outputs=[node.name])
@print_mapping_info
def Sign(self, node):
......@@ -1860,9 +1906,7 @@ class OpSet9():
outputs=[val_x.name],
dtype=string("float32"))
self.paddle_graph.add_layer(
"paddle.sign",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.sign", inputs={"x": val_x.name}, outputs=[node.name])
if node.dtype not in ["float16", "float32", "float64"]:
self.paddle_graph.add_layer(
"paddle.cast",
......@@ -1881,9 +1925,11 @@ class OpSet9():
axis = node.get_attr('axis', -1)
self.paddle_graph.add_layer(
"custom_layer:OneHot",
inputs={"indices": indices.name,
inputs={
"indices": indices.name,
"depth": depth.name,
"values": values.name},
"values": values.name
},
outputs=layer_outputs,
axis=axis)
......@@ -1891,9 +1937,7 @@ class OpSet9():
def Reciprocal(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.reciprocal",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.reciprocal", inputs={"x": val_x.name}, outputs=[node.name])
@print_mapping_info
def LSTM(self, node):
......@@ -1905,47 +1949,49 @@ class OpSet9():
exist_input_nums = 3
have_bias = False
if input_nums > 3 and node.layer.input[3] != '':
bias = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
bias = self.graph.get_input_node(
node, idx=exist_input_nums, copy=True)
have_bias = True
exist_input_nums += 1
if input_nums > 4 and node.layer.input[4] != '':
sequence_lens = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
sequence_lens = self.graph.get_input_node(
node, idx=exist_input_nums, copy=True)
exist_input_nums += 1
if input_nums > 5 and node.layer.input[5] != '':
init_h = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
init_h = self.graph.get_input_node(
node, idx=exist_input_nums, copy=True)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": init_h.name},
outputs=[init_h.name],
shape=init_h.out_shapes[0]
)
shape=init_h.out_shapes[0])
exist_input_nums += 1
if input_nums > 6 and node.layer.input[6] != '':
init_c = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
init_c = self.graph.get_input_node(
node, idx=exist_input_nums, copy=True)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": init_c.name},
outputs=[init_c.name],
shape=init_c.out_shapes[0]
)
shape=init_c.out_shapes[0])
input_weight_np = _const_weight_or_none(input_weight)
_rename_or_remove_weight(self.weights, input_weight.name)
hidden_size = node.get_attr('hidden_size', input_weight_np.shape[1]/4)
        hidden_size = node.get_attr('hidden_size',
                                    input_weight_np.shape[1] // 4)
input_size = input_weight_np.shape[2]
hidden_weight_np = _const_weight_or_none(hidden_weight)
_rename_or_remove_weight(self.weights, hidden_weight.name)
bias_np = _const_weight_or_none(bias)
_rename_or_remove_weight(self.weights, bias.name)
input_bias_np = bias_np[:, :4*hidden_size]
hidden_bias_np = bias_np[:, 4*hidden_size:]
input_bias_np = bias_np[:, :4 * hidden_size]
hidden_bias_np = bias_np[:, 4 * hidden_size:]
        # parameter order in paddle.nn.LSTM:
        # 1. gate order in paddle is: input, forget, cell, output.
        # 2. gate order in onnx is: input, output, forget, cell.
def reform_weights(w, n, intervals):
slices = [w[:,x * n: y * n] for x, y in intervals]
slices = [w[:, x * n:y * n] for x, y in intervals]
return np.concatenate(slices, axis=1)
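        # Worked example (gate width n): ONNX packs gates as i, o, f, c, so
        # intervals [(0, 1), (2, 3), (3, 4), (1, 2)] pull out the i, f, c, o
        # slices and re-concatenate them in paddle's expected order.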
def transform_weight_with_bias(weights, n, intervals):
......@@ -1973,12 +2019,13 @@ class OpSet9():
def assign_params(op_name, weights, weight_idx=0, suffix=''):
param_names = generate_paddle_param_names(op_name, suffix)
for param_name, weight in zip(param_names, weights):
self.weights[param_name] = weight[weight_idx]
if direction == 'backward':
raise Exception("LSTM support 'forward' or 'bidirectional', except '{}'.".format(direction))
raise Exception(
"LSTM support 'forward' or 'bidirectional', except '{}'.".
format(direction))
else:
assign_params(op_name, weights)
if direction == 'bidirectional':
......@@ -1986,7 +2033,10 @@ class OpSet9():
self.paddle_graph.add_layer(
'paddle.nn.LSTM',
inputs={'input': x.name, 'initial_states': (init_h.name, init_c.name)},
inputs={
'input': x.name,
'initial_states': (init_h.name, init_c.name)
},
outputs=[op_name, y_out, yh_out, yc_out],
input_size=input_size,
hidden_size=hidden_size,
......@@ -1998,28 +2048,37 @@ class OpSet9():
'paddle.reshape',
inputs={"x": y_out},
outputs=[y_out],
shape=[0, 0, -1, hidden_size]
)
shape=[0, 0, -1, hidden_size])
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": y_out},
outputs=[y_out],
perm=[0,2,1,3]
)
perm=[0, 2, 1, 3])
@print_mapping_info
def TopK(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_k = self.graph.get_input_node(node, idx=1, copy=True)
if val_k.dtype != "int32":
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": val_k.name},
outputs=[val_k.name],
dtype=string('int32'))
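        # ONNX TopK supplies K as an int64 tensor; the cast above works around
        # an int64/int32 mismatch in the generated paddle.topk call (the
        # "fix topk int64 bug" item in the commit message).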
layer_attrs = dict()
layer_attrs["axis"] = node.get_attr('axis', -1)
layer_attrs["largest"] = True if node.get_attr('largest', 1) == 1 else False
layer_attrs["sorted"] = True if node.get_attr('sorted', 1) == 1 else False
layer_attrs["largest"] = True if node.get_attr('largest',
1) == 1 else False
layer_attrs["sorted"] = True if node.get_attr('sorted',
1) == 1 else False
self.paddle_graph.add_layer(
"paddle.topk",
inputs={"x": val_x.name,
"k": val_k.name},
outputs=["{}_p{}".format(node.layer_name, 0), "{}_p{}".format(node.layer_name, 1)],
outputs=[
"{}_p{}".format(node.layer_name, 0),
"{}_p{}".format(node.layer_name, 1)
],
**layer_attrs)
@print_mapping_info
......@@ -2032,12 +2091,7 @@ class OpSet9():
beta = node.get_attr('beta', 0.75)
bias = node.get_attr('bias', 1.0)
size = node.get_attr('size')
layer_attrs = {
'size': size,
'alpha': alpha,
'beta': beta,
'k': bias
}
layer_attrs = {'size': size, 'alpha': alpha, 'beta': beta, 'k': bias}
self.paddle_graph.add_layer(
"custom_layer:LocalResponseNorm",
inputs={"x": val_x.name},
......@@ -2056,37 +2110,65 @@ class OpSet9():
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=[b, blocksize, blocksize, c // (blocksize**2), h, w]
)
shape=[b, blocksize, blocksize, c // (blocksize**2), h, w])
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 3, 4, 1, 5, 2]
)
perm=[0, 3, 4, 1, 5, 2])
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": node.name},
outputs=[node.name],
shape=[b, c // (blocksize**2), h * blocksize, w * blocksize]
)
shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])
else:
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=[b, c // (blocksize ** 2), blocksize, blocksize, h, w]
)
shape=[b, c // (blocksize**2), blocksize, blocksize, h, w])
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 1, 4, 2, 5, 3]
)
perm=[0, 1, 4, 2, 5, 3])
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": node.name},
outputs=[node.name],
shape=[b, c // (blocksize ** 2), h * blocksize, w * blocksize]
)
shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])
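        # Shape walk-through (hypothetical sizes): input [1, 4, 2, 2] with
        # blocksize 2 reshapes to [1, 1, 2, 2, 2, 2], the transpose moves each
        # block dim next to its spatial dim, and the final reshape yields
        # [1, 1, 4, 4]. The two branches above are ONNX DepthToSpace's DCR and
        # CRD modes.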
@print_mapping_info
def NonMaxSuppression(self, node):
nn_op_name = name_generator("nms", self.nn_name2id)
output_name = node.name
layer_outputs = [nn_op_name, output_name]
boxes = self.graph.get_input_node(node, idx=0, copy=True)
scores = self.graph.get_input_node(node, idx=1, copy=True)
inputs_len = len(node.layer.input)
layer_attrs = dict()
if inputs_len > 2:
max_output_boxes_per_class = self.graph.get_input_node(
node, idx=2, copy=True)
layer_attrs["nms_top_k"] = _const_weight_or_none(
max_output_boxes_per_class).tolist()[0]
else:
layer_attrs["nms_top_k"] = 0
if inputs_len > 3:
iou_threshold = self.graph.get_input_node(node, idx=3, copy=True)
layer_attrs["nms_threshold"] = _const_weight_or_none(
iou_threshold).tolist()[0]
else:
layer_attrs["nms_threshold"] = 0.0
if inputs_len > 4:
score_threshold = self.graph.get_input_node(node, idx=4, copy=True)
layer_attrs["score_threshold"] = _const_weight_or_none(
score_threshold).tolist()[0]
else:
layer_attrs["score_threshold"] = 0.0
self.paddle_graph.add_layer(
"custom_layer:NMS",
inputs={"bboxes": boxes.name,
"scores": scores.name},
outputs=layer_outputs,
**layer_attrs)
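    # Attribute mapping sketch: ONNX NonMaxSuppression's optional inputs
    # max_output_boxes_per_class, iou_threshold and score_threshold become the
    # custom NMS layer's nms_top_k, nms_threshold and score_threshold; absent
    # inputs fall back to 0 / 0.0 as above.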
......@@ -89,48 +89,60 @@ class OpSet9():
'Sub': 'paddle.subtract',
'Mul': 'paddle.multiply',
'Pow': 'paddle.pow',
'Less': 'paddle.less_than',
}
directly_map_ops = {
'Ceil': ['paddle.ceil'],
# reduce function
'ReduceMean': ['paddle.mean',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdims=1)],
'ReduceSum': ['paddle.sum',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdims=1)],
'ReduceMin': ['paddle.min',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdim=1)],
'ReduceMax': ['paddle.max',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdim=1)],
'ReduceProd': ['paddle.prod',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdim=1)],
'ReduceMean': [
'paddle.mean', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdims=1)
],
'ReduceSum': [
'paddle.sum', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdims=1)
],
'ReduceMin': [
'paddle.min', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdim=1)
],
'ReduceMax': [
'paddle.max', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdim=1)
],
'ReduceProd': [
'paddle.prod', dict(
axes='axis', keepdims='keepdim'), dict(
axes=None, keepdim=1)
],
# active function
'Relu': ['paddle.nn.functional.relu'],
'LeakyRelu': ['paddle.nn.functional.leaky_relu',
dict(alpha='negative_slope'),
dict(negative_slope=.01)],
'Elu': ['paddle.nn.functional.elu',
dict(alpha='alpha'),
dict(alpha=1.)],
'ThresholdedRelu': ['paddle.nn.functional.thresholded_relu',
dict(alpha='threshold'),
dict(alpha=1.)],
'LeakyRelu': [
'paddle.nn.functional.leaky_relu', dict(alpha='negative_slope'),
dict(negative_slope=.01)
],
'Elu':
['paddle.nn.functional.elu', dict(alpha='alpha'), dict(alpha=1.)],
'ThresholdedRelu': [
'paddle.nn.functional.thresholded_relu', dict(alpha='threshold'),
dict(alpha=1.)
],
'Tanh': ['paddle.nn.functional.tanh'],
'Sigmoid': ['paddle.nn.functional.sigmoid'],
'Softsign': ['paddle.nn.functional.softsign'],
'Softplus': ['paddle.nn.functional.softplus',
dict(threshold='threshold'),
dict(threshold=float(sys.maxsize))],
'Softplus': [
'paddle.nn.functional.softplus', dict(threshold='threshold'),
dict(threshold=float(sys.maxsize))
],
'Exp': ['paddle.exp'],
'Log': ['paddle.log'],
'Softmax': ['paddle.nn.functional.softmax',
dict(axis='axis'),
dict(axis=1)],
'Softmax':
['paddle.nn.functional.softmax', dict(axis='axis'), dict(axis=1)],
'Sqrt': ['paddle.sqrt'],
'Floor': ['paddle.floor'],
'Abs': ['paddle.abs'],
......@@ -176,12 +188,9 @@ class OpSet9():
op_type = self.elementwise_ops[node.layer_type]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
inputs_dict = {'x': val_x.name,
'y': val_y.name}
inputs_dict = {'x': val_x.name, 'y': val_y.name}
self.paddle_graph.add_layer(
op_type,
inputs=inputs_dict,
outputs=[node.name])
op_type, inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def place_holder(self, node):
......@@ -249,15 +258,17 @@ class OpSet9():
val_scales = self.graph.get_input_node(node, idx=1, copy=True)
# TODO(syf): paddle.nn.functional.interpolate will support the length
# which is the same as the rank of input.
# inputs['scale_factor'] = val_scales.name
attrs['scale_factor'] = self.params[val_scales.name].tolist()[2:]
# inputs['scale_factor'] = val_scales.name
attrs['scale_factor'] = self.params[val_scales.name].tolist()[
2:]
elif len(node.layer.input) == 3:
# opset 11
val_scales = self.graph.get_input_node(node, idx=2, copy=True)
# TODO(syf): paddle.nn.functional.interpolate will support the length
# which is the same as the rank of input.
# inputs['scale_factor'] = val_scales.name
attrs['scale_factor'] = self.params[val_scales.name].tolist()[2:]
# inputs['scale_factor'] = val_scales.name
attrs['scale_factor'] = self.params[val_scales.name].tolist()[
2:]
elif len(node.layer.input) == 4:
# opset 11
val_sizes = self.graph.get_input_node(node, idx=3, copy=True)
......@@ -274,8 +285,17 @@ class OpSet9():
outputs=[var_hw],
dtype=string('int32'))
inputs['size'] = var_hw
attrs = {"align_corners": False,
"mode": string(node.get_attr('mode', 'nearest'))}
attrs = {
"align_corners": False,
"mode": string(node.get_attr('mode', 'nearest'))
}
mode = node.get_attr('mode', 'nearest')
if mode == "linear":
attrs["mode"] = string("bilinear")
if node.get_attr('coordinate_transformation_mode',
'half_pixel') == 'pytorch_half_pixel':
attrs["align_corners"] = False
attrs["align_mode"] = 0
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.interpolate",
inputs=inputs,
......@@ -294,12 +314,19 @@ class OpSet9():
inputs['scale_factor'] = val_scales.name
mode = node.get_attr('mode', 'nearest')
attrs.update({"align_corners": False,
attrs.update({
"align_corners": False,
"mode": string(mode),
"align_mode": 1})
"align_mode": 1
})
val_x_shape = val_x.out_shapes[0]
if mode == "linear" and len(val_x_shape) == 4:
attrs["mode"] = string("bilinear")
if node.get_attr('coordinate_transformation_mode',
'half_pixel') == 'pytorch_half_pixel':
attrs["align_corners"] = False
attrs["align_mode"] = 0
else:
attrs["align_corners"] = True
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.interpolate",
......@@ -408,12 +435,14 @@ class OpSet9():
paddings = []
paddle_op = 'paddle.nn.functional.pad'
if len(pads) == 10 and sum(pads) == 0:
pads = pads[0: 6]
pads = pads[0:6]
if len(pads) in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads) # NCHW
assume_pad |= data_shape and 2 * (len(data_shape) - 2
) == len(pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * (len(output_shape) - 2) == len(pads) # NCHW
assume_pad |= output_shape and 2 * (len(output_shape) - 2
) == len(pads) # NCHW
if assume_pad:
if len(pads) == 2:
data_format = "NCL"
......@@ -429,20 +458,26 @@ class OpSet9():
layer_attrs['data_format'] = string(data_format)
else:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW
assume_pad |= data_shape and 2 * len(data_shape) == len(
pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW
assume_pad |= output_shape and 2 * len(
output_shape) == len(pads) # NCHW
if assume_pad:
paddings = np.array(pads).reshape(
(2, -1)).transpose().astype("int32").flatten().tolist()
(2,
-1)).transpose().astype("int32").flatten().tolist()
layer_attrs['pad'] = paddings
else:
raise Exception("The padding value {} is wrong!".format(pads))
raise Exception("The padding value {} is wrong!".format(
pads))
elif len(pads) == 8:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW
assume_pad |= data_shape and 2 * len(data_shape) == len(
pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW
assume_pad |= output_shape and 2 * len(output_shape) == len(
pads) # NCHW
if assume_pad:
paddings = np.array(pads).reshape(
(2, -1)).transpose().astype("int32")
......@@ -466,9 +501,11 @@ class OpSet9():
pads_len = val_pad.out_shapes[0][0]
if pads_len in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == pads_len # NCHW
assume_pad |= data_shape and 2 * (len(data_shape) - 2
) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * (len(output_shape) - 2) == pads_len # NCHW
assume_pad |= output_shape and 2 * (len(output_shape) - 2
) == pads_len # NCHW
if assume_pad:
if pads_len == 2:
data_format = "NCL"
......@@ -478,21 +515,25 @@ class OpSet9():
data_format = "NCDHW"
self.paddle_graph.add_layer(
"custom_layer:pad_with_two_input",
inputs={'x': val_x.name, 'pad': val_pad.name},
inputs={'x': val_x.name,
'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode),
data_format=string(data_format))
else:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW
assume_pad |= data_shape and 2 * len(
data_shape) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW
assume_pad |= output_shape and 2 * len(
output_shape) == pads_len # NCHW
if assume_pad:
if pads_len == 4:
self.paddle_graph.add_layer(
"custom_layer:pad_all_dim2",
inputs={'x': val_x.name, 'pad': val_pad.name},
inputs={'x': val_x.name,
'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode))
......@@ -500,13 +541,16 @@ class OpSet9():
raise Exception("The padding value is wrong!")
elif pads_len == 8:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW
assume_pad |= data_shape and 2 * len(
data_shape) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW
assume_pad |= output_shape and 2 * len(
output_shape) == pads_len # NCHW
if assume_pad:
self.paddle_graph.add_layer(
"custom_layer:pad_all_dim4",
inputs={'x': val_x.name, 'pad': val_pad.name},
inputs={'x': val_x.name,
'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode))
......@@ -603,11 +647,9 @@ class OpSet9():
val_scale = self.graph.get_input_node(node, idx=1, copy=True)
val_b = self.graph.get_input_node(node, idx=2, copy=True)
epsilon = node.get_attr('epsilon', 1e-5)
layer_attrs = {
'eps': epsilon,
}
layer_attrs = {'eps': epsilon, }
dim = len(val_x.out_shapes[0])
if dim ==2 :
if dim == 2:
layer_attrs["data_format"] = string("NC")
elif dim == 3:
layer_attrs["data_format"] = string("NCL")
......@@ -616,12 +658,16 @@ class OpSet9():
elif dim == 5:
layer_attrs["data_format"] = string("NCDHW")
else:
raise Exception("The paddle only support 2D, 3D, 4D or 5D input in InstanceNormalization.")
raise Exception(
"The paddle only support 2D, 3D, 4D or 5D input in InstanceNormalization."
)
self.paddle_graph.add_layer(
"paddle.nn.functional.instance_norm",
inputs={"x": val_x.name,
inputs={
"x": val_x.name,
"weight": val_scale.name,
"bias": val_b.name},
"bias": val_b.name
},
outputs=[node.name],
**layer_attrs)
......@@ -637,16 +683,10 @@ class OpSet9():
'fill_value': 1
}
self.paddle_graph.add_layer(
'paddle.full',
inputs={},
outputs=[name_ones],
**attr_ones)
inputs_dict = {'x': name_ones,
'y': val_x.name}
'paddle.full', inputs={}, outputs=[name_ones], **attr_ones)
inputs_dict = {'x': name_ones, 'y': val_x.name}
self.paddle_graph.add_layer(
'paddle.multiply',
inputs=inputs_dict,
outputs=[node.name])
'paddle.multiply', inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def Gather(self, node):
......@@ -804,9 +844,11 @@ class OpSet9():
if len(indices.out_shapes[0]) == 1:
self.paddle_graph.add_layer(
'paddle.scatter',
inputs={'x': val_x.name,
inputs={
'x': val_x.name,
'index': indices.name,
'updates': updates.name},
'updates': updates.name
},
outputs=[node.name])
else:
input_inner_indices = node.name + '_input_inner_indices'
......@@ -881,9 +923,11 @@ class OpSet9():
val_limit = self.graph.get_input_node(node, idx=1, copy=True)
val_delta = self.graph.get_input_node(node, idx=2, copy=True)
dtype = val_start.dtype
inputs = {'start': val_start.name,
inputs = {
'start': val_start.name,
'end': val_limit.name,
'step': val_delta.name}
'step': val_delta.name
}
self.paddle_graph.add_layer(
'paddle.arange',
inputs=inputs,
......@@ -927,7 +971,8 @@ class OpSet9():
# ends_value[idx] = 2**31 - 1
#print(val_x.out_shapes)
for idx in range(len(ends_value)):
if starts_value[idx] >= val_x.out_shapes[0][axes[idx]] and val_x.out_shapes[0][axes[idx]] > 0:
if starts_value[idx] >= val_x.out_shapes[0][axes[
idx]] and val_x.out_shapes[0][axes[idx]] > 0:
starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
ends_value[idx] = val_x.out_shapes[0][axes[idx]]
elif ends_value[idx] > 2**31 - 1:
......@@ -965,7 +1010,6 @@ class OpSet9():
ends[idx] = 2**31 - 1
layer_attrs = {"axes": axes, "starts": starts, "ends": ends}
if steps is not None:
layer_attrs['strides'] = steps
self.paddle_graph.add_layer(
......@@ -992,10 +1036,7 @@ class OpSet9():
'this is not supported')
if len(value) == 1:
value = value[0]
layer_attrs = {
'dtype': string(dtype),
'fill_value': value
}
layer_attrs = {'dtype': string(dtype), 'fill_value': value}
self.paddle_graph.add_layer(
"paddle.full",
inputs={'shape': val_shape.name},
......@@ -1125,7 +1166,8 @@ class OpSet9():
@print_mapping_info
def Not(self, node):
val_input = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer('paddle.logical_not',
self.paddle_graph.add_layer(
'paddle.logical_not',
inputs={'x': val_input.name},
outputs=[node.name])
......@@ -1215,8 +1257,7 @@ class OpSet9():
trans_a = bool(node.get_attr('transA', 0)) # optional
trans_b = bool(node.get_attr('transB', 0)) # optional
val_mm = node.name + '_mm'
matmul_inputs = {"x": val_a.name,
"y": val_b.name}
matmul_inputs = {"x": val_a.name, "y": val_b.name}
attr_matmul = {
"transpose_x": trans_a,
"transpose_y": trans_b,
......@@ -1227,19 +1268,13 @@ class OpSet9():
outputs=[val_mm],
**attr_matmul)
self.paddle_graph.add_layer(
"paddle.scale",
inputs={"x": val_mm},
outputs=[val_mm],
scale=alpha)
"paddle.scale", inputs={"x": val_mm}, outputs=[val_mm], scale=alpha)
if beta != 0:
if beta == 1.:
add_inputs = {"x": val_mm,
"y": val_c.name}
add_inputs = {"x": val_mm, "y": val_c.name}
self.paddle_graph.add_layer(
"paddle.add",
inputs=add_inputs,
outputs=[node.name])
"paddle.add", inputs=add_inputs, outputs=[node.name])
else:
var_beta = node.name + '_beta'
self.paddle_graph.add_layer(
......@@ -1249,9 +1284,7 @@ class OpSet9():
scale=beta)
add_inputs = {"x": val_mm, "y": var_beta}
self.paddle_graph.add_layer(
"paddle.add",
inputs=add_inputs,
outputs=[node.name])
"paddle.add", inputs=add_inputs, outputs=[node.name])
@print_mapping_info
def Sum(self, node):
......@@ -1262,9 +1295,8 @@ class OpSet9():
"y": self.graph.get_input_node(
node, idx=1, copy=True).name,
}
self.paddle_graph.add_layer("paddle.add",
inputs=inputs_dict,
outputs=[node.name])
self.paddle_graph.add_layer(
"paddle.add", inputs=inputs_dict, outputs=[node.name])
for idx, ipt in enumerate(val_inps[2:]):
            # offset by 2: inputs 0 and 1 were already summed above
            y = self.graph.get_input_node(node, idx=idx + 2, copy=True)
......@@ -1273,9 +1305,7 @@ class OpSet9():
"y": y.name,
}
self.paddle_graph.add_layer(
"paddle.add",
inputs=inputs_dict,
outputs=[node.name])
"paddle.add", inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def MatMul(self, node):
......@@ -1283,8 +1313,7 @@ class OpSet9():
val_y = self.graph.get_input_node(node, idx=1, copy=True)
x_shape = val_x.out_shapes[0]
y_shape = val_y.out_shapes[0]
inputs_dict = {"x": val_x.name,
"y": val_y.name}
inputs_dict = {"x": val_x.name, "y": val_y.name}
if y_shape[0] == 1 and x_shape[-1] != 1 and x_shape[0] != 1:
y_squeeze = val_y.name + '_squeeze'
self.paddle_graph.add_layer(
......@@ -1294,14 +1323,10 @@ class OpSet9():
axis=[0])
inputs_dict['y'] = y_squeeze
self.paddle_graph.add_layer(
"paddle.matmul",
inputs=inputs_dict,
outputs=[node.name])
"paddle.matmul", inputs=inputs_dict, outputs=[node.name])
else:
self.paddle_graph.add_layer(
"paddle.matmul",
inputs=inputs_dict,
outputs=[node.name])
"paddle.matmul", inputs=inputs_dict, outputs=[node.name])
@print_mapping_info
def BatchNormalization(self, node):
......@@ -1322,11 +1347,13 @@ class OpSet9():
}
self.paddle_graph.add_layer(
"paddle.nn.functional.batch_norm",
inputs={"x": val_x.name,
inputs={
"x": val_x.name,
"weight": val_scale.name,
"bias": val_b.name,
"running_mean": val_mean.name,
"running_var": val_var.name},
"running_var": val_var.name
},
outputs=[node.name],
**layer_attrs)
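        # Note: this opset file maps BatchNormalization onto the functional
        # API with the stored running statistics, i.e. inference-style
        # normalization rather than a trainable paddle.nn.BatchNorm layer.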
......@@ -1409,8 +1436,7 @@ class OpSet9():
"paddle.greater_than",
inputs={'x': val_x.name,
'y': val_y.name},
outputs=[node.name],
param_attr=None)
outputs=[node.name])
@print_mapping_info
def Where(self, node):
......@@ -1480,17 +1506,13 @@ class OpSet9():
num_or_sections=1,
axis=val_x_dim)
self.paddle_graph.add_layer(
"paddle.concat",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.concat", inputs={"x": val_x.name}, outputs=[node.name])
@print_mapping_info
def Identity(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.assign",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.assign", inputs={"x": val_x.name}, outputs=[node.name])
@print_mapping_info
def Tile(self, node):
......@@ -1504,13 +1526,16 @@ class OpSet9():
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": repeats},
outputs=["{}.tmp".format(repeats)],
outputs=["{}_tmp".format(repeats)],
dtype=string("int32"))
repeats = "{}.tmp".format(repeats)
repeats = "{}_tmp".format(repeats)
elif isinstance(repeats, int):
repeats = [repeats]
elif type(repeats) is np.ndarray:
repeats = repeats.tolist()
attr = {
'expand_times': repeats,
"name": string(node.name),
......@@ -1640,7 +1665,8 @@ class OpSet9():
}
if has_bias:
layer_inputs["bias"] = val_b.name
if reduce(lambda x,y:x*y, input_shape) in [1, -1] and 1 not in input_shape:
if reduce(lambda x, y: x * y,
input_shape) in [1, -1] and 1 not in input_shape:
input_shape[1] = num_in_channels * num_groups
input_shape[0] = 0
input_shape[2] = 0
......@@ -1650,10 +1676,7 @@ class OpSet9():
outputs=[layer_inputs["x"]],
shape=input_shape)
self.paddle_graph.add_layer(
paddle_op,
inputs=layer_inputs,
outputs=[node.name],
**layer_attrs)
paddle_op, inputs=layer_inputs, outputs=[node.name], **layer_attrs)
@print_mapping_info
def ConvTranspose(self, node):
......@@ -1688,14 +1711,14 @@ class OpSet9():
output_size[1] = (val_x.out_shapes[0][3] - 1
) * strides[1] - 2 * paddings[1] + dilations[1] * (
kernel_shape[1] - 1) + 1 + out_padding[1]
layer_inputs = {'x': val_x.name,
"weight": val_w.name}
layer_inputs = {'x': val_x.name, "weight": val_w.name}
layer_attrs = {
"stride": strides,
"dilation": dilations,
"padding": paddings,
"groups": num_groups,
"output_size": node.out_shapes[0][2:]}
"output_size": node.out_shapes[0][2:]
}
if val_b is not None:
layer_inputs["bias"] = val_b.name
self.paddle_graph.add_layer(
......@@ -1709,8 +1732,7 @@ class OpSet9():
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axis = node.get_attr('axis')
keepdims = False if node.get_attr('keepdims') == 0 else True
layer_attrs = {'axis': axis,
'keepdim': keepdims}
layer_attrs = {'axis': axis, 'keepdim': keepdims}
self.paddle_graph.add_layer(
'paddle.argmax',
inputs={"x": val_x.name},
......@@ -1721,18 +1743,14 @@ class OpSet9():
def Size(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.shape",
inputs={"input": val_x.name},
outputs=[node.name])
"paddle.shape", inputs={"input": val_x.name}, outputs=[node.name])
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": node.name},
outputs=[node.name],
dtype=string('int64'))
self.paddle_graph.add_layer(
"paddle.prod",
inputs={"x": node.name},
outputs=[node.name])
"paddle.prod", inputs={"x": node.name}, outputs=[node.name])
@print_mapping_info
def Sign(self, node):
......@@ -1744,9 +1762,7 @@ class OpSet9():
outputs=[val_x.name],
dtype=string("float32"))
self.paddle_graph.add_layer(
"paddle.sign",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.sign", inputs={"x": val_x.name}, outputs=[node.name])
if node.dtype not in ["float16", "float32", "float64"]:
self.paddle_graph.add_layer(
"paddle.cast",
......@@ -1762,9 +1778,11 @@ class OpSet9():
axis = node.get_attr('axis', -1)
self.paddle_graph.add_layer(
"custom_layer:one_hot",
inputs={"indices": indices.name,
inputs={
"indices": indices.name,
"depth": depth.name,
"values": values.name},
"values": values.name
},
outputs=[node.name],
axis=axis)
......@@ -1772,9 +1790,7 @@ class OpSet9():
def Reciprocal(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.reciprocal",
inputs={"x": val_x.name},
outputs=[node.name])
"paddle.reciprocal", inputs={"x": val_x.name}, outputs=[node.name])
@print_mapping_info
def TopK(self, node):
......@@ -1782,13 +1798,18 @@ class OpSet9():
val_k = self.graph.get_input_node(node, idx=1, copy=True)
layer_attrs = dict()
layer_attrs["axis"] = node.get_attr('axis', -1)
layer_attrs["largest"] = True if node.get_attr('largest', 1) == 1 else False
layer_attrs["sorted"] = True if node.get_attr('sorted', 1) == 1 else False
layer_attrs["largest"] = True if node.get_attr('largest',
1) == 1 else False
layer_attrs["sorted"] = True if node.get_attr('sorted',
1) == 1 else False
self.paddle_graph.add_layer(
"paddle.topk",
inputs={"x": val_x.name,
"k": val_k.name},
outputs=["{}_p{}".format(node.layer_name, 0), "{}_p{}".format(node.layer_name, 1)],
outputs=[
"{}_p{}".format(node.layer_name, 0),
"{}_p{}".format(node.layer_name, 1)
],
**layer_attrs)
@print_mapping_info
......@@ -1799,12 +1820,7 @@ class OpSet9():
beta = node.get_attr('beta', 0.75)
bias = node.get_attr('bias', 1.0)
size = node.get_attr('size')
layer_attrs = {
'size': size,
'alpha': alpha,
'beta': beta,
'k': bias
}
layer_attrs = {'size': size, 'alpha': alpha, 'beta': beta, 'k': bias}
self.paddle_graph.add_layer(
"custom_layer:local_response_norm",
inputs={"x": val_x.name},
......@@ -1823,37 +1839,30 @@ class OpSet9():
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=[b, blocksize, blocksize, c // (blocksize**2), h, w]
)
shape=[b, blocksize, blocksize, c // (blocksize**2), h, w])
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 3, 4, 1, 5, 2]
)
perm=[0, 3, 4, 1, 5, 2])
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": node.name},
outputs=[node.name],
shape=[b, c // (blocksize**2), h * blocksize, w * blocksize]
)
shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])
else:
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=[b, c // (blocksize ** 2), blocksize, blocksize, h, w]
)
shape=[b, c // (blocksize**2), blocksize, blocksize, h, w])
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 1, 4, 2, 5, 3]
)
perm=[0, 1, 4, 2, 5, 3])
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": node.name},
outputs=[node.name],
shape=[b, c // (blocksize ** 2), h * blocksize, w * blocksize]
)
\ No newline at end of file
shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])