Commit 5aad32ff authored by wjj19950828

Merge remote-tracking branch 'upstream/develop' into fixed_Misspell

......@@ -117,7 +117,7 @@ Aten:
| 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
| 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | 136 | aten::group\_norm |
| 137 | aten::argmax | 138 | aten::copy | | | | |
| 137 | aten::argmax | 138 | aten::copy | 139 | aten::upsample\_trilinear3d | | |
Prim:
| 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
......
__version__ = "1.3.6"
__version__ = "1.3.7"
from .core.program import PaddleGraph
......
......@@ -192,6 +192,14 @@ def tf2paddle(model_path,
ConverterCheck(
task="TensorFlow", time_info=time_info,
lite_state="Success").start()
# for conversion survey
logging.info("================================================")
logging.info("")
logging.info(
"Model Convertd! Fill this survey to help X2Paddle better, https://iwenjuan.baidu.com/?code=npyd51 "
)
logging.info("")
logging.info("================================================")
def caffe2paddle(proto_file,
......@@ -240,6 +248,14 @@ def caffe2paddle(proto_file,
if not disable_feedback:
ConverterCheck(
task="Caffe", time_info=time_info, lite_state="Success").start()
# for conversion survey
logging.info("================================================")
logging.info("")
logging.info(
"Model Convertd! Fill this survey to help X2Paddle better, https://iwenjuan.baidu.com/?code=npyd51 "
)
logging.info("")
logging.info("================================================")
def onnx2paddle(model_path,
......@@ -293,6 +309,14 @@ def onnx2paddle(model_path,
if not disable_feedback:
ConverterCheck(
task="ONNX", time_info=time_info, lite_state="Success").start()
# for conversion survey
logging.info("================================================")
logging.info("")
logging.info(
"Model Convertd! Fill this survey to help X2Paddle better, https://iwenjuan.baidu.com/?code=npyd51 "
)
logging.info("")
logging.info("================================================")
def pytorch2paddle(module,
......@@ -364,6 +388,14 @@ def pytorch2paddle(module,
ConverterCheck(
task="PyTorch", time_info=time_info,
lite_state="Success").start()
# for conversion survey
logging.info("================================================")
logging.info("")
logging.info(
"Model Convertd! Fill this survey to help X2Paddle better, https://iwenjuan.baidu.com/?code=npyd51 "
)
logging.info("")
logging.info("================================================")
def main():
......
......@@ -34,7 +34,7 @@ class DetectionOutput(object):
pbv = priorbox_list[1]
pb = paddle.reshape(x=pb, shape=[-1, 4])
pbv = paddle.reshape(x=pbv, shape=[-1, 4])
pb_dim = fluid.layers.shape(pb)[0]
pb_dim = paddle.shape(pb)[0]
loc = paddle.reshape(x0, shape=[-1, pb_dim, 4])
conf_flatten = paddle.reshape(x1, shape=[0, pb_dim, -1])
out = fluid.layers.detection_output(
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import paddle
import paddle.fluid as fluid
class Normalize(object):
......@@ -21,7 +20,7 @@ class Normalize(object):
self.axis = axis
def __call__(self, x, param):
l2_norm = fluid.layers.l2_normalize(x=x, axis=1)
l2_norm = paddle.norm(x=x, p=2, axis=1, keepdim=True)
param = paddle.reshape(param, [param.shape[-1]])
perm = list(range(len(l2_norm.shape)))
perm.pop(self.axis)
......
......@@ -13,7 +13,87 @@
# limitations under the License.
import paddle
import paddle.fluid as fluid
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype
@paddle.jit.not_to_static
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
min_max_aspect_ratios_order=False,
name=None):
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple '
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
cur_max_sizes = None
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
cur_max_sizes = max_sizes
if in_dynamic_mode():
attrs = ('min_sizes', min_sizes, 'aspect_ratios', aspect_ratios,
'variances', variance, 'flip', flip, 'clip', clip, 'step_w',
steps[0], 'step_h', steps[1], 'offset', offset,
'min_max_aspect_ratios_order', min_max_aspect_ratios_order)
if cur_max_sizes is not None:
attrs += ('max_sizes', cur_max_sizes)
box, var = _C_ops.prior_box(input, image, *attrs)
return box, var
else:
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if cur_max_sizes is not None:
attrs['max_sizes'] = cur_max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
class PriorBox(object):
......@@ -32,8 +112,7 @@ class PriorBox(object):
}
def __call__(self, x0, x1):
box, var = fluid.layers.prior_box(
input=x0, image=x1, **self.priorbox_layer_attrs)
box, var = prior_box(input=x0, image=x1, **self.priorbox_layer_attrs)
box = paddle.reshape(x=box, shape=[1, 1, -1])
var = paddle.reshape(x=var, shape=[1, 1, -1])
out = paddle.concat(x=[box, var], axis=1)
......
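A quick smoke test of the standalone prior_box wrapper above; the shapes and attribute values are illustrative assumptions, and the attr-style _C_ops call assumes a Paddle version from the same era as this commit (pre-2.4):

import paddle

feat = paddle.rand([1, 32, 19, 19])   # feature map, NCHW (assumed)
img = paddle.rand([1, 3, 300, 300])   # original image, NCHW (assumed)
boxes, variances = prior_box(
    feat, img,
    min_sizes=[100.0],
    aspect_ratios=[1.0, 2.0],
    steps=[16.0, 16.0],
    offset=0.5)
# boxes and variances both have shape [H, W, num_priors, 4]
print(boxes.shape, variances.shape)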
......@@ -13,7 +13,51 @@
# limitations under the License.
import paddle
import paddle.fluid as fluid
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype
@paddle.jit.not_to_static
def roi_pool(input,
rois,
pooled_height,
pooled_width,
spatial_scale=1.0,
rois_num=None,
name=None):
if in_dynamic_mode():
assert rois_num is not None, "rois_num should not be None in dygraph mode."
pool_out, argmaxes = _C_ops.roi_pool(
input, rois, rois_num, "pooled_height", pooled_height,
"pooled_width", pooled_width, "spatial_scale", spatial_scale)
return pool_out, argmaxes
else:
check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
helper = LayerHelper('roi_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
argmaxes = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": input,
"ROIs": rois,
}
if rois_num is not None:
inputs['RoisNum'] = rois_num
helper.append_op(
type="roi_pool",
inputs=inputs,
outputs={"Out": pool_out,
"Argmax": argmaxes},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale
})
return pool_out, argmaxes
class ROIPooling(object):
......@@ -26,6 +70,5 @@ class ROIPooling(object):
def __call__(self, x0, x1):
slice_x1 = paddle.slice(input=x1, axes=[1], starts=[1], ends=[5])
out = fluid.layers.roi_pool(
input=x0, rois=slice_x1, **self.roipooling_layer_attrs)
out = roi_pool(input=x0, rois=slice_x1, **self.roipooling_layer_attrs)
return out
......@@ -13,7 +13,6 @@
# limitations under the License.
import paddle
import paddle.fluid as fluid
class Select(object):
......
......@@ -429,13 +429,13 @@ class CaffeOpMapper():
assert params.local_size % 2 == 1
alpha = params.alpha / float(params.local_size)
layer_attrs = {
"n": params.local_size,
"k": params.k,
"size": params.local_size,
"alpha": alpha,
"beta": params.beta,
"k": params.k,
}
self.paddle_graph.add_layer(
"paddle.fluid.layers.lrn",
"paddle.nn.LocalResponseNorm",
inputs={"input": input.name},
outputs=[node.layer_name],
**layer_attrs)
......@@ -1209,10 +1209,10 @@ class CaffeOpMapper():
input = self.graph.get_input_node(node, idx=0, copy=True)
params = node.layer.shuffle_channel_param
self.paddle_graph.add_layer(
"paddle.fluid.layers.shuffle_channel",
"paddle.nn.functional.channel_shuffle",
inputs={"x": input.name},
outputs=[node.layer_name],
group=params.group)
groups=params.group)
def Upsample(self, node):
assert len(
......
......@@ -18,3 +18,5 @@ from .pad_all_dim2 import PadAllDim2
from .pad_all_dim4 import PadAllDim4
from .pad_all_dim4_one_input import PadAllDim4WithOneInput
from .nms import NMS
from .roi_align import ROIAlign
from .roi_pooling import ROIPooling
......@@ -13,9 +13,9 @@
# limitations under the License.
import paddle
from paddle.fluid import core
from paddle.fluid.framework import Variable, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper
def multiclass_nms(bboxes,
......@@ -33,13 +33,13 @@ def multiclass_nms(bboxes,
name=None):
helper = LayerHelper('multiclass_nms3', **locals())
if in_dygraph_mode():
if in_dynamic_mode():
attrs = ('background_label', background_label, 'score_threshold',
score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,
'normalized', normalized)
output, index, nms_rois_num = core.ops.multiclass_nms3(bboxes, scores,
rois_num, *attrs)
output, index, nms_rois_num = _C_ops.multiclass_nms3(bboxes, scores,
rois_num, *attrs)
if not return_index:
index = None
return output, nms_rois_num, index
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype
@paddle.jit.not_to_static
def roi_align(input,
rois,
pooled_height,
pooled_width,
spatial_scale=1.0,
sampling_ratio=-1,
rois_num=None,
aligned=False,
name=None):
if in_dynamic_mode():
assert rois_num is not None, "rois_num should not be None in dygraph mode."
align_out = _C_ops.roi_align(
input, rois, rois_num, "pooled_height", pooled_height,
"pooled_width", pooled_width, "spatial_scale", spatial_scale,
"sampling_ratio", sampling_ratio, "aligned", aligned)
return align_out
else:
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'roi_align')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
'roi_align')
helper = LayerHelper('roi_align', **locals())
dtype = helper.input_dtype()
align_out = helper.create_variable_for_type_inference(dtype)
inputs = {
"X": input,
"ROIs": rois,
}
if rois_num is not None:
inputs['RoisNum'] = rois_num
helper.append_op(
type="roi_align",
inputs=inputs,
outputs={"Out": align_out},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale,
"sampling_ratio": sampling_ratio,
"aligned": aligned,
})
return align_out
class ROIAlign(object):
def __init__(self, pooled_height, pooled_width, spatial_scale,
sampling_ratio):
self.roialign_layer_attrs = {
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale,
'sampling_ratio': sampling_ratio,
}
def __call__(self, x0, x1, x2):
out = roi_align(
input=x0, rois=x1, rois_num=x2, **self.roialign_layer_attrs)
return out
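A hypothetical call to the ROIAlign wrapper above; the box coordinates, feature shape, and pooled sizes are made-up values, and rois_num is required because the dynamic branch calls _C_ops.roi_align:

import paddle

feats = paddle.rand([1, 256, 32, 32])
rois = paddle.to_tensor([[4.0, 4.0, 20.0, 20.0]])  # one box: x1, y1, x2, y2
rois_num = paddle.to_tensor([1], dtype="int32")
align = ROIAlign(
    pooled_height=7, pooled_width=7, spatial_scale=1.0, sampling_ratio=-1)
out = align(feats, rois, rois_num)
print(out.shape)  # [1, 256, 7, 7]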
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype
@paddle.jit.not_to_static
def roi_pool(input,
rois,
pooled_height,
pooled_width,
spatial_scale=1.0,
rois_num=None,
name=None):
if in_dynamic_mode():
assert rois_num is not None, "rois_num should not be None in dygraph mode."
pool_out, argmaxes = _C_ops.roi_pool(
input, rois, rois_num, "pooled_height", pooled_height,
"pooled_width", pooled_width, "spatial_scale", spatial_scale)
return pool_out, argmaxes
else:
check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
helper = LayerHelper('roi_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
argmaxes = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": input,
"ROIs": rois,
}
if rois_num is not None:
inputs['RoisNum'] = rois_num
helper.append_op(
type="roi_pool",
inputs=inputs,
outputs={"Out": pool_out,
"Argmax": argmaxes},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale
})
return pool_out, argmaxes
class ROIPooling(object):
def __init__(self, pooled_height, pooled_width, spatial_scale):
self.roipooling_layer_attrs = {
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale
}
def __call__(self, x0, x1):
out = roi_pool(input=x0, rois=x1, **self.roipooling_layer_attrs)
return out
......@@ -262,6 +262,8 @@ class OpSet9():
shape = node.out_shapes[0]
if hasattr(node.weight, "shape") and len(node.weight.shape) == 0:
if node.weight == float('inf') or node.weight == float('-inf'):
node.weight = string(node.weight)
self.paddle_graph.add_layer(
"paddle.full",
inputs={},
......@@ -536,12 +538,14 @@ class OpSet9():
'pooled_width': pooled_width,
'spatial_scale': spatial_scale,
'sampling_ratio': sampling_ratio,
'rois_num': val_rois_num,
}
self.paddle_graph.add_layer(
'paddle.fluid.layers.roi_align',
inputs={'input': val_x.name,
'rois': val_rois.name},
'custom_layer:ROIAlign',
inputs={
'input': val_x.name,
'rois': val_rois.name,
'rois_num': val_rois_num
},
outputs=[node.name],
**layer_attrs)
......@@ -558,7 +562,7 @@ class OpSet9():
'spatial_scale': spatial_scale,
}
self.paddle_graph.add_layer(
'paddle.fluid.layers.roi_pool',
'custom_layer:ROIPooling',
inputs={'input': val_x.name,
'rois': val_rois.name},
outputs=[node.name],
......@@ -792,6 +796,8 @@ class OpSet9():
if len(value) == 1:
value = value.tolist()
value = value[0]
if value == float('inf') or value == float('-inf'):
value = string(value)
self.paddle_graph.add_layer(
"paddle.full",
inputs={},
......@@ -1093,6 +1099,12 @@ class OpSet9():
val_x = self.graph.get_input_node(node, idx=0, copy=True)
starts, ends, axes, steps = None, None, None, None
layer_attrs = {}
if val_x.dtype == 'uint8':
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": val_x.name},
outputs=[val_x.name],
dtype=string('int32'))
if len(node.inputs) > 1:
starts = self.graph.get_input_node(node, idx=1, copy=True)
ends = self.graph.get_input_node(node, idx=2, copy=True)
......@@ -1121,8 +1133,9 @@ class OpSet9():
starts_value = starts_value.copy()
ends_value = ends_value.copy()
for idx in range(len(ends_value)):
if starts_value[idx] >= val_x.out_shapes[0][axes[
idx]] and val_x.out_shapes[0][axes[idx]] > 0:
if len(val_x.out_shapes[0]) != 0 and starts_value[
idx] >= val_x.out_shapes[0][axes[
idx]] and val_x.out_shapes[0][axes[idx]] > 0:
starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
ends_value[idx] = val_x.out_shapes[0][axes[idx]]
elif ends_value[idx] > 2**31 - 1:
......@@ -1178,11 +1191,16 @@ class OpSet9():
inputs={"input": val_x.name},
outputs=[node.name],
**layer_attrs)
if val_x.dtype == 'uint8':
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": node.name},
outputs=[node.name],
dtype=string('uint8'))
@print_mapping_info
def ConstantOfShape(self, node):
val_shape = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_node(node.layer.output[0], copy=True)
value = node.get_attr('value')
dtype = value.dtype
......@@ -1191,6 +1209,8 @@ class OpSet9():
'this is not supported')
if len(value) == 1:
value = value[0]
if value == float('inf') or value == float('-inf'):
value = string(value)
layer_attrs = {'dtype': string(dtype), 'fill_value': value}
self.paddle_graph.add_layer(
"paddle.full",
......@@ -1550,20 +1570,37 @@ class OpSet9():
val_x = self.graph.get_input_node(node, idx=0, copy=True)
output_shape = val_x.out_shapes[0]
axis = node.get_attr('axis', 1)
shape_list = [1, 1]
if axis == 0:
for s in output_shape:
shape_list[1] *= s
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=[1, -1])
else:
for s in output_shape[:axis]:
shape_list[0] *= s
for s in output_shape[axis:]:
shape_list[1] *= s
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=shape_list)
if len(output_shape) != 0:
shape_list = [1, 1]
for s in output_shape[:axis]:
shape_list[0] *= s
for s in output_shape[axis:]:
shape_list[1] *= s
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=shape_list)
else:
# flatten + reshape
self.paddle_graph.add_layer(
"paddle.flatten",
inputs={"input": val_x.name},
outputs=[val_x.name + "_flatten"],
start_axis=0,
stop_axis=axis)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={'x': val_x.name + "_flatten"},
outputs=[node.name],
shape=[0, -1])
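A worked example of the static branch above, with an assumed input shape: ONNX Flatten with axis=2 on a [2, 3, 4, 5] tensor collapses the leading and trailing dimension groups into [2*3, 4*5]:

output_shape = [2, 3, 4, 5]
axis = 2
shape_list = [1, 1]
for s in output_shape[:axis]:
    shape_list[0] *= s
for s in output_shape[axis:]:
    shape_list[1] *= s
assert shape_list == [6, 20]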
@print_mapping_info
def Gemm(self, node):
......@@ -1790,7 +1827,11 @@ class OpSet9():
def Squeeze(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axes = node.get_attr('axes')
if len(val_x.out_shapes[0]) == 1:
if axes is None:
axes_node = self.graph.get_input_node(node, idx=1, copy=True)
axes = _const_weight_or_none(axes_node, necessary=True)
# deal with scalar (0-D) tensor
if len(val_x.out_shapes[0]) <= 1 and len(axes) == 1 and axes[0] == 0:
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": val_x.name},
......@@ -1829,69 +1870,25 @@ class OpSet9():
val_x = self.graph.get_input_node(node, idx=1, copy=True)
val_y = self.graph.get_input_node(node, idx=2, copy=True)
not_condition = condition.name + '_not'
self.paddle_graph.add_layer(
"paddle.logical_not",
inputs={"x": condition.name},
outputs=[not_condition])
cast_not_condition = not_condition + '_cast'
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": not_condition},
outputs=[cast_not_condition],
dtype=string(val_x.dtype))
cast_condition = condition.name + '_cast'
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": condition.name},
outputs=[cast_condition],
dtype=string(val_x.dtype))
mul_val_x = val_x.name + '_mul'
self.paddle_graph.add_layer(
"paddle.multiply",
inputs={'x': val_x.name,
'y': cast_condition},
outputs=[mul_val_x])
mul_val_y = val_y.name + '_mul'
self.paddle_graph.add_layer(
"paddle.multiply",
inputs={'x': val_y.name,
'y': cast_not_condition},
outputs=[mul_val_y])
self.paddle_graph.add_layer(
"paddle.add",
inputs={'x': mul_val_x,
'y': mul_val_y},
"paddle.where",
inputs={
'condition': condition.name,
'x': val_x.name,
'y': val_y.name
},
outputs=[node.name])
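The old mask-multiply-add expansion is replaced by a single paddle.where, which selects elementwise from x where the condition holds and from y elsewhere; a minimal sketch with assumed inputs:

import paddle

cond = paddle.to_tensor([True, False, True])
x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([10.0, 20.0, 30.0])
print(paddle.where(cond, x, y).numpy())  # [ 1. 20.  3.]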
@print_mapping_info
def NonZero(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_x_dim = len(val_x.out_shapes[0])
if val_x_dim == 1:
self.paddle_graph.add_layer(
"paddle.nonzero",
inputs={"x": val_x.name},
outputs=[val_x.name])
self.paddle_graph.add_layer(
"paddle.transpose",
inputs={"x": val_x.name},
outputs=[node.layer_name],
perm=[1, 0])
if val_x_dim > 1:
self.paddle_graph.add_layer(
"paddle.nonzero",
inputs={"x": val_x.name},
outputs=[val_x.name])
self.paddle_graph.add_layer(
"paddle.split",
inputs={"x": val_x.name},
outputs=[val_x.name],
num_or_sections=1,
axis=val_x_dim)
self.paddle_graph.add_layer(
"paddle.concat", inputs={"x": val_x.name}, outputs=[node.name])
self.paddle_graph.add_layer(
"paddle.nonzero",
inputs={"x": val_x.name},
outputs=[val_x.name],
as_tuple=True)
self.paddle_graph.add_layer(
"paddle.concat", inputs={"x": val_x.name}, outputs=[node.name])
@print_mapping_info
def Identity(self, node):
......@@ -2565,27 +2562,42 @@ class OpSet9():
def TopK(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_k = self.graph.get_input_node(node, idx=1, copy=True)
if val_k.dtype != "int32":
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": val_k.name},
outputs=[val_k.name],
dtype=string('int32'))
layer_attrs = dict()
layer_attrs["axis"] = node.get_attr('axis', -1)
layer_attrs["largest"] = True if node.get_attr('largest',
1) == 1 else False
layer_attrs["sorted"] = True if node.get_attr('sorted',
1) == 1 else False
self.paddle_graph.add_layer(
"paddle.topk",
inputs={"x": val_x.name,
"k": val_k.name},
outputs=[
"{}_p{}".format(node.layer_name, 0),
"{}_p{}".format(node.layer_name, 1)
],
**layer_attrs)
k = _const_weight_or_none(val_k)
if isinstance(k, (list, tuple, np.ndarray)):
k = k[0]
# If k can be resolved to a constant, pass it as an attribute; otherwise pass it as an input tensor
if k is not None:
layer_attrs["k"] = k
self.paddle_graph.add_layer(
"paddle.topk",
inputs={"x": val_x.name},
outputs=[
"{}_p{}".format(node.layer_name, 0),
"{}_p{}".format(node.layer_name, 1)
],
**layer_attrs)
else:
if val_k.dtype != "int32":
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": val_k.name},
outputs=[val_k.name],
dtype=string('int32'))
self.paddle_graph.add_layer(
"paddle.topk",
inputs={"x": val_x.name,
"k": val_k.name},
outputs=[
"{}_p{}".format(node.layer_name, 0),
"{}_p{}".format(node.layer_name, 1)
],
**layer_attrs)
@print_mapping_info
def LRN(self, node):
......
......@@ -612,7 +612,7 @@ def prim_shape_dim(layer,
forward_func=[],
layer_id=None,
different_attrs=None):
line = "{} = fluid.layers.shape({})[{}]".format(
line = "{} = paddle.shape({})[{}]".format(
layer.outputs[0],
get_value(layer, "input", different_attrs),
get_value(layer, "dim", different_attrs))
......
......@@ -1189,129 +1189,66 @@ def aten___contains__(mapper, graph, node):
def aten_constant_pad_nd(mapper, graph, node):
""" 构造填充固定值的PaddleLayer。
TorchScript示例:
"""
TorchScript Code:
%58 : Tensor = aten::constant_pad_nd(%input1.24, %4876, %42)
Parameter meaning:
%58 (Tensor): output, the padded Tensor.
%input1.24 (Tensor): the Tensor to be padded.
%4876 (list): padding size.
%42 (-): padding value.
Parameter meaning:
%58 (Tensor): Output Tensor
%input1.24 (Tensor): Input Tensor
%4876 (list): pad
%42 (-): value
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("pad", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
# Output list
current_outputs = [output_name]
# Process input 0, i.e. %input1.24
# process Input Tensor
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["input"] = inputs_name[0]
# Process input 1, i.e. %4876
is_padding_tensor = False
# process pad
padding_attr = None
if inputs_name[1] in mapper.attrs:
layer_attrs["padding"] = mapper.attrs[inputs_name[1]]
padding_attr = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs, scope_name)
layer_inputs["pad"] = inputs_name[1]
is_padding_tensor = True
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
# Process input 2, i.e. %42
# process value
layer_attrs["value"] = mapper.attrs[inputs_name[2]]
if not is_padding_tensor:
graph.add_layer(
"prim.shape",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_shape"],
scope_name=scope_name)
graph.add_layer(
"prim.len",
inputs={"input": inputs_name[0] + "_shape"},
outputs=[inputs_name[0] + "_len"],
scope_name=scope_name)
def add_pad_layers(kernel, dim):
graph.add_layer(
"prim.ne",
inputs={"x": inputs_name[0] + "_len"},
outputs=[inputs_name[0] + "_cond"],
scope_name=scope_name,
y=dim)
if padding_attr is not None:
layer_inputs["x"] = inputs_name[0]
kernel_name = "paddle.nn.functional.pad"
if len(padding_attr) == 2:
layer_attrs["pad"] = [0, 0, 0, 0, 0, 0] + padding_attr
elif len(padding_attr) == 4:
layer_inputs["x"] = inputs_name[0]
layer_attrs["pad"] = [0, 0, 0, 0] + padding_attr
elif len(padding_attr) == 6:
layer_inputs["x"] = inputs_name[0]
layer_attrs["pad"] = [0, 0] + padding_attr
else:
layer_inputs["x"] = inputs_name[0]
layer_attrs["pad"] = padding_attr
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if", output_name],
scope_name=scope_name)
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
block.add_layer(
"prim.sub",
inputs={"y": inputs_name[0] + "_len"},
outputs=[inputs_name[0] + "_len0"],
scope_name=scope_name,
alpha=1.0,
x=dim)
block.add_layer(
"prim.len2list",
inputs={"len": inputs_name[0] + "_len0"},
outputs=[inputs_name[0] + "_list"],
scope_name=scope_name)
block.add_layer(
"paddle.unsqueeze",
inputs={"x": inputs_name[0],
"axis": inputs_name[0] + "_list"},
outputs=[inputs_name[0] + "_var"],
scope_name=scope_name)
block.add_layer(
kernel,
inputs={"input": inputs_name[0] + "_var"},
outputs=copy.deepcopy(layer_outputs),
scope_name=scope_name,
**layer_attrs)
block.add_layer(
"paddle.squeeze",
inputs={"x": output_name,
"axis": inputs_name[0] + "_list"},
outputs=[output_name],
scope_name=scope_name)
if_layer.add_block(block)
block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
layer_inputs["input"] = inputs_name[0]
block.add_layer(
kernel,
kernel_name,
inputs=layer_inputs,
outputs=layer_outputs,
outputs=[output_name],
scope_name=scope_name,
**layer_attrs)
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[0]
if_layer.inputs["input-1"] = inputs_name[0] + "_len"
if not is_padding_tensor:
if len(layer_attrs["padding"]) == 2:
layer_outputs[0] = layer_outputs[0].replace("pad", "pad1d")
add_pad_layers("paddle.nn.Pad1D", 3)
elif len(layer_attrs["padding"]) == 4:
layer_outputs[0] = layer_outputs[0].replace("pad", "pad2d")
add_pad_layers("paddle.nn.Pad2D", 4)
elif len(layer_attrs["padding"]) == 6:
layer_outputs[0] = layer_outputs[0].replace("pad", "pad3d")
add_pad_layers("paddle.nn.Pad3D", 5)
else:
raise Exception("The lenght of padding list must be 2, 4 or 6!")
else:
layer_inputs["input"] = inputs_name[0]
graph.add_layer(
"custom_layer:Pad",
inputs=layer_inputs,
outputs=[output_name],
scope_name=scope_name,
**layer_attrs)
current_inputs = list(layer_inputs.values())
return current_inputs, current_outputs
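A small sketch (assuming a 4-D NCHW input) of why the branches above left-fill the pad list with zeros: aten::constant_pad_nd orders pads from the last dimension backwards, while paddle.nn.functional.pad with a 2*ndim list pads from the first dimension to the last, so the torch values must sit at the tail:

torch_pad = [1, 2]                           # torch: pads the last dim only
paddle_pad = [0, 0, 0, 0, 0, 0] + torch_pad  # dims 0-2 untouched, dim 3 padded
assert len(paddle_pad) == 8                  # 2 * ndim for a 4-D tensor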
......@@ -6025,7 +5962,7 @@ def aten_upsample_bilinear2d(mapper, graph, node):
inputs={"input": inputs_name[1]},
outputs=[inputs_name[1] + "_isinstance"],
scope_name=scope_name,
cls="paddle.fluid.Variable")
cls="paddle.static.Variable")
# TODO(syf): paddle.Variable
graph.add_layer(
"prim.if", {"input": inputs_name[1] + "_isinstance"},
......@@ -6065,6 +6002,66 @@ def aten_upsample_bilinear2d(mapper, graph, node):
return current_inputs, current_outputs
def aten_upsample_trilinear3d(mapper, graph, node):
"""
TorchScript Code:
%4997 : Tensor = aten::upsample_trilinear3d(%x.13, %4963, %5421, %4995)
Parameter meaning:
%4997 (Tensor): Output Tensor
%x.13 (Tensor): Input Tensor
%4963 (list): output_size
%5421 (bool): align_corners
%4995 (float): scale_factors
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Output list
current_outputs = [output_name]
# process Input Tensor
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["x"] = inputs_name[0]
current_inputs = list(layer_inputs.values())
# process output_size
if inputs_name[1] in mapper.attrs:
layer_attrs["size"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs, scope_name)
layer_inputs["size"] = inputs_name[1]
current_inputs.append(inputs_name[1])
# process align_corners
if inputs_name[2] in mapper.attrs:
layer_attrs["align_corners"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs, scope_name)
layer_inputs["align_corners"] = inputs_name[2]
current_inputs.append(inputs_name[2])
# process scale_factor
if inputs_name[3] in mapper.attrs:
layer_attrs["scale_factor"] = mapper.attrs[inputs_name[3]]
else:
mapper._check_input(graph, inputs_node[3], inputs_name[3],
current_outputs, scope_name)
layer_inputs["scale_factor"] = inputs_name[3]
current_inputs.append(inputs_name[3])
layer_attrs["align_mode"] = 0
layer_attrs["mode"] = string("trilinear")
layer_attrs["data_format"] = string("NCDHW")
graph.add_layer(
"paddle.nn.functional.interpolate",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
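The mapping above targets paddle.nn.functional.interpolate in trilinear mode; a sketch with assumed NCDHW shapes:

import paddle

x = paddle.rand([1, 3, 8, 16, 16])  # NCDHW
y = paddle.nn.functional.interpolate(
    x,
    scale_factor=2.0,
    mode="trilinear",
    align_corners=False,
    align_mode=0,
    data_format="NCDHW")
print(y.shape)  # [1, 3, 16, 32, 32]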
def aten_upsample_nearest2d(mapper, graph, node):
""" 构造使用nearest上采样的PaddleLayer。
TorchScript示例:
......@@ -6103,7 +6100,7 @@ def aten_upsample_nearest2d(mapper, graph, node):
inputs={"input": inputs_name[1]},
outputs=[inputs_name[1] + "_isinstance"],
scope_name=scope_name,
cls="paddle.fluid.Variable")
cls="paddle.static.Variable")
# TODO(syf): paddle.Variable
graph.add_layer(
"prim.if", {"input": inputs_name[1] + "_isinstance"},
......
......@@ -14,7 +14,7 @@
import paddle
from paddle.nn.functional import instance_norm
from paddle.fluid.initializer import Constant
from paddle.nn.initializer import Constant
class InstanceNorm(paddle.nn.Layer):
......
......@@ -40,3 +40,7 @@ from .trace_fc_fuser import TraceFcFuser
from .trace_fc_fuse_pass import TraceFcFusePass
from .onnx_layernorm_fuser import LayerNormFuser
from .onnx_layernorm_fuse_pass import LayerNormFusePass
from .onnx_gelu_fuser import GeluFuser
from .onnx_gelu_fuse_pass import GeluFusePass
from .replace_div_to_scale import Div2Scale
from .replace_div_to_scale_pass import Div2ScalePass
......@@ -46,7 +46,7 @@ class InterpolateBilinearFuser(FuseBase):
if x2271 :
x2274 = x2197[0]
x2275 = x2197[1]
x2233_isinstance = isinstance(x2233, paddle.fluid.Variable)
x2233_isinstance = isinstance(x2233, paddle.static.Variable)
if x2233_isinstance :
x2233 = x2233.numpy().tolist()
x2276 = paddle.nn.functional.interpolate(x=x2181, size=x2233, scale_factor=x2274, align_corners=False, align_mode=0, mode='bilinear')
......@@ -146,7 +146,7 @@ class InterpolateBilinearFuser(FuseBase):
"prim.isinstance",
inputs={"input": "interpolate-input-3"},
outputs=["interpolate-input-0_isinstance"],
cls="paddle.fluid.Variable")
cls="paddle.static.Variable")
pattern_block_block.add_layer(
"prim.if", {"input": "interpolate-input-0_isinstance"},
outputs=["interpolate-input-0_if1"])
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import GeluFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class GeluFusePass(Pass):
name = "onnx_gelu_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = GeluFuser()
fuser.operate(graph, match_kind="edge")
# register gelu pass
onnx_gelu_fuse_pass = GeluFusePass()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class GeluFuser(FuseBase):
def __init__(self):
super(GeluFuser, self).__init__()
def build_pattern(self):
"""
code description:
x2paddle_332 = paddle.full(dtype='float32', shape=[1], fill_value=1.4142135381698608)
x2paddle_335 = paddle.full(dtype='float32', shape=[1], fill_value=1.0)
x2paddle_338 = paddle.full(dtype='float32', shape=[1], fill_value=0.5)
x2paddle_333 = paddle.divide(x=x2paddle_331, y=x2paddle_332)
x2paddle_334 = paddle.erf(x=x2paddle_333)
x2paddle_336 = paddle.add(x=x2paddle_334, y=x2paddle_335)
x2paddle_337 = paddle.multiply(x=x2paddle_331, y=x2paddle_336)
x2paddle_339 = paddle.multiply(x=x2paddle_337, y=x2paddle_338)
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(0)],
shape=[1],
fill_value=1.4142135381698608)
self.pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(1)],
shape=[1],
fill_value=1.0)
self.pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(2)],
shape=[1],
fill_value=0.5)
self.pattern.add_layer(
"paddle.divide",
inputs={"x": "gelu-input-0",
"y": gen_name(0)},
outputs=[gen_name(3)])
self.pattern.add_layer(
"paddle.erf", inputs={"x": gen_name(3)}, outputs=[gen_name(4)])
self.pattern.add_layer(
"paddle.add",
inputs={"x": gen_name(4),
"y": gen_name(1)},
outputs=[gen_name(5)])
self.pattern.add_layer(
"paddle.multiply",
inputs={"x": "gelu-input-0",
"y": gen_name(5)},
outputs=[gen_name(6)])
self.pattern.add_layer(
"paddle.multiply",
inputs={"x": gen_name(6),
"y": gen_name(2)},
outputs=[gen_name(7)])
self.pattern.build(inputs={"input-0": "gelu-input-0", })
def insert_new_layer(self, graph, parameters, matches):
new_layer, new_layer_id = self.gen_new_layer(parameters, matches)
graph.layers[new_layer_id] = new_layer
matches.pop(new_layer_id)
def gen_new_layer(self, parameters, matches):
layer_id_list = list(matches.keys())
layer_id_list.sort(key=int)
layer_inputs = list()
layer_inputs_ids = list()
fill_value_list = list()
for layer_id, layer in matches.items():
if layer.kernel == "paddle.divide":
layer_inputs.append(layer.inputs["x"])
layer_inputs_ids.append(layer_id)
if layer.kernel == "paddle.multiply":
output_name = layer.outputs[0]
new_layer = PaddleLayer(
layer_id_list[0],
"paddle.nn.GELU",
inputs={"x": layer_inputs[0]},
outputs=[output_name],
approximate=False)
return new_layer, layer_inputs_ids[0]
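A numeric check that the matched subgraph is exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))), so substituting paddle.nn.GELU with approximate=False preserves the output; 1.4142135381698608 is sqrt(2) rounded to float32, and scipy is assumed available purely for the reference erf:

import numpy as np
from scipy.special import erf

x = np.linspace(-3.0, 3.0, 7)
pattern = x / 1.4142135381698608   # paddle.divide
pattern = erf(pattern)             # paddle.erf
pattern = pattern + 1.0            # paddle.add
pattern = x * pattern * 0.5        # the two paddle.multiply layers
reference = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))
assert np.allclose(pattern, reference, atol=1e-6)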
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Div2Scale(FuseBase):
def __init__(self):
super(Div2Scale, self).__init__()
def build_pattern(self):
"""
code description:
x2paddle_296 = paddle.full(dtype='float32', shape=[1], fill_value=8.0)
x2paddle_293 = paddle.transpose(x=x2paddle_292, perm=[0, 2, 1, 3])
x2paddle_294 = paddle.transpose(x=x2paddle_260, perm=[0, 2, 3, 1])
x2paddle_295 = paddle.matmul(x=x2paddle_293, y=x2paddle_294)
x2paddle_297 = paddle.divide(x=x2paddle_295, y=x2paddle_296)
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"paddle.full",
inputs={},
outputs=[gen_name(0)],
shape=[1],
fill_value=8)
self.pattern.add_layer(
"paddle.transpose",
inputs={"x": "div2scale-input-0"},
outputs=[gen_name(1)],
perm=[0, 2, 1, 3])
self.pattern.add_layer(
"paddle.transpose",
inputs={"x": "div2scale-input-1"},
outputs=[gen_name(2)],
perm=[0, 2, 1, 3])
self.pattern.add_layer(
"paddle.matmul",
inputs={"x": gen_name(1),
"y": gen_name(2)},
outputs=[gen_name(3)])
self.pattern.add_layer(
"paddle.divide",
inputs={"x": gen_name(3),
"y": gen_name(0)},
outputs=[gen_name(4)])
self.pattern.build(inputs={
"input-0": "div2scale-input-0",
"input-1": "div2scale-input-1",
})
def insert_new_layer(self, graph, parameters, matches):
new_layer, new_layer_id = self.gen_new_layer(parameters, matches)
graph.layers[new_layer_id] = new_layer
matches_copy = copy.deepcopy(matches)
for layer_id, layer in matches_copy.items():
if layer.kernel in ["paddle.transpose", "paddle.matmul"]:
matches.pop(layer_id)
matches.pop(new_layer_id)
def gen_new_layer(self, parameters, matches):
layer_id_list = list(matches.keys())
layer_id_list.sort(key=int)
layer_inputs = list()
layer_inputs_ids = list()
fill_value = 0
for layer_id, layer in matches.items():
if layer.kernel == "paddle.full":
fill_value = layer.attrs["fill_value"]
if layer.kernel == "paddle.divide":
layer_inputs.append(layer.inputs["x"])
layer_inputs_ids.append(layer_id)
output_name = layer.outputs[0]
new_layer = PaddleLayer(
layer_id_list[0],
"paddle.scale",
inputs={"x": layer_inputs[0]},
outputs=[output_name],
scale=1 / fill_value)
return new_layer, layer_inputs_ids[0]
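The rewrite relies on division by a scalar constant being equivalent to paddle.scale with scale=1/fill_value; a quick numpy check using the fill value of 8.0 from the docstring:

import numpy as np

c = 8.0
x = np.random.rand(2, 4, 16, 16).astype("float32")
assert np.allclose(x / c, x * (1.0 / c))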
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import Div2Scale
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Div2ScalePass(Pass):
name = "replace_div_to_scale_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = Div2Scale()
fuser.operate(graph, match_kind="edge")
# register huggingface div2scale pass
replace_div_to_scale_pass = Div2ScalePass()
......@@ -118,7 +118,9 @@ class TraceFcFuser(FuseBase):
(1, 0))
self.rm_params.add(weight_name)
bias_numpy = parameters[bias_name]
parameters["{}.bias".format(linear_name)] = np.squeeze(bias_numpy)
if len(bias_numpy.shape) == 2:
bias_numpy = np.squeeze(bias_numpy)
parameters["{}.bias".format(linear_name)] = bias_numpy
self.rm_params.add(bias_name)
new_layer = PaddleLayer(
layers_id[0],
......
......@@ -37,7 +37,11 @@ class GraphOptimizer(object):
"prelu_fuse_pass", "transpose_eliminate_pass"
]
elif source_frame == "onnx":
self.passes = ["onnx_layernorm_fuse_pass"]
self.passes = [
"onnx_layernorm_fuse_pass",
"onnx_gelu_fuse_pass",
"replace_div_to_scale_pass",
]
else:
self.passes = []
......@@ -54,7 +58,7 @@ class GraphOptimizer(object):
before_len = len(graph.layers)
pass_.apply(graph)
after_len = len(graph.layers)
if after_len < before_len:
if after_len <= before_len:
show_pass_log = True
if before_len == after_len:
break
......
......@@ -103,15 +103,7 @@ class PaddleDtypes():
self.t_int64 = paddle.int64
self.t_bool = paddle.bool
else:
self.t_float16 = "paddle.fluid.core.VarDesc.VarType.FP16"
self.t_float32 = "paddle.fluid.core.VarDesc.VarType.FP32"
self.t_float64 = "paddle.fluid.core.VarDesc.VarType.FP64"
self.t_uint8 = "paddle.fluid.core.VarDesc.VarType.UINT8"
self.t_int8 = "paddle.fluid.core.VarDesc.VarType.INT8"
self.t_int16 = "paddle.fluid.core.VarDesc.VarType.INT16"
self.t_int32 = "paddle.fluid.core.VarDesc.VarType.INT32"
self.t_int64 = "paddle.fluid.core.VarDesc.VarType.INT64"
self.t_bool = "paddle.fluid.core.VarDesc.VarType.BOOL"
raise Exception("Paddle>=2.0.0 is required, Please update version!")
is_new_version = check_version()
......