PaddlePaddle / X2Paddle
Commit 5aad32ff
Authored on Jun 24, 2022 by wjj19950828
Merge remote-tracking branch 'upstream/develop' into fixed_Misspell
Parents: feb0f157, 2721567b

Showing 26 changed files with 819 additions and 228 deletions (+819 −228).
Changed files:

- docs/inference_model_convertor/op_list.md (+1 −1)
- x2paddle/__init__.py (+1 −1)
- x2paddle/convert.py (+32 −0)
- x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/detectionoutput.py (+1 −1)
- x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/normalize.py (+1 −2)
- x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/priorbox.py (+82 −3)
- x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/roipooling.py (+46 −3)
- x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/select.py (+0 −1)
- x2paddle/op_mapper/caffe2paddle/caffe_op_mapper.py (+5 −5)
- x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/__init__.py (+2 −0)
- x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/nms.py (+6 −6)
- x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/roi_align.py (+80 −0, new file)
- x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/roi_pooling.py (+73 −0, new file)
- x2paddle/op_mapper/onnx2paddle/opset9/opset.py (+105 −93)
- x2paddle/op_mapper/prim2code.py (+1 −1)
- x2paddle/op_mapper/pytorch2paddle/aten.py (+93 −96)
- x2paddle/op_mapper/pytorch2paddle/pytorch_custom_layer/instance_norm.py (+1 −1)
- x2paddle/optimizer/fusion/__init__.py (+4 −0)
- x2paddle/optimizer/fusion/interpolate_bilinear_fuser.py (+2 −2)
- x2paddle/optimizer/fusion/onnx_gelu_fuse_pass.py (+33 −0, new file)
- x2paddle/optimizer/fusion/onnx_gelu_fuser.py (+108 −0, new file)
- x2paddle/optimizer/fusion/replace_div_to_scale.py (+99 −0, new file)
- x2paddle/optimizer/fusion/replace_div_to_scale_pass.py (+33 −0, new file)
- x2paddle/optimizer/fusion/trace_fc_fuser.py (+3 −1)
- x2paddle/optimizer/optimizer.py (+6 −2)
- x2paddle/utils.py (+1 −9)
docs/inference_model_convertor/op_list.md

```diff
@@ -117,7 +117,7 @@ Aten:
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft_rfftn |
 | 129 | aten::fft_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
 | 133 | aten::rsqrt | 134 | aten::replication_pad1d | 135 | aten::full | 136 | aten::group_norm |
-| 137 | aten::argmax | 138 | aten::copy | | | | |
+| 137 | aten::argmax | 138 | aten::copy | 139 | aten::upsample_trilinear3d | | |

 Prim:
 | 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
```
x2paddle/__init__.py

```diff
-__version__ = "1.3.6"
+__version__ = "1.3.7"

 from .core.program import PaddleGraph
```
x2paddle/convert.py

The same eight-line "convert survey" logging block is appended after the successful ConverterCheck(...).start() call in each of the four converter entry points. The tf2paddle hunk is shown in full; the hunks at @@ -240,6 +248,14 @@ (caffe2paddle), @@ -293,6 +309,14 @@ (onnx2paddle) and @@ -364,6 +388,14 @@ (pytorch2paddle) add the identical block.

```diff
@@ -192,6 +192,14 @@ def tf2paddle(model_path,
         ConverterCheck(
             task="TensorFlow", time_info=time_info,
             lite_state="Success").start()
+    # for convert survey
+    logging.info("================================================")
+    logging.info("")
+    logging.info(
+        "Model Convertd! Fill this survey to help X2Paddle better, https://iwenjuan.baidu.com/?code=npyd51 "
+    )
+    logging.info("")
+    logging.info("================================================")

 def caffe2paddle(proto_file,
```
x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/detectionoutput.py

```diff
@@ -34,7 +34,7 @@ class DetectionOutput(object):
         pbv = priorbox_list[1]
         pb = paddle.reshape(x=pb, shape=[-1, 4])
         pbv = paddle.reshape(x=pbv, shape=[-1, 4])
-        pb_dim = fluid.layers.shape(pb)[0]
+        pb_dim = paddle.shape(pb)[0]
         loc = paddle.reshape(x0, shape=[-1, pb_dim, 4])
         conf_flatten = paddle.reshape(x1, shape=[0, pb_dim, -1])
         out = fluid.layers.detection_output(
```
x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/normalize.py

```diff
@@ -13,7 +13,6 @@
 # limitations under the License.

 import paddle
-import paddle.fluid as fluid


 class Normalize(object):
@@ -21,7 +20,7 @@ class Normalize(object):
         self.axis = axis

     def __call__(self, x, param):
-        l2_norm = fluid.layers.l2_normalize(x=x, axis=1)
+        l2_norm = paddle.norm(x=x, p=2, axis=1, keepdim=True)
         param = paddle.reshape(param, [param.shape[-1]])
         perm = list(range(len(l2_norm.shape)))
         perm.pop(self.axis)
```
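As a side note, a minimal sketch (not part of the diff) of what the new norm call produces: paddle.norm with p=2 and keepdim=True returns the channel-wise L2 norm, whereas the old fluid.layers.l2_normalize returned the already-normalized tensor. How the norm is applied to x afterwards happens in the rest of Normalize.__call__, which this hunk does not show, so the division below is an assumption.

```python
import paddle

x = paddle.rand([1, 8, 4, 4])
# channel-wise L2 norm, kept broadcastable: shape [1, 1, 4, 4]
l2_norm = paddle.norm(x=x, p=2, axis=1, keepdim=True)
# roughly what fluid.layers.l2_normalize(x, axis=1) used to return directly
x_normalized = x / l2_norm
```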
x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/priorbox.py

Hunk @@ -13,7 +13,87 @@ removes `import paddle.fluid as fluid` and adds the imports and the prior_box helper below; hunk @@ -32,8 +112,7 @@ switches PriorBox.__call__ from fluid.layers.prior_box to this helper.

```python
# added in place of the fluid import
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype


@paddle.jit.not_to_static
def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              min_max_aspect_ratios_order=False,
              name=None):
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(input, 'input',
                             ['uint8', 'int8', 'float32', 'float64'],
                             'prior_box')

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    cur_max_sizes = None
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        cur_max_sizes = max_sizes

    if in_dynamic_mode():
        attrs = ('min_sizes', min_sizes, 'aspect_ratios', aspect_ratios,
                 'variances', variance, 'flip', flip, 'clip', clip, 'step_w',
                 steps[0], 'step_h', steps[1], 'offset', offset,
                 'min_max_aspect_ratios_order', min_max_aspect_ratios_order)
        if cur_max_sizes is not None:
            attrs += ('max_sizes', cur_max_sizes)
        box, var = _C_ops.prior_box(input, image, *attrs)
        return box, var
    else:
        attrs = {
            'min_sizes': min_sizes,
            'aspect_ratios': aspect_ratios,
            'variances': variance,
            'flip': flip,
            'clip': clip,
            'step_w': steps[0],
            'step_h': steps[1],
            'offset': offset,
            'min_max_aspect_ratios_order': min_max_aspect_ratios_order
        }
        if cur_max_sizes is not None:
            attrs['max_sizes'] = cur_max_sizes
        box = helper.create_variable_for_type_inference(dtype)
        var = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="prior_box",
            inputs={"Input": input,
                    "Image": image},
            outputs={"Boxes": box,
                     "Variances": var},
            attrs=attrs, )
        box.stop_gradient = True
        var.stop_gradient = True
        return box, var
```

```diff
@@ -32,8 +112,7 @@ class PriorBox(object):
         }

     def __call__(self, x0, x1):
-        box, var = fluid.layers.prior_box(
-            input=x0, image=x1, **self.priorbox_layer_attrs)
+        box, var = prior_box(
+            input=x0, image=x1, **self.priorbox_layer_attrs)
         box = paddle.reshape(x=box, shape=[1, 1, -1])
         var = paddle.reshape(x=var, shape=[1, 1, -1])
         out = paddle.concat(x=[box, var], axis=1)
```
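For orientation, a hedged usage sketch of the prior_box wrapper defined above (not part of the diff): the shapes and attribute values are illustrative assumptions, and the dygraph branch relies on the legacy _C_ops attribute-list calling convention of the Paddle release this commit targets (around 2.3).

```python
import paddle

feat = paddle.rand([1, 512, 19, 19])    # feature map (assumed shape)
image = paddle.rand([1, 3, 300, 300])   # network input (assumed shape)
box, var = prior_box(
    feat, image,
    min_sizes=[100.0], max_sizes=[150.0],
    aspect_ratios=[2.0], steps=[16.0, 16.0], offset=0.5)
# box holds the generated prior boxes, var the matching variances
```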
x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/roipooling.py

Hunk @@ -13,7 +13,51 @@ replaces the fluid import with the same _C_ops/LayerHelper imports as priorbox.py plus a local roi_pool helper; hunk @@ -26,6 +70,5 @@ switches ROIPooling.__call__ to it.

```python
# added in place of the fluid import
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype


@paddle.jit.not_to_static
def roi_pool(input,
             rois,
             pooled_height,
             pooled_width,
             spatial_scale=1.0,
             rois_num=None,
             name=None):
    if in_dynamic_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        pool_out, argmaxes = _C_ops.roi_pool(
            input, rois, rois_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale)
        return pool_out, argmaxes
    else:
        check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
        check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
        helper = LayerHelper('roi_pool', **locals())
        dtype = helper.input_dtype()
        pool_out = helper.create_variable_for_type_inference(dtype)
        argmaxes = helper.create_variable_for_type_inference(dtype='int32')
        inputs = {
            "X": input,
            "ROIs": rois,
        }
        if rois_num is not None:
            inputs['RoisNum'] = rois_num
        helper.append_op(
            type="roi_pool",
            inputs=inputs,
            outputs={"Out": pool_out,
                     "Argmax": argmaxes},
            attrs={
                "pooled_height": pooled_height,
                "pooled_width": pooled_width,
                "spatial_scale": spatial_scale
            })
        return pool_out, argmaxes
```

```diff
@@ -26,6 +70,5 @@ class ROIPooling(object):
     def __call__(self, x0, x1):
         slice_x1 = paddle.slice(input=x1, axes=[1], starts=[1], ends=[5])
-        out = fluid.layers.roi_pool(
-            input=x0, rois=slice_x1, **self.roipooling_layer_attrs)
+        out = roi_pool(
+            input=x0, rois=slice_x1, **self.roipooling_layer_attrs)
         return out
```
x2paddle/op_mapper/caffe2paddle/caffe_custom_layer/select.py

```diff
@@ -13,7 +13,6 @@
 # limitations under the License.

 import paddle
-import paddle.fluid as fluid


 class Select(object):
```
x2paddle/op_mapper/caffe2paddle/caffe_op_mapper.py

```diff
@@ -429,13 +429,13 @@ class CaffeOpMapper():
         assert params.local_size % 2 == 1
         alpha = params.alpha / float(params.local_size)
         layer_attrs = {
-            "n": params.local_size,
-            "k": params.k,
+            "size": params.local_size,
             "alpha": alpha,
             "beta": params.beta,
+            "k": params.k,
         }
         self.paddle_graph.add_layer(
-            "paddle.fluid.layers.lrn",
+            "paddle.nn.LocalResponseNorm",
             inputs={"input": input.name},
             outputs=[node.layer_name],
             **layer_attrs)
@@ -1209,10 +1209,10 @@ class CaffeOpMapper():
         input = self.graph.get_input_node(node, idx=0, copy=True)
         params = node.layer.shuffle_channel_param
         self.paddle_graph.add_layer(
-            "paddle.fluid.layers.shuffle_channel",
+            "paddle.nn.functional.channel_shuffle",
             inputs={"x": input.name},
             outputs=[node.layer_name],
-            group=params.group)
+            groups=params.group)

     def Upsample(self, node):
         assert len(
```
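A hedged sketch (not from the diff) of the two Paddle 2.x APIs the mapper now emits; the parameter values are illustrative, not taken from a real Caffe model, and channel_shuffle assumes a Paddle release of roughly 2.3 or newer.

```python
import paddle

x = paddle.rand([1, 8, 16, 16])
# replacement for fluid.layers.lrn: size/alpha/beta/k mirror the Caffe LRN params
lrn = paddle.nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1.0)
y_lrn = lrn(x)
# replacement for fluid.layers.shuffle_channel: note the attribute is now `groups`
y_shuffle = paddle.nn.functional.channel_shuffle(x, groups=2)
```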
x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/__init__.py

```diff
@@ -18,3 +18,5 @@ from .pad_all_dim2 import PadAllDim2
 from .pad_all_dim4 import PadAllDim4
 from .pad_all_dim4_one_input import PadAllDim4WithOneInput
 from .nms import NMS
+from .roi_align import ROIAlign
+from .roi_pooling import ROIPooling
```
x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/nms.py

```diff
@@ -13,9 +13,9 @@
 # limitations under the License.

 import paddle
-from paddle.fluid import core
-from paddle.fluid.framework import Variable, in_dygraph_mode
-from paddle.fluid.layer_helper import LayerHelper
+from paddle import _C_ops
+from paddle import in_dynamic_mode
+from paddle.common_ops_import import Variable, LayerHelper


 def multiclass_nms(bboxes,
@@ -33,13 +33,13 @@ def multiclass_nms(bboxes,
                    name=None):
     helper = LayerHelper('multiclass_nms3', **locals())

-    if in_dygraph_mode():
+    if in_dynamic_mode():
         attrs = ('background_label', background_label, 'score_threshold',
                  score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
                  nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,
                  'normalized', normalized)
-        output, index, nms_rois_num = core.ops.multiclass_nms3(bboxes, scores,
-                                                               rois_num, *attrs)
+        output, index, nms_rois_num = _C_ops.multiclass_nms3(bboxes, scores,
+                                                             rois_num, *attrs)
         if not return_index:
             index = None
         return output, nms_rois_num, index
```
x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/roi_align.py (new file, 0 → 100644; standard Apache-2.0 header omitted here)

```python
import paddle
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype


@paddle.jit.not_to_static
def roi_align(input,
              rois,
              pooled_height,
              pooled_width,
              spatial_scale=1.0,
              sampling_ratio=-1,
              rois_num=None,
              aligned=False,
              name=None):
    if in_dynamic_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        align_out = _C_ops.roi_align(
            input, rois, rois_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale,
            "sampling_ratio", sampling_ratio, "aligned", aligned)
        return align_out
    else:
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'roi_align')
        check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
                                 'roi_align')
        helper = LayerHelper('roi_align', **locals())
        dtype = helper.input_dtype()
        align_out = helper.create_variable_for_type_inference(dtype)
        inputs = {
            "X": input,
            "ROIs": rois,
        }
        if rois_num is not None:
            inputs['RoisNum'] = rois_num
        helper.append_op(
            type="roi_align",
            inputs=inputs,
            outputs={"Out": align_out},
            attrs={
                "pooled_height": pooled_height,
                "pooled_width": pooled_width,
                "spatial_scale": spatial_scale,
                "sampling_ratio": sampling_ratio,
                "aligned": aligned,
            })
        return align_out


class ROIAlign(object):
    def __init__(self, pooled_height, pooled_width, spatial_scale,
                 sampling_ratio):
        self.roialign_layer_attrs = {
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale,
            'sampling_ratio': sampling_ratio,
        }

    def __call__(self, x0, x1, x2):
        out = roi_align(
            input=x0, rois=x1, rois_num=x2, **self.roialign_layer_attrs)
        return out
```
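A hedged usage sketch of the ROIAlign custom layer defined above (not part of the diff); shapes, box coordinates, and attribute values are illustrative assumptions, and the dygraph branch relies on the legacy _C_ops calling convention of the Paddle release this commit targets.

```python
import paddle

feat = paddle.rand([1, 256, 32, 32])                      # feature map (assumed shape)
rois = paddle.to_tensor([[4.0, 4.0, 20.0, 20.0]])         # one box in xyxy form
rois_num = paddle.to_tensor([1], dtype="int32")           # boxes per image
align = ROIAlign(pooled_height=7, pooled_width=7,
                 spatial_scale=0.25, sampling_ratio=2)
out = align(feat, rois, rois_num)                         # expected shape [1, 256, 7, 7]
```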
x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/roi_pooling.py (new file, 0 → 100644; standard Apache-2.0 header omitted here)

The file defines a roi_pool helper identical to the one added in caffe_custom_layer/roipooling.py above (dygraph path via _C_ops.roi_pool, static path via LayerHelper.append_op), followed by the ONNX custom layer wrapper:

```python
class ROIPooling(object):
    def __init__(self, pooled_height, pooled_width, spatial_scale):
        self.roipooling_layer_attrs = {
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale
        }

    def __call__(self, x0, x1):
        out = roi_pool(input=x0, rois=x1, **self.roipooling_layer_attrs)
        return out
```
x2paddle/op_mapper/onnx2paddle/opset9/opset.py

@@ -262,6 +262,8 @@ (Constant): guard against infinite scalar weights before emitting paddle.full:

```diff
         shape = node.out_shapes[0]
         if hasattr(node.weight, "shape") and len(node.weight.shape) == 0:
+            if node.weight == float('inf') or node.weight == float('-inf'):
+                node.weight = string(node.weight)
             self.paddle_graph.add_layer(
                 "paddle.full",
                 inputs={},
```

@@ -536,12 +538,14 @@ and @@ -558,7 +562,7 @@ (RoiAlign / MaxRoiPool): both ops are now mapped to the new ONNX custom layers instead of the removed fluid APIs, with rois_num passed through to ROIAlign:

```diff
             'pooled_width': pooled_width,
             'spatial_scale': spatial_scale,
             'sampling_ratio': sampling_ratio,
         }
         self.paddle_graph.add_layer(
-            'paddle.fluid.layers.roi_align',
-            inputs={'input': val_x.name,
-                    'rois': val_rois.name},
+            'custom_layer:ROIAlign',
+            inputs={
+                'input': val_x.name,
+                'rois': val_rois.name,
+                'rois_num': val_rois_num
+            },
             outputs=[node.name],
             **layer_attrs)
```

```diff
             'spatial_scale': spatial_scale,
         }
         self.paddle_graph.add_layer(
-            'paddle.fluid.layers.roi_pool',
+            'custom_layer:ROIPooling',
             inputs={'input': val_x.name,
                     'rois': val_rois.name},
             outputs=[node.name],
```

@@ -792,6 +796,8 @@ and @@ -1191,6 +1209,8 @@ (ConstantOfShape): the same ±inf guard is added around the scalar fill value:

```diff
         if len(value) == 1:
             value = value.tolist()
             value = value[0]
+            if value == float('inf') or value == float('-inf'):
+                value = string(value)
             self.paddle_graph.add_layer(
                 "paddle.full",
                 inputs={},
```

```diff
                 'this is not supported')
         if len(value) == 1:
             value = value[0]
+            if value == float('inf') or value == float('-inf'):
+                value = string(value)
         layer_attrs = {'dtype': string(dtype), 'fill_value': value}
         self.paddle_graph.add_layer(
             "paddle.full",
```

@@ -1093,6 +1099,12 @@, @@ -1121,8 +1133,9 @@ and @@ -1178,11 +1191,16 @@ (Slice): uint8 inputs are cast to int32 before slicing and back to uint8 afterwards, and the start-clamping check now tolerates unknown output shapes:

```diff
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         starts, ends, axes, steps = None, None, None, None
         layer_attrs = {}
+        if val_x.dtype == 'uint8':
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={"x": val_x.name},
+                outputs=[val_x.name],
+                dtype=string('int32'))
         if len(node.inputs) > 1:
             starts = self.graph.get_input_node(node, idx=1, copy=True)
             ends = self.graph.get_input_node(node, idx=2, copy=True)
```

```diff
                 starts_value = starts_value.copy()
                 ends_value = ends_value.copy()
                 for idx in range(len(ends_value)):
-                    if starts_value[idx] >= val_x.out_shapes[0][axes[
-                            idx]] and val_x.out_shapes[0][axes[idx]] > 0:
+                    if len(val_x.out_shapes[0]) != 0 and starts_value[
+                            idx] >= val_x.out_shapes[0][axes[
+                                idx]] and val_x.out_shapes[0][axes[idx]] > 0:
                         starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
                         ends_value[idx] = val_x.out_shapes[0][axes[idx]]
                     elif ends_value[idx] > 2**31 - 1:
```

```diff
             inputs={"input": val_x.name},
             outputs=[node.name],
             **layer_attrs)
+        if val_x.dtype == 'uint8':
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                dtype=string('uint8'))

     @print_mapping_info
     def ConstantOfShape(self, node):
```

@@ -1550,20 +1570,37 @@ (Flatten): the unconditional reshape-based implementation is replaced so that an empty (unknown) output_shape falls back to paddle.flatten followed by paddle.reshape. New body:

```python
        val_x = self.graph.get_input_node(node, idx=0, copy=True)
        output_shape = val_x.out_shapes[0]
        axis = node.get_attr('axis', 1)
        if axis == 0:
            self.paddle_graph.add_layer(
                'paddle.reshape',
                inputs={"x": val_x.name},
                outputs=[node.name],
                shape=[1, -1])
        else:
            if len(output_shape) != 0:
                shape_list = [1, 1]
                for s in output_shape[:axis]:
                    shape_list[0] *= s
                for s in output_shape[axis:]:
                    shape_list[1] *= s
                self.paddle_graph.add_layer(
                    'paddle.reshape',
                    inputs={"x": val_x.name},
                    outputs=[node.name],
                    shape=shape_list)
            else:
                # flatten + reshape
                self.paddle_graph.add_layer(
                    "paddle.flatten",
                    inputs={"input": val_x.name},
                    outputs=[val_x.name + "_flatten"],
                    start_axis=[0],
                    stop_axis=[axis])
                self.paddle_graph.add_layer(
                    'paddle.reshape',
                    inputs={'x': val_x.name + "_flatten"},
                    outputs=[node.name],
                    shape=[0, -1])
```

@@ -1790,7 +1827,11 @@ (Squeeze): axes may now come from a second input, and scalar (0-D) tensors are handled:

```diff
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         axes = node.get_attr('axes')
-        if len(val_x.out_shapes[0]) == 1:
+        if axes is None:
+            axes_node = self.graph.get_input_node(node, idx=1, copy=True)
+            axes = _const_weight_or_none(axes_node, necessary=True)
+        # deal with scalar(0D) tensor
+        if len(val_x.out_shapes[0]) <= 1 and len(axes) == 1 and axes[0] == 0:
             self.paddle_graph.add_layer(
                 "paddle.cast",
                 inputs={"x": val_x.name},
```

@@ -1829,69 +1870,25 @@ (Where and NonZero): the old logical_not / cast / multiply / add emulation of Where and the rank-dependent nonzero / transpose / split / concat emulation of NonZero are removed; the new mappings are:

```python
        # Where
        val_x = self.graph.get_input_node(node, idx=1, copy=True)
        val_y = self.graph.get_input_node(node, idx=2, copy=True)
        self.paddle_graph.add_layer(
            "paddle.where",
            inputs={
                'condition': condition.name,
                'x': val_x.name,
                'y': val_y.name
            },
            outputs=[node.name])
```

```python
        # NonZero
        val_x = self.graph.get_input_node(node, idx=0, copy=True)
        self.paddle_graph.add_layer(
            "paddle.nonzero",
            inputs={"x": val_x.name},
            outputs=[val_x.name],
            as_tuple=True)
        self.paddle_graph.add_layer(
            "paddle.concat", inputs={"x": val_x.name}, outputs=[node.name])
```

@@ -2565,27 +2562,42 @@ (TopK): k is used as an attribute when it can be resolved at convert time and only falls back to a tensor input (with an int32 cast) otherwise:

```python
        val_x = self.graph.get_input_node(node, idx=0, copy=True)
        val_k = self.graph.get_input_node(node, idx=1, copy=True)
        layer_attrs = dict()
        layer_attrs["axis"] = node.get_attr('axis', -1)
        layer_attrs["largest"] = True if node.get_attr('largest',
                                                       1) == 1 else False
        layer_attrs["sorted"] = True if node.get_attr('sorted',
                                                      1) == 1 else False
        k = _const_weight_or_none(val_k)
        if isinstance(k, (list, tuple, np.ndarray)):
            k = k[0]
        # If k can get the value directly, it is used as an attribute; otherwise it is used as an input tensor
        if k is not None:
            layer_attrs["k"] = k
            self.paddle_graph.add_layer(
                "paddle.topk",
                inputs={"x": val_x.name},
                outputs=[
                    "{}_p{}".format(node.layer_name, 0),
                    "{}_p{}".format(node.layer_name, 1)
                ],
                **layer_attrs)
        else:
            if val_k.dtype != "int32":
                self.paddle_graph.add_layer(
                    "paddle.cast",
                    inputs={"x": val_k.name},
                    outputs=[val_k.name],
                    dtype=string('int32'))
            self.paddle_graph.add_layer(
                "paddle.topk",
                inputs={"x": val_x.name,
                        "k": val_k.name},
                outputs=[
                    "{}_p{}".format(node.layer_name, 0),
                    "{}_p{}".format(node.layer_name, 1)
                ],
                **layer_attrs)
```
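A hedged sketch (not from the diff) of why the TopK mapper can emit either form: paddle.topk accepts k as a plain Python int (an attribute in the generated code) or as a Tensor (an extra input), which mirrors the two branches above. The values are illustrative.

```python
import paddle

x = paddle.to_tensor([[1.0, 5.0, 3.0, 2.0]])
# k resolved at convert time -> emitted as an attribute
values, indices = paddle.topk(x, k=2, axis=-1, largest=True, sorted=True)
# k only known at run time -> emitted as a tensor input
k_t = paddle.to_tensor(2, dtype="int32")
values_t, indices_t = paddle.topk(x, k=k_t, axis=-1, largest=True, sorted=True)
```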
x2paddle/op_mapper/prim2code.py

```diff
@@ -612,7 +612,7 @@ def prim_shape_dim(layer,
                    forward_func=[],
                    layer_id=None,
                    different_attrs=None):
-    line = "{} = fluid.layers.shape({})[{}]".format(
+    line = "{} = paddle.shape({})[{}]".format(
         layer.outputs[0],
         get_value(layer, "input", different_attrs),
         get_value(layer, "dim", different_attrs))
```
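For reference, a minimal sketch (not from the diff) of the code string this now emits: paddle.shape returns the shape as a tensor, so indexing a single dimension works the same way the old fluid.layers.shape call did.

```python
import paddle

x = paddle.rand([2, 3, 5])
dim1 = paddle.shape(x)[1]   # a Tensor holding 3, usable in dynamic shape code
```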
x2paddle/op_mapper/pytorch2paddle/aten.py

@@ -1189,129 +1189,66 @@ (aten_constant_pad_nd): the Chinese docstring is translated to English, and the previous implementation, which built prim.shape / prim.len / prim.ne / prim.if blocks around dimension-specific paddle.nn.Pad1D/Pad2D/Pad3D layers, is replaced by a direct paddle.nn.functional.pad call when the padding list is a known attribute, keeping the custom_layer:Pad fallback when the padding is a tensor. New body (reconstructed from the diff):

```python
def aten_constant_pad_nd(mapper, graph, node):
    """
    TorchScript Code:
        %58 : Tensor = aten::constant_pad_nd(%input1.24, %4876, %42)
    Parameter meaning:
        %58 (Tensor): Output Tensor
        %input1.24 (Tensor): Input Tensor
        %4876 (list): pad
        %42 (-): value
    """
    scope_name = mapper.normalize_scope_name(node)
    op_name = name_generator("pad", mapper.nn_name2id)
    output_name = mapper._get_outputs_name(node)[0]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Output list
    current_outputs = [output_name]
    # process Input Tensor
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
                        scope_name)
    # process pad
    padding_attr = None
    if inputs_name[1] in mapper.attrs:
        padding_attr = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs, scope_name)
        layer_inputs["pad"] = inputs_name[1]
    # process value
    layer_attrs["value"] = mapper.attrs[inputs_name[2]]
    if padding_attr is not None:
        layer_inputs["x"] = inputs_name[0]
        kernel_name = "paddle.nn.functional.pad"
        if len(padding_attr) == 2:
            layer_attrs["pad"] = [0, 0, 0, 0, 0, 0] + padding_attr
        elif len(padding_attr) == 4:
            layer_attrs["pad"] = [0, 0, 0, 0] + padding_attr
        elif len(padding_attr) == 6:
            layer_attrs["pad"] = [0, 0] + padding_attr
        else:
            layer_attrs["pad"] = padding_attr
        graph.add_layer(
            kernel_name,
            inputs=layer_inputs,
            outputs=[output_name],
            scope_name=scope_name,
            **layer_attrs)
    else:
        layer_inputs["input"] = inputs_name[0]
        graph.add_layer(
            "custom_layer:Pad",
            inputs=layer_inputs,
            outputs=[output_name],
            scope_name=scope_name,
            **layer_attrs)
    current_inputs = list(layer_inputs.values())
    return current_inputs, current_outputs
```

@@ -6025,7 +5962,7 @@ (aten_upsample_bilinear2d) and @@ -6103,7 +6100,7 @@ (aten_upsample_nearest2d): the isinstance check in the generated code now references the public alias:

```diff
             inputs={"input": inputs_name[1]},
             outputs=[inputs_name[1] + "_isinstance"],
             scope_name=scope_name,
-            cls="paddle.fluid.Variable")
+            cls="paddle.static.Variable")
         # TODO(syf): paddle.Variable
         graph.add_layer(
             "prim.if", {"input": inputs_name[1] + "_isinstance"},
```

@@ -6065,6 +6002,66 @@: a new mapping for aten::upsample_trilinear3d is added (op 139 registered in op_list.md):

```python
def aten_upsample_trilinear3d(mapper, graph, node):
    """
    TorchScript Code:
        %4997 : Tensor = aten::upsample_trilinear3d(%x.13, %4963, %5421, %4995)
    Parameter meaning:
        %4997 (Tensor): Output Tensor
        %x.13 (Tensor): Input Tensor
        %4963 (list): output_size
        %5421 (bool): align_corners
        %4995 (float): scale_factors
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Output list
    current_outputs = [output_name]
    # process Input Tensor
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
                        scope_name)
    layer_inputs["x"] = inputs_name[0]
    current_inputs = list(layer_inputs.values())
    # process output_size
    if inputs_name[1] in mapper.attrs:
        layer_attrs["size"] = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs, scope_name)
        layer_inputs["size"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
    # process align_corners
    if inputs_name[2] in mapper.attrs:
        layer_attrs["align_corners"] = mapper.attrs[inputs_name[2]]
    else:
        mapper._check_input(graph, inputs_node[2], inputs_name[2],
                            current_outputs, scope_name)
        layer_inputs["align_corners"] = inputs_name[2]
        current_inputs.append(inputs_name[2])
    # process scale_factor
    if inputs_name[3] in mapper.attrs:
        layer_attrs["scale_factor"] = mapper.attrs[inputs_name[3]]
    else:
        mapper._check_input(graph, inputs_node[3], inputs_name[3],
                            current_outputs, scope_name)
        layer_inputs["scale_factor"] = inputs_name[3]
        current_inputs.append(inputs_name[3])
    layer_attrs["align_mode"] = 0
    layer_attrs["mode"] = string("trilinear")
    layer_attrs["data_format"] = string("NCDHW")
    graph.add_layer(
        "paddle.nn.functional.interpolate",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name,
        **layer_attrs)
    return current_inputs, current_outputs
```
浏览文件 @
5aad32ff
...
@@ -14,7 +14,7 @@
...
@@ -14,7 +14,7 @@
import
paddle
import
paddle
from
paddle.nn.functional
import
instance_norm
from
paddle.nn.functional
import
instance_norm
from
paddle.
fluid
.initializer
import
Constant
from
paddle.
nn
.initializer
import
Constant
class
InstanceNorm
(
paddle
.
nn
.
Layer
):
class
InstanceNorm
(
paddle
.
nn
.
Layer
):
...
...
x2paddle/optimizer/fusion/__init__.py

```diff
@@ -40,3 +40,7 @@ from .trace_fc_fuser import TraceFcFuser
 from .trace_fc_fuse_pass import TraceFcFusePass
 from .onnx_layernorm_fuser import LayerNormFuser
 from .onnx_layernorm_fuse_pass import LayerNormFusePass
+from .onnx_gelu_fuser import GeluFuser
+from .onnx_gelu_fuse_pass import GeluFusePass
+from .replace_div_to_scale import Div2Scale
+from .replace_div_to_scale_pass import Div2ScalePass
```
x2paddle/optimizer/fusion/interpolate_bilinear_fuser.py

```diff
@@ -46,7 +46,7 @@ class InterpolateBilinearFuser(FuseBase):
             if x2271 :
                 x2274 = x2197[0]
                 x2275 = x2197[1]
-                x2233_isinstance = isinstance(x2233, paddle.fluid.Variable)
+                x2233_isinstance = isinstance(x2233, paddle.static.Variable)
                 if x2233_isinstance :
                     x2233 = x2233.numpy().tolist()
                 x2276 = paddle.nn.functional.interpolate(x=x2181, size=x2233, scale_factor=x2274, align_corners=False, align_mode=0, mode='bilinear')
@@ -146,7 +146,7 @@ class InterpolateBilinearFuser(FuseBase):
             "prim.isinstance",
             inputs={"input": "interpolate-input-3"},
             outputs=["interpolate-input-0_isinstance"],
-            cls="paddle.fluid.Variable")
+            cls="paddle.static.Variable")
         pattern_block_block.add_layer(
             "prim.if", {"input": "interpolate-input-0_isinstance"},
             outputs=["interpolate-input-0_if1"])
```
x2paddle/optimizer/fusion/onnx_gelu_fuse_pass.py (new file, 0 → 100644; standard Apache-2.0 header omitted here)

```python
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import GeluFuser
from x2paddle.optimizer.pass_manager import pass_register


@pass_register
class GeluFusePass(Pass):
    name = "onnx_gelu_fuse_pass"

    def __init__(self):
        Pass.__init__(self)

    def apply(self, graph):
        fuser = GeluFuser()
        fuser.operate(graph, match_kind="edge")


# register gelu pass
onnx_gelu_fuse_pass = GeluFusePass()
```
x2paddle/optimizer/fusion/onnx_gelu_fuser.py (new file, 0 → 100644; standard Apache-2.0 header omitted here)

```python
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *


class GeluFuser(FuseBase):
    def __init__(self):
        super(GeluFuser, self).__init__()

    def build_pattern(self):
        """
        code describe:
            x2paddle_332 = paddle.full(dtype='float32', shape=[1], fill_value=1.4142135381698608)
            x2paddle_335 = paddle.full(dtype='float32', shape=[1], fill_value=1.0)
            x2paddle_338 = paddle.full(dtype='float32', shape=[1], fill_value=0.5)
            x2paddle_333 = paddle.divide(x=x2paddle_331, y=x2paddle_332)
            x2paddle_334 = paddle.erf(x=x2paddle_333)
            x2paddle_336 = paddle.add(x=x2paddle_334, y=x2paddle_335)
            x2paddle_337 = paddle.multiply(x=x2paddle_331, y=x2paddle_336)
            x2paddle_339 = paddle.multiply(x=x2paddle_337, y=x2paddle_338)
        """

        def gen_name(id):
            return "x" + str(id)

        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(0)],
            shape=[1],
            fill_value=1.4142135381698608)
        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(1)],
            shape=[1],
            fill_value=1.0)
        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(2)],
            shape=[1],
            fill_value=0.5)
        self.pattern.add_layer(
            "paddle.divide",
            inputs={"x": "gelu-input-0",
                    "y": gen_name(0)},
            outputs=[gen_name(3)])
        self.pattern.add_layer(
            "paddle.erf", inputs={"x": gen_name(3)}, outputs=[gen_name(4)])
        self.pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(4),
                    "y": gen_name(1)},
            outputs=[gen_name(5)])
        self.pattern.add_layer(
            "paddle.multiply",
            inputs={"x": "gelu-input-0",
                    "y": gen_name(5)},
            outputs=[gen_name(6)])
        self.pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(6),
                    "y": gen_name(2)},
            outputs=[gen_name(7)])
        self.pattern.build(inputs={"input-0": "gelu-input-0", })

    def insert_new_layer(self, graph, parameters, matches):
        new_layer, new_layer_id = self.gen_new_layer(parameters, matches)
        graph.layers[new_layer_id] = new_layer
        matches.pop(new_layer_id)

    def gen_new_layer(self, parameters, matches):
        layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
        layer_inputs = list()
        layer_inputs_ids = list()
        fill_value_list = list()
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.divide":
                layer_inputs.append(layer.inputs["x"])
                layer_inputs_ids.append(layer_id)
            if layer.kernel == "paddle.multiply":
                output_name = layer.outputs[0]
        new_layer = PaddleLayer(
            layer_id_list[0],
            "paddle.nn.GELU",
            inputs={"x": layer_inputs[0]},
            outputs=[output_name],
            approximate=False)
        return new_layer, layer_inputs_ids[0]
```
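A hedged sanity-check sketch (not from the diff) that the pattern this fuser matches, x · 0.5 · (1 + erf(x / √2)), agrees with the exact GELU layer it is replaced with; tolerances are assumptions for float32.

```python
import numpy as np
import paddle

x = paddle.rand([4, 16])
sqrt2 = paddle.full([1], 1.4142135381698608)
manual = x * (paddle.erf(x / sqrt2) + 1.0) * 0.5      # the matched ONNX subgraph
fused = paddle.nn.GELU(approximate=False)(x)          # the fused replacement
np.testing.assert_allclose(manual.numpy(), fused.numpy(), rtol=1e-5, atol=1e-6)
```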
x2paddle/optimizer/fusion/replace_div_to_scale.py (new file, 0 → 100644; standard Apache-2.0 header omitted here)

```python
import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *


class Div2Scale(FuseBase):
    def __init__(self):
        super(Div2Scale, self).__init__()

    def build_pattern(self):
        """
        code describe:
            x2paddle_296 = paddle.full(dtype='float32', shape=[1], fill_value=8.0)
            x2paddle_293 = paddle.transpose(x=x2paddle_292, perm=[0, 2, 1, 3])
            x2paddle_294 = paddle.transpose(x=x2paddle_260, perm=[0, 2, 3, 1])
            x2paddle_295 = paddle.matmul(x=x2paddle_293, y=x2paddle_294)
            x2paddle_297 = paddle.divide(x=x2paddle_295, y=x2paddle_296)
        """

        def gen_name(id):
            return "x" + str(id)

        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(0)],
            shape=[1],
            fill_value=8)
        self.pattern.add_layer(
            "paddle.transpose",
            inputs={"x": "div2scale-input-0"},
            outputs=[gen_name(1)],
            perm=[0, 2, 1, 3])
        self.pattern.add_layer(
            "paddle.transpose",
            inputs={"x": "div2scale-input-1"},
            outputs=[gen_name(2)],
            perm=[0, 2, 1, 3])
        self.pattern.add_layer(
            "paddle.matmul",
            inputs={"x": gen_name(1),
                    "y": gen_name(2)},
            outputs=[gen_name(3)])
        self.pattern.add_layer(
            "paddle.divide",
            inputs={"x": gen_name(3),
                    "y": gen_name(0)},
            outputs=[gen_name(4)])
        self.pattern.build(inputs={
            "input-0": "div2scale-input-0",
            "input-1": "div2scale-input-1",
        })

    def insert_new_layer(self, graph, parameters, matches):
        new_layer, new_layer_id = self.gen_new_layer(parameters, matches)
        graph.layers[new_layer_id] = new_layer
        matches_copy = copy.deepcopy(matches)
        for layer_id, layer in matches_copy.items():
            if layer.kernel in ["paddle.transpose", "paddle.matmul"]:
                matches.pop(layer_id)
        matches.pop(new_layer_id)

    def gen_new_layer(self, parameters, matches):
        layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
        layer_inputs = list()
        layer_inputs_ids = list()
        fill_value = 0
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.full":
                fill_value = layer.attrs["fill_value"]
            if layer.kernel == "paddle.divide":
                layer_inputs.append(layer.inputs["x"])
                layer_inputs_ids.append(layer_id)
                output_name = layer.outputs[0]
        new_layer = PaddleLayer(
            layer_id_list[0],
            "paddle.scale",
            inputs={"x": layer_inputs[0]},
            outputs=[output_name],
            scale=1 / fill_value)
        return new_layer, layer_inputs_ids[0]
```
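A small sketch (not from the diff) of the equivalence this fuser exploits: dividing by a scalar-filled tensor is the same as paddle.scale with the reciprocal, which is cheaper than broadcasting an explicit divisor tensor. The value 8.0 matches the attention-scaling constant in the matched pattern.

```python
import numpy as np
import paddle

x = paddle.rand([2, 4, 8, 8])
divisor = paddle.full([1], 8.0)
a = paddle.divide(x, divisor)            # the original divide-by-constant subgraph
b = paddle.scale(x, scale=1.0 / 8.0)     # the fused replacement
np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=1e-6)
```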
x2paddle/optimizer/fusion/replace_div_to_scale_pass.py (new file, 0 → 100644; standard Apache-2.0 header omitted here)

```python
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import Div2Scale
from x2paddle.optimizer.pass_manager import pass_register


@pass_register
class Div2ScalePass(Pass):
    name = "replace_div_to_scale_pass"

    def __init__(self):
        Pass.__init__(self)

    def apply(self, graph):
        fuser = Div2Scale()
        fuser.operate(graph, match_kind="edge")


# register huggingface div2scale pass
replace_div_to_scale_pass = Div2ScalePass()
```
x2paddle/optimizer/fusion/trace_fc_fuser.py

```diff
@@ -118,7 +118,9 @@ class TraceFcFuser(FuseBase):
                                                                (1, 0))
         self.rm_params.add(weight_name)
         bias_numpy = parameters[bias_name]
-        parameters["{}.bias".format(linear_name)] = np.squeeze(bias_numpy)
+        if len(bias_numpy.shape) == 2:
+            bias_numpy = np.squeeze(bias_numpy)
+        parameters["{}.bias".format(linear_name)] = bias_numpy
         self.rm_params.add(bias_name)
         new_layer = PaddleLayer(
             layers_id[0],
```
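A sketch of why the squeeze is now conditional (my reading, not stated in the diff): a (1, N) bias still needs squeezing down to (N,), while an already 1-D bias, in particular a single-unit (1,) bias, should be left untouched, because an unconditional np.squeeze would collapse it to a 0-d array.

```python
import numpy as np

assert np.squeeze(np.zeros((1, 4))).shape == (4,)   # 2-D bias: squeeze is wanted
assert np.squeeze(np.zeros((1,))).shape == ()       # what the old unconditional code produced
assert np.zeros((1,)).shape == (1,)                 # what the new guarded code keeps
```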
x2paddle/optimizer/optimizer.py

```diff
@@ -37,7 +37,11 @@ class GraphOptimizer(object):
                 "prelu_fuse_pass", "transpose_eliminate_pass"
             ]
         elif source_frame == "onnx":
-            self.passes = ["onnx_layernorm_fuse_pass"]
+            self.passes = [
+                "onnx_layernorm_fuse_pass",
+                "onnx_gelu_fuse_pass",
+                "replace_div_to_scale_pass",
+            ]
         else:
             self.passes = []
@@ -54,7 +58,7 @@ class GraphOptimizer(object):
                 before_len = len(graph.layers)
                 pass_.apply(graph)
                 after_len = len(graph.layers)
-                if after_len < before_len:
+                if after_len <= before_len:
                     show_pass_log = True
                 if before_len == after_len:
                     break
```
...
x2paddle/utils.py
浏览文件 @
5aad32ff
...
@@ -103,15 +103,7 @@ class PaddleDtypes():
...
@@ -103,15 +103,7 @@ class PaddleDtypes():
self
.
t_int64
=
paddle
.
int64
self
.
t_int64
=
paddle
.
int64
self
.
t_bool
=
paddle
.
bool
self
.
t_bool
=
paddle
.
bool
else
:
else
:
self
.
t_float16
=
"paddle.fluid.core.VarDesc.VarType.FP16"
raise
Exception
(
"Paddle>=2.0.0 is required, Please update version!"
)
self
.
t_float32
=
"paddle.fluid.core.VarDesc.VarType.FP32"
self
.
t_float64
=
"paddle.fluid.core.VarDesc.VarType.FP64"
self
.
t_uint8
=
"paddle.fluid.core.VarDesc.VarType.UINT8"
self
.
t_int8
=
"paddle.fluid.core.VarDesc.VarType.INT8"
self
.
t_int16
=
"paddle.fluid.core.VarDesc.VarType.INT16"
self
.
t_int32
=
"paddle.fluid.core.VarDesc.VarType.INT32"
self
.
t_int64
=
"paddle.fluid.core.VarDesc.VarType.INT64"
self
.
t_bool
=
"paddle.fluid.core.VarDesc.VarType.BOOL"
is_new_version
=
check_version
()
is_new_version
=
check_version
()
...
...