PaddlePaddle / X2Paddle

Commit d509569c (unverified)
Authored Feb 03, 2021 by Jason; committed by GitHub on Feb 03, 2021
Merge pull request #507 from SunAhong1993/develop
fix the caffe and onnx
Parents: d9344dc8, 4db8d282
Showing 13 changed files with 1281 additions and 846 deletions (+1281, -846)
x2paddle/decoder/caffe.proto                                               +5    -0
x2paddle/decoder/caffe_decoder.py                                          +1    -1
x2paddle/decoder/caffe_pb2.py                                              +1095 -796
x2paddle/decoder/caffe_shape_inference.py                                  +10   -1
x2paddle/op_mapper/dygraph/caffe2paddle/caffe_custom_layer/normalize.py    +13   -9
x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py                 +44   -14
x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py                     +2    -2
x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py                          +50   -0
x2paddle/op_mapper/dygraph/pytorch2paddle/pytorch_op_mapper.py             +2    -1
x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/normalize.py     +14   -5
x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py                  +42   -13
x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py                      +1    -2
x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py          +2    -2
x2paddle/decoder/caffe.proto

@@ -1378,6 +1378,11 @@ message PoolingParameter {
   // If global_pooling then it will pool over the size of the bottom by doing
   // kernel_h = bottom->height and kernel_w = bottom->width
   optional bool global_pooling = 12 [default = false];
+  enum RoundMode {
+    CEIL = 0;
+    FLOOR = 1;
+  }
+  optional RoundMode round_mode = 13 [default = CEIL];
 }

 message PowerParameter {
x2paddle/decoder/caffe_decoder.py

@@ -258,7 +258,7 @@ class CaffeGraph(Graph):
                 assert input_node_name in self.node_map, 'The {} isn\'t a valid node'.format(name)
                 input_node = self.node_map[input_node_name]
-                if len(input_node.layer.top) > 1 and input_node.layer_type != "Input":
+                if len(input_node.layer.top) > 1 and input_node.layer_type not in ["Input", "MemoryData"]:
                     need_idx = list(input_node.layer.top).index(node.layer.bottom[idx])
                     name = input_node_name + ':' + str(need_idx)
                 else:
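A note on the caffe_decoder.py change: a Caffe layer can expose several output blobs ("tops"), and the decoder disambiguates a consumer's input by suffixing the producer's name with the index of the consumed top; Input and MemoryData layers are now both excluded from that path, so their outputs are referenced by the plain layer name. A minimal sketch of the "name:index" convention, with a hypothetical layer (not repository code):

    # Hypothetical producer with two tops; the decoder builds "producer:index" names.
    producer_name = "my_data"              # input_node_name
    producer_tops = ["images", "labels"]   # input_node.layer.top
    consumed_blob = "labels"               # node.layer.bottom[idx]

    need_idx = producer_tops.index(consumed_blob)
    print(producer_name + ':' + str(need_idx))  # my_data:1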
x2paddle/decoder/caffe_pb2.py

This diff is collapsed (caffe_pb2.py is the Python module generated from caffe.proto; +1095, -796).
x2paddle/decoder/caffe_shape_inference.py

@@ -115,6 +115,12 @@ def shape_pooling(layer, input_shape):
         method = math.ceil
     else:
         method = math.floor
+    if not hasattr(params, 'ceil_mode'):
+        round_mode = getattr(params, 'round_mode', 0)
+        if round_mode == 1:
+            method = math.floor
+        else:
+            method = math.ceil
     return get_strided_kernel_output_shape(params, input_shape[0], method)

@@ -240,7 +246,9 @@ def shape_reshape(layer, input_shape):
     params = layer.reshape_param
     axis = params.axis if hasattr(params, 'axis') else 0
     num_axes = params.num_axes if hasattr(params, 'num_axes') else -1
+    is_unknow_batch = False
     if inshape[0] == -1:
+        is_unknow_batch = True
         inshape[0] = 1
     input_count = count(inshape)

@@ -310,6 +318,7 @@ def shape_reshape(layer, input_shape):
     output_count = count(output_shape)
     assert output_count == input_count, \
         "[Reshape]output count[%d] must match input count[%d]" % (output_count, input_count)
+    if is_unknow_batch:
+        output_shape[0] = -1
     return [output_shape]
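For context on the round_mode handling in shape_pooling above: Caffe derives each spatial output size of a pooling layer from the kernel, stride and padding, rounding up for CEIL (value 0, the historical default) and down for FLOOR (value 1). A minimal sketch of that computation, assuming the standard Caffe formula (the repository's own helper is get_strided_kernel_output_shape):

    import math

    def pooled_size(in_size, kernel, stride, pad, method=math.ceil):
        # One spatial dimension of a Caffe pooling output; `method` is math.ceil
        # for round_mode == CEIL and math.floor for round_mode == FLOOR.
        return int(method((in_size + 2 * pad - kernel) / float(stride))) + 1

    print(pooled_size(112, 3, 2, 0, math.ceil))   # 56
    print(pooled_size(112, 3, 2, 0, math.floor))  # 55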
x2paddle/op_mapper/dygraph/caffe2paddle/caffe_custom_layer/normalize.py

@@ -16,17 +16,21 @@ import paddle
 import paddle.fluid as fluid


 class Normalize(object):
-    def __init__(self, axis, param_name, param_shape):
+    def __init__(self, axis):
         self.axis = axis
-        self.param_name = param_name
-        self.param_shape = param_shape

-    def __call__(self, x):
-        l2 = fluid.layers.prior_box(x=x, p=2, axis=1)
-        attr = fluid.ParamAttr(name=self.param_name, trainable=False)
-        param = paddle.nn.Layer.create_parameter(shape=self.param_shape, attr=atr)
-        out = paddle.multiply(x=l2, y=param, axis=self.axis)
+    def __call__(self, x, param):
+        l2_norm = fluid.layers.l2_normalize(x=x, axis=1)
+        param = paddle.reshape(param, [param.shape[-1]])
+        perm = list(range(len(l2_norm.shape)))
+        perm.pop(self.axis)
+        perm = perm + [self.axis]
+        l2_norm = paddle.transpose(l2_norm, perm=perm)
+        out = paddle.multiply(x=l2_norm, y=param)
+        perm = list(range(len(l2_norm.shape)))
+        dim = perm.pop(-1)
+        perm.insert(self.axis, dim)
+        out = paddle.transpose(out, perm=perm)
         return out
\ No newline at end of file
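The rewritten __call__ above multiplies the L2-normalized tensor by a 1-D per-channel parameter: it moves self.axis to the last position so the parameter broadcasts over it, multiplies, then restores the original layout. A small NumPy sketch of the same permutation trick (illustration only, not repository code):

    import numpy as np

    def scale_along_axis(x, param, axis):
        # Move `axis` to the end so the 1-D `param` broadcasts across it.
        perm = list(range(x.ndim))
        perm.pop(axis)
        perm = perm + [axis]
        out = x.transpose(perm) * param
        # Move that axis back to its original position.
        perm = list(range(x.ndim))
        dim = perm.pop(-1)
        perm.insert(axis, dim)
        return out.transpose(perm)

    x = np.ones((2, 3, 4, 4), dtype="float32")          # NCHW feature map
    scale = np.array([0.5, 1.0, 2.0], dtype="float32")  # per-channel scale for axis=1
    print(scale_along_axis(x, scale, axis=1).shape)     # (2, 3, 4, 4)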
x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py

@@ -199,6 +199,26 @@ class CaffeOpMapper(OpMapper):
         self.inputs_info["x{}".format(self.input_index)] = [[-1] + shape, "float32"]
         self.input_index += 1

+    def MemoryData(self, node):
+        params = node.layer.memory_data_param
+        transform_params = node.layer.transform_param
+        self.paddle_graph.add_layer(
+            "paddle.to_tensor",
+            inputs={},
+            outputs=[node.layer_name],
+            data="x{}".format(self.input_index))
+        shape = list()
+        shape.append(params.batch_size)
+        shape.append(params.channels)
+        if hasattr(transform_params, "crop_size"):
+            shape.append(transform_params.crop_size)
+            shape.append(transform_params.crop_size)
+        else:
+            shape.append(params.width)
+            shape.append(params.height)
+        self.inputs_info["x{}".format(self.input_index)] = [shape, "float32"]
+        self.input_index += 1
+
     def Convolution(self, node):
         conv2d_name = name_generator("conv", self.nn_name2id)
         output_name = node.layer_name

@@ -338,7 +358,9 @@ class CaffeOpMapper(OpMapper):
         output_name = node.layer_name
         layer_outputs = [pool2d_name, output_name]
         params = node.layer.pooling_param
-        ceil_mode = getattr(params, "ceil_mod", True)
+        ceil_mode = getattr(params, "ceil_mode", True)
+        if not hasattr(params, 'ceil_mode'):
+            ceil_mode = True if getattr(params, "round_mode", 0) == 0 else False
         global_pool = getattr(params, "global_pooling", False)
         kernel_default = [1, 1]
         channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(

@@ -615,7 +637,7 @@ class CaffeOpMapper(OpMapper):
             "paddle.scale",
             inputs={"x": input1_name},
             outputs=[node.layer_name + '_mul1'],
-            scale=coeff[2])
+            scale=coeff[1])
         inputs_dict = {}
         inputs_dict['x'] = node.layer_name + '_mul0'
         inputs_dict['y'] = node.layer_name + '_mul1'

@@ -782,7 +804,7 @@ class CaffeOpMapper(OpMapper):
         out_max_val = params.out_max_val if hasattr(params, out_max_val) else False
         top_k = params.top_k if hasattr(params, top_k) else 1
-        axis = parmas.axis if hasattr(params, axis) else -1
+        axis = params.axis if hasattr(params, axis) else -1
         if axis < 0:
             axis += len(input_shape)
         if out_max_val is True:

@@ -952,12 +974,12 @@ class CaffeOpMapper(OpMapper):
         # operation = MEAN
         else:
             layer_attrs = {
-                "dim": dim[axis:],
-                "keep_dim": False,
+                "axis": dim[axis:],
+                "keepdim": False,
             }
             self.paddle_graph.add_layer(
                 "paddle.mean",
-                inputs={"input": input.name},
+                inputs={"x": input.name},
                 outputs=[node.layer_name],
                 **layer_attrs)
         self.paddle_graph.add_layer(

@@ -1018,22 +1040,30 @@ class CaffeOpMapper(OpMapper):
             node.inputs) == 1, "The count of Normalize node\'s input is not 1."
         input = self.graph.get_input_node(node, idx=0, copy=True)
         params = node.layer.norm_param
+        param_name = node.layer_name + "_scale"
         if node.data is None or len(node.data) != 1:
             print(
                 "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
                 .format(node.layer_name, node.layer_type))
-            self.parmas[node.layer_name + ".scale"] = \
-                np.zeros([1] if params.channel_shared else [1, 1, 1, node.in_shapes[0][1]]).astype("float32")
+            self.params[param_name] = \
+                np.zeros([1] if params.channel_shared else [node.in_shapes[0][1]]).astype("float32")
         else:
-            self.parmas[node.layer_name + ".scale"] = _adjust_parameters(node)[0]
+            self.params[param_name] = _adjust_parameters(node)[0]
+        self.paddle_graph.add_layer(
+            "self.create_parameter",
+            inputs={},
+            outputs=[param_name],
+            shape=self.params[param_name].shape,
+            attr=string(param_name))
         inputs_dict = {}
         layer_attrs = {
-            "axis": -1 if params.channel_shared else 1,
-            "param_name": node.layer_name + ".scale",
-            "param_shape": self.parmas[node.layer_name + ".scale"].shape}
-        self.pd_pdgraph.add_layer(
+            "axis": -1 if params.channel_shared else 1}
+        self.paddle_graph.add_layer(
             "custom_layer:Normalize",
-            inputs={"x": input.name},
+            inputs={"x": input.name, "param": param_name},
             outputs=layer_outputs,
             **layer_attrs)
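Among the fixes above, scale=coeff[2] becoming scale=coeff[1] concerns Caffe's Eltwise SUM with per-input coefficients: the generated code scales the first input by coeff[0] (the '_mul0' branch, not shown in this hunk) and the second by coeff[1] (the '_mul1' branch), then adds the two. A hedged sketch of the intended arithmetic with made-up values:

    # Illustration only: Eltwise SUM with coefficients, out = coeff[0]*x0 + coeff[1]*x1.
    coeff = [0.5, 2.0]
    x0, x1 = 3.0, 4.0

    mul0 = coeff[0] * x0   # what the "<layer>_mul0" paddle.scale layer computes
    mul1 = coeff[1] * x1   # the "<layer>_mul1" layer; coeff[2] would be out of range here
    print(mul0 + mul1)     # 9.5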
x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py

@@ -954,12 +954,12 @@ class OpSet9():
                 starts_value = starts_value.copy()
                 ends_value = ends_value.copy()
                 for idx in range(len(ends_value)):
-                    if starts_value[idx] >= val_x.out_shapes[0][axes[idx]]:
+                    if starts_value[idx] >= val_x.out_shapes[0][axes[idx]] and val_x.out_shapes[0][axes[idx]] > 0:
                         starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
                         ends_value[idx] = val_x.out_shapes[0][axes[idx]]
-                        starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
                     elif ends_value[idx] > 2**31 - 1:
                         ends_value[idx] = 2**31 - 1
                 layer_attrs = {
                     "axes": axes,
                     "starts": starts_value,
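The changed condition above guards the clamping of Slice indices: ONNX graphs often encode "slice to the end" with a huge sentinel value, and the mapper clamps starts/ends against the statically known shape, but only when that shape entry is positive (a -1 entry marks a dynamic dimension and must not be used as a bound). A rough sketch of the clamping logic under those assumptions (not repository code):

    INT32_MAX = 2**31 - 1

    def clamp_slice(starts, ends, axes, shape):
        starts, ends = list(starts), list(ends)
        for idx in range(len(ends)):
            dim = shape[axes[idx]]
            if starts[idx] >= dim and dim > 0:
                # Start beyond a known dimension: fall back to a one-element slice.
                starts[idx] = dim - 1
                ends[idx] = dim
            elif ends[idx] > INT32_MAX:
                # "To the end" sentinel: cap it so the generated code stays valid.
                ends[idx] = INT32_MAX
        return starts, ends

    print(clamp_slice([0, 9], [2**63 - 1, 10], axes=[0, 1], shape=[4, 8]))
    # ([0, 7], [2147483647, 8])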
x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py

@@ -3976,6 +3976,56 @@ def aten_softplus(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_split_with_sizes(mapper, graph, node):
+    """ Construct the PaddleLayer for split.
+    TorchScript example:
+        %1450 : Tensor[] = aten::split_with_sizes(%1446, %1750, %41)
+    Parameter meaning:
+        %1450 (Tensor): the output, the Tensors after the split.
+        %1446 (Tensor): the Tensor to split.
+        %1750 (list): the list of sub-Tensor sizes.
+        %41 (int): the dimension to split along.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Handle input 0, i.e. %1446
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # Handle input 1, i.e. %1750
+    if inputs_name[1] in mapper.attrs:
+        layer_attrs["num_or_sections"] = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
+        layer_inputs["num_or_sections"] = inputs_name[1]
+        current_inputs.append(inputs_name[1])
+    # Handle input 2, i.e. %135
+    if inputs_name[2] in mapper.attrs:
+        layer_attrs["axis"] = mapper.attrs[inputs_name[2]]
+    else:
+        mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs, scope_name)
+        layer_inputs["axis"] = inputs_name[2]
+        current_inputs.append(inputs_name[2])
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "paddle.split",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_sqrt(mapper, graph, node):
     """ Construct the PaddleLayer for sqrt.
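The new aten_split_with_sizes handler maps TorchScript's aten::split_with_sizes onto paddle.split, whose num_or_sections argument takes either an integer (number of equal chunks) or a list of section lengths. A small usage sketch of the target API (requires Paddle; the shapes are illustrative):

    import paddle

    x = paddle.rand([4, 9])
    # A list of section lengths along axis=1, mirroring aten::split_with_sizes.
    a, b, c = paddle.split(x, num_or_sections=[2, 3, 4], axis=1)
    print(a.shape, b.shape, c.shape)  # [4, 2] [4, 3] [4, 4]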
x2paddle/op_mapper/dygraph/pytorch2paddle/pytorch_op_mapper.py

@@ -137,7 +137,8 @@ class PyTorchOpMapper(OpMapper):
             graph.outputs = inputs_name
         # Update the split parameters
         for layer in graph.layers.values():
-            if layer.kernel == "paddle.split" and "num_or_sections" in layer.attrs:
+            if layer.kernel == "paddle.split" and "num_or_sections" in layer.attrs \
+                    and not isinstance(layer.attrs["num_or_sections"], int) and len(set(layer.attrs["num_or_sections"])) == 1:
                 layer.attrs["num_or_sections"] = self.split_len[layer.outputs[0]]
         return graph, graph_inputs
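The extra guard above only rewrites num_or_sections when TorchScript recorded it as a list of identical section lengths, in which case the mapper substitutes the output count it tracked in self.split_len. A toy illustration of the check (not repository code):

    def is_uniform_sections(value):
        # An int already means "n equal chunks"; a list qualifies only when all
        # recorded section lengths are identical.
        return not isinstance(value, int) and len(set(value)) == 1

    print(is_uniform_sections([16, 16, 16]))  # True  -> replaced by the tracked chunk count
    print(is_uniform_sections([8, 16, 24]))   # False -> kept as an explicit list
    print(is_uniform_sections(3))             # False -> already a chunk count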
x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/normalize.py

@@ -13,12 +13,21 @@
 # limitations under the License.

 import paddle
 import paddle.fluid as fluid


 def normalize(x, axis, param_name, param_shape, param_dtype):
-    l2 = fluid.layers.prior_box(x=x, p=2, axis=1)
+    l2_norm = paddle.fluid.layers.l2_normalize(x=x, axis=1)
     param = paddle.static.nn.create_parameter(
         shape=param_shape,
-        dtype=string(param_dtype),
-        name=string(param_name))
-    out = paddle.multiply(x=l2, y=param, axis=axis)
+        dtype=param_dtype,
+        name=param_name)
+    param = paddle.reshape(param, [param.shape[-1]])
+    perm = list(range(len(l2_norm.shape)))
+    perm.pop(axis)
+    perm = perm + [axis]
+    l2_norm = paddle.transpose(l2_norm, perm=perm)
+    out = paddle.multiply(x=l2_norm, y=param)
+    perm = list(range(len(l2_norm.shape)))
+    dim = perm.pop(-1)
+    perm.insert(axis, dim)
+    out = paddle.transpose(out, perm=perm)
     return out
\ No newline at end of file
x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py

@@ -124,6 +124,8 @@ class CaffeOpMapper(OpMapper):
     def __init__(self, decoder):
         super(CaffeOpMapper, self).__init__()
         self.graph = decoder.caffe_graph
+        if not self.op_checker():
+            raise Exception("Model is not supported yet.")
         self.params = dict()
         resolver = decoder.resolver
         self.used_custom_layers = {}

@@ -191,6 +193,31 @@ class CaffeOpMapper(OpMapper):
             outputs=[node.name],
             **layer_attrs)

+    def MemoryData(self, node):
+        params = node.layer.memory_data_param
+        transform_params = node.layer.transform_param
+        shape = list()
+        shape.append(params.batch_size)
+        shape.append(params.channels)
+        if hasattr(transform_params, "crop_size"):
+            shape.append(transform_params.crop_size)
+            shape.append(transform_params.crop_size)
+        else:
+            shape.append(params.width)
+            shape.append(params.height)
+        dtype = 'float32'
+        layer_attrs = {
+            "dtype": string(dtype),
+            "shape": shape,
+            "name": string(node.name)
+        }
+        self.paddle_graph.add_layer(
+            kernel="paddle.static.data",
+            inputs={},
+            outputs=[node.name],
+            **layer_attrs)
+
     def Convolution(self, node):
         data = node.data
         params = node.layer.convolution_param

@@ -368,6 +395,8 @@ class CaffeOpMapper(OpMapper):
     def Pooling(self, node):
         params = node.layer.pooling_param
         ceil_mode = getattr(params, 'ceil_mode', True)
+        if not hasattr(params, 'ceil_mode'):
+            ceil_mode = True if getattr(params, "round_mode", 0) == 0 else False
         global_pool = getattr(params, 'global_pooling', False)
         kernel_default = [1, 1]
         channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(

@@ -652,7 +681,7 @@ class CaffeOpMapper(OpMapper):
             "paddle.scale",
             inputs={"x": input1_name},
             outputs=[node.name + '_mul1'],
-            scale=coeff[2])
+            scale=coeff[1])
         inputs_dict = {}
         inputs_dict['x'] = node.name + '_mul0'
         inputs_dict['y'] = node.name + '_mul1'

@@ -855,7 +884,7 @@ class CaffeOpMapper(OpMapper):
         out_max_val = params.out_max_val if hasattr(params, out_max_val) else False
         top_k = params.top_k if hasattr(params, top_k) else 1
-        axis = parmas.axis if hasattr(params, axis) else -1
+        axis = params.axis if hasattr(params, axis) else -1
         if axis < 0:
             axis += len(in_shapes)
         if out_max_val is True:

@@ -997,12 +1026,12 @@ class CaffeOpMapper(OpMapper):
         # operation = MEAN
         else:
             layer_attrs = {
-                "dim": dim[axis:],
-                "keep_dim": False,
+                "axis": dim[axis:],
+                "keepdim": False,
             }
             self.paddle_graph.add_layer(
                 "paddle.mean",
-                inputs={"input": input.name},
+                inputs={"x": input.name},
                 outputs=[node.name],
                 **layer_attrs)
         self.paddle_graph.add_layer(

@@ -1090,17 +1119,17 @@ class CaffeOpMapper(OpMapper):
             print(
                 "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
                 .format(scale_name, node.layer_type))
-            self.parmas[scale_name] = \
-                np.zeros([1] if params.channel_shared else [1, 1, 1, node.in_shapes[0][1]]).astype("float32")
+            self.params[scale_name] = \
+                np.zeros([1] if params.channel_shared else [node.in_shapes[0][1]]).astype("float32")
         else:
-            self.parmas[scale_name] = _adjust_parameters(node)[0]
+            self.params[scale_name] = _adjust_parameters(node)[0]
         layer_attrs = {
             "axis": -1 if params.channel_shared else 1,
-            "param_name": scale_name,
-            "param_shape": self.parmas[scale_name].shape,
-            "param_dtype": str(self.parmas[scale_name].dtype)}
-        self.pd_pdgraph.add_layer(
+            "param_name": string(scale_name),
+            "param_shape": self.params[scale_name].shape,
+            "param_dtype": string(self.params[scale_name].dtype)}
+        self.paddle_graph.add_layer(
             "custom_layer:normalize",
             inputs={"x": input.name},
             outputs=[node.name],
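The static-graph MemoryData handler above declares the layer as a feedable input via paddle.static.data, with the shape assembled from batch_size, channels and either crop_size or width/height. The emitted line would look roughly like this sketch (the name and sizes are hypothetical):

    import paddle

    paddle.enable_static()
    # e.g. batch_size=1, channels=3, crop_size=224 read from memory_data_param / transform_param
    data = paddle.static.data(name="data", shape=[1, 3, 224, 224], dtype="float32")
    print(data.shape)  # e.g. (1, 3, 224, 224)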
x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py

@@ -918,10 +918,9 @@ class OpSet9():
                 #     ends_value[idx] = 2**31 - 1
                 #print(val_x.out_shapes)
                 for idx in range(len(ends_value)):
-                    if starts_value[idx] >= val_x.out_shapes[0][axes[idx]]:
+                    if starts_value[idx] >= val_x.out_shapes[0][axes[idx]] and val_x.out_shapes[0][axes[idx]] > 0:
                         starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
                         ends_value[idx] = val_x.out_shapes[0][axes[idx]]
-                        starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
                     elif ends_value[idx] > 2**31 - 1:
                         ends_value[idx] = 2**31 - 1
                 layer_attrs = {
x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py

@@ -32,8 +32,8 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
                   "paddle.nn.Softmax": "softmax",
                   "paddle.nn.Softplus": "softplus",
                   "paddle.nn.Tanh": "tanh",
-                  "paddle.nn.AvgPool2D": "pool",
-                  "paddle.nn.MaxPool2D": "pool",
+                  "paddle.nn.AvgPool2D": "avgpool",
+                  "paddle.nn.MaxPool2D": "maxpool",
                   "paddle.nn.Pad1D": "pad",
                   "paddle.nn.Pad2D": "pad",
                   "paddle.nn.Pad3D": "pad",