PaddlePaddle / X2Paddle

Commit d77bc64b
Authored Feb 23, 2022 by wjj19950828

HF params convert

Parent: 7408ada6
Showing 3 changed files with 116 additions and 0 deletions (+116, -0):

docs/inference_model_convertor/pytorch2paddle.md   +2    -0
x2paddle/paddlenlp/__init__.py                     +0    -0
x2paddle/paddlenlp/utils.py                        +114  -0
docs/inference_model_convertor/pytorch2paddle.md

@@ -16,6 +16,8 @@ treelib
 ``` python
 from x2paddle.convert import pytorch2paddle
+torch_module.eval()
 pytorch2paddle(module=torch_module,
                save_dir="./pd_model",
                jit_type="trace",
...
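For completeness, a runnable version of the documented trace-mode call might look like the sketch below; the torchvision ResNet-18, the dummy input shape, and the input_examples keyword are illustrative assumptions rather than part of this diff.

``` python
# Minimal sketch (not part of this commit): the model choice, the dummy input
# and the input_examples argument are assumptions for illustration.
import torch
import torchvision

from x2paddle.convert import pytorch2paddle

torch_module = torchvision.models.resnet18(pretrained=True)
torch_module.eval()  # trace mode expects the module in eval() mode

torch_input = torch.randn(1, 3, 224, 224)
pytorch2paddle(module=torch_module,
               save_dir="./pd_model",
               jit_type="trace",
               input_examples=[torch_input])
```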
x2paddle/paddlenlp/__init__.py (new file, mode 0 → 100644, empty)

x2paddle/paddlenlp/utils.py (new file, mode 0 → 100644)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import json
import os
import six
import inspect
from collections import OrderedDict

import torch

import paddle


def convert_weight_from_hf(weight_path, class_name):
    """
    Args:
        weight_path (str): HF weight file path
        class_name (str): The class name used by the user
    Return:
        paddle_state_dict (dict): PaddleNLP state_dict
    """
    pytorch_state_dict = torch.load(weight_path, map_location="cpu")
    paddle_state_dict = OrderedDict()
    hf_to_paddle = {
        "embeddings.LayerNorm": "embeddings.layer_norm",
        "encoder.layer": "encoder.layers",
        "attention.self.query": "self_attn.q_proj",
        "attention.self.key": "self_attn.k_proj",
        "attention.self.value": "self_attn.v_proj",
        "attention.output.dense": "self_attn.out_proj",
        "intermediate.dense": "linear1",
        "output.dense": "linear2",
        "attention.output.LayerNorm": "norm1",
        "output.LayerNorm": "norm2",
        "predictions.decoder.": "predictions.decoder_",
        "predictions.transform.dense": "predictions.transform",
        "predictions.transform.LayerNorm": "predictions.layer_norm",
    }
    for k, v in pytorch_state_dict.items():
        # Linear weights are stored transposed in PyTorch relative to Paddle;
        # embedding and LayerNorm weights are left untouched.
        if k[-7:] == ".weight":
            if ".embeddings." not in k and ".LayerNorm." not in k:
                if v.ndim == 2:
                    v = v.transpose(0, 1)
        # Rename HF parameter names to their PaddleNLP equivalents.
        for hf_name, paddle_name in hf_to_paddle.items():
            k = k.replace(hf_name, paddle_name)
        # PaddleNLP prefixes backbone parameters with "bert.".
        if "bert." not in k and "cls." not in k and "classifier" not in k:
            k = "bert." + k
        paddle_state_dict[k] = paddle.to_tensor(v.data.numpy())
    return paddle_state_dict


def convert_config_from_hf(config_path, derived_parameters_dict, class_name):
    """
    Args:
        config_path (str): HF config file path
        derived_parameters_dict (dict): The parameter dict required by the init function to initialize
        class_name (str): The class name used by the user
    Return:
        derived_config (dict): PaddleNLP config
    """
    default_config = {
        "vocab_size": 28996,
        "hidden_size": 768,
        "num_hidden_layers": 12,
        "num_attention_heads": 12,
        "intermediate_size": 3072,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "attention_probs_dropout_prob": 0.1,
        "max_position_embeddings": 512,
        "type_vocab_size": 2,
        "initializer_range": 0.02,
        "pad_token_id": 0,
        "init_class": "BertModel"
    }
    with io.open(config_path, encoding="utf-8") as f:
        init_kwargs = json.load(f)
    # Override the BertModel defaults with values from the HF config.
    base_config = default_config
    for k, v in init_kwargs.items():
        if k in base_config:
            base_config[k] = v
    if class_name == "BertModel":
        return base_config
    else:
        # For derived classes, wrap the base config and collect the extra
        # __init__ parameters (with their defaults) of the derived class.
        derived_config = {
            "init_args": [base_config],
            "init_class": class_name
        }
        for k, v in derived_parameters_dict.items():
            if k == "self" or k == "bert":
                continue
            derived_config[k] = v.default
        for k, v in init_kwargs.items():
            if k in derived_config:
                derived_config[k] = v
        # The number of labels in the HF config determines the output size.
        if "id2label" in init_kwargs:
            if "num_classes" in derived_config:
                derived_config["num_classes"] = len(init_kwargs["id2label"])
            elif "num_choices" in derived_config:
                derived_config["num_choices"] = len(init_kwargs["id2label"])
        return derived_config
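As a rough usage sketch of the two helpers above (not part of the commit): the local checkpoint paths below are assumptions about where a Hugging Face bert-base-cased checkpoint was downloaded, and only the plain BertModel case is shown, so derived_parameters_dict can stay empty.

``` python
# Illustrative sketch only; the checkpoint paths are assumptions.
from x2paddle.paddlenlp.utils import convert_config_from_hf, convert_weight_from_hf

class_name = "BertModel"

# For the plain backbone the helper returns the flat config dict directly,
# so derived_parameters_dict is never consulted here.
paddle_config = convert_config_from_hf("./bert-base-cased/config.json",
                                       derived_parameters_dict={},
                                       class_name=class_name)

# OrderedDict with HF parameter names mapped to PaddleNLP names and
# 2-D Linear weights transposed.
paddle_state_dict = convert_weight_from_hf("./bert-base-cased/pytorch_model.bin",
                                           class_name=class_name)

print(paddle_config["hidden_size"])
print(list(paddle_state_dict.keys())[:5])
```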