Unverified  Commit 71defaf0 authored by: K KP, committed by: GitHub

Fix compatibility issue in machine translation models (#1542)

Parent c052dc67
......@@ -119,3 +119,7 @@ paddlehub >= 2.1.0
* 1.0.0
Initial release
* 1.0.1
Fixed a compatibility issue in model initialization
......@@ -12,13 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import os
from packaging.version import Version
from typing import List
import paddle
import paddle.nn as nn
from paddlehub.env import MODULE_HOME
from paddlehub.module.module import moduleinfo, serving
import paddlenlp
from paddlenlp.data import Pad, Vocab
from paddlenlp.transformers import InferTransformerModel, position_encoding_init
......@@ -27,7 +29,7 @@ from transformer_en_de.utils import MTTokenizer, post_process_seq
@moduleinfo(
name="transformer_en-de",
version="1.0.0",
version="1.0.1",
summary="",
author="PaddlePaddle",
author_email="",
......@@ -42,8 +44,6 @@ class MTTransformer(nn.Layer):
# Model config
model_config = {
# Number of sub-layers to be stacked in the encoder and decoder.
"n_layer": 6,
# Number of heads used in multi-head attention.
"n_head": 8,
# The dimension for word embeddings, which is also the last dimension of
......@@ -59,6 +59,12 @@ class MTTransformer(nn.Layer):
'dropout': 0
}
# Number of sub-layers to be stacked in the encoder and decoder.
if Version(paddlenlp.__version__) <= Version('2.0.5'):
    model_config.update({"n_layer": 6})
else:
    model_config.update({"num_encoder_layers": 6, "num_decoder_layers": 6})
# Vocab config
vocab_config = {
# Used to pad vocab size to be multiple of pad_factor.
......
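Below is a minimal, self-contained sketch of the compatibility shim added in this hunk. It only reproduces the version-gated key selection shown in the diff; the remaining model_config entries and the model construction itself are unchanged in the module and are omitted here.

```python
# Minimal sketch of the compatibility check above (illustrative, not the full module).
# paddlenlp <= 2.0.5 configures the layer count with a single "n_layer" key, while
# newer releases expect "num_encoder_layers" and "num_decoder_layers" instead.
from packaging.version import Version

import paddlenlp

model_config = {
    # Number of heads used in multi-head attention.
    "n_head": 8,
    # Remaining keys (embedding size, max length, dropout, ...) omitted; see the diff above.
}

if Version(paddlenlp.__version__) <= Version('2.0.5'):
    model_config.update({"n_layer": 6})
else:
    model_config.update({"num_encoder_layers": 6, "num_decoder_layers": 6})

print(model_config)  # shows which layer keys the installed paddlenlp version receives
```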
......@@ -117,3 +117,7 @@ paddlehub >= 2.1.0
* 1.0.0
Initial release
* 1.0.1
Fixed a compatibility issue in model initialization
......@@ -12,13 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import os
from packaging.version import Version
from typing import List
import paddle
import paddle.nn as nn
from paddlehub.env import MODULE_HOME
from paddlehub.module.module import moduleinfo, serving
import paddlenlp
from paddlenlp.data import Pad, Vocab
from paddlenlp.transformers import InferTransformerModel, position_encoding_init
......@@ -27,7 +29,7 @@ from transformer_zh_en.utils import MTTokenizer, post_process_seq
@moduleinfo(
name="transformer_zh-en",
version="1.0.0",
version="1.0.1",
summary="",
author="PaddlePaddle",
author_email="",
......@@ -42,8 +44,6 @@ class MTTransformer(nn.Layer):
# Model config
model_config = {
# Number of sub-layers to be stacked in the encoder and decoder.
"n_layer": 6,
# Number of heads used in multi-head attention.
"n_head": 8,
# The dimension for word embeddings, which is also the last dimension of
......@@ -59,6 +59,12 @@ class MTTransformer(nn.Layer):
'dropout': 0
}
# Number of sub-layers to be stacked in the encoder and decoder.
if Version(paddlenlp.__version__) <= Version('2.0.5'):
    model_config.update({"n_layer": 6})
else:
    model_config.update({"num_encoder_layers": 6, "num_decoder_layers": 6})
# Vocab config
vocab_config = {
# Used to pad vocab size to be multiple of pad_factor.
......
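The transformer_zh-en hunk mirrors the transformer_en-de change above. As a hedged usage sketch (assuming the standard PaddleHub loading flow and the predict API documented in the modules' READMEs, neither of which appears in this diff), the patched modules are loaded as usual:

```python
# Hedged usage sketch: the module names come from the @moduleinfo decorators above;
# the predict() call is assumed from the modules' READMEs and is not part of this diff.
import paddlehub as hub

model = hub.Module(name='transformer_zh-en')  # or name='transformer_en-de'
results = model.predict(['今天天气怎么样?'])  # assumed API: source sentences in, translations out
print(results)
```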