Unverified commit b53779df, authored by 骑马小猫 and committed by GitHub

community models: fix testing bug (#5678)

* update project

* update icon and keyword

* fix running bug

* fix conflict
Parent 59568bbb
......@@ -53,7 +53,8 @@
"\n",
"model = AutoModel.from_pretrained(\"allenai/macaw-large\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -53,7 +53,8 @@
"\n",
"model = AutoModel.from_pretrained(\"allenai/macaw-large\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
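Note on the allenai/macaw-large cells above: the checkpoint is a T5-style encoder-decoder, so calling it with only `input_ids` leaves the decoder with nothing to consume and the test cell fails. A minimal sketch of the corrected cell (random ids stand in for real tokenized text); the same pattern is applied to the google/t5-* notebooks further down:

```python
import paddle
from paddlenlp.transformers import AutoModel

# macaw-large resolves to a T5-style encoder-decoder, so the forward
# pass needs decoder inputs alongside the encoder input_ids.
model = AutoModel.from_pretrained("allenai/macaw-large")
model.eval()

input_ids = paddle.randint(100, 200, shape=[1, 20])
decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])

with paddle.no_grad():
    outputs = model(input_ids, decoder_input_ids=decoder_input_ids)
print(outputs)
```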
......@@ -44,9 +44,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertModel\n",
"\n",
"model = AutoModel.from_pretrained(\"allenai/specter\")\n",
"model = BertModel.from_pretrained(\"allenai/specter\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
......@@ -44,9 +44,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertModel\n",
"\n",
"model = AutoModel.from_pretrained(\"allenai/specter\")\n",
"model = BertModel.from_pretrained(\"allenai/specter\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
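For allenai/specter the fix swaps the generic AutoModel entry point for the concrete BertModel class, presumably because the auto mapping does not resolve this community checkpoint cleanly. A minimal sketch of the corrected cell:

```python
import paddle
from paddlenlp.transformers import BertModel

# Load the community checkpoint through its concrete architecture class.
model = BertModel.from_pretrained("allenai/specter")
model.eval()

input_ids = paddle.randint(100, 200, shape=[1, 20])
with paddle.no_grad():
    outputs = model(input_ids)  # encoder hidden states and pooled output
print(outputs)
```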
......@@ -50,9 +50,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertForSequenceClassification\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/ms-marco-MiniLM-L-12-v2\")\n",
"model = BertForSequenceClassification.from_pretrained(\"cross-encoder/ms-marco-MiniLM-L-12-v2\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
......@@ -50,9 +50,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertForSequenceClassification\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/ms-marco-MiniLM-L-12-v2\")\n",
"model = BertForSequenceClassification.from_pretrained(\"cross-encoder/ms-marco-MiniLM-L-12-v2\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
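The cross-encoder re-ranker notebooks now load BertForSequenceClassification instead of AutoModel, so the forward pass returns classification logits rather than raw hidden states. A sketch of the corrected usage, with random ids standing in for an encoded (query, passage) pair:

```python
import paddle
from paddlenlp.transformers import BertForSequenceClassification

# The classification head turns the pooled [CLS] representation into
# relevance logits, which is what the cross-encoder checkpoints provide.
model = BertForSequenceClassification.from_pretrained(
    "cross-encoder/ms-marco-MiniLM-L-12-v2"
)
model.eval()

input_ids = paddle.randint(100, 200, shape=[1, 20])
with paddle.no_grad():
    logits = model(input_ids)  # shape [batch_size, num_labels]
print(logits)
```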
......@@ -50,9 +50,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertModel\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/ms-marco-TinyBERT-L-2\")\n",
"model = BertModel.from_pretrained(\"cross-encoder/ms-marco-TinyBERT-L-2\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......@@ -103,7 +103,11 @@
]
}
],
"metadata": {},
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
......@@ -50,9 +50,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertModel\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/ms-marco-TinyBERT-L-2\")\n",
"model = BertModel.from_pretrained(\"cross-encoder/ms-marco-TinyBERT-L-2\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......@@ -103,7 +103,11 @@
]
}
],
"metadata": {},
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
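The metadata hunks above simply declare the notebook's language. If the same `language_info` block has to be added to other notebooks, one possible way (a sketch, assuming nbformat is installed; "README.ipynb" is a hypothetical file name) is:

```python
import nbformat

# Read the notebook, declare the kernel language, and write it back.
nb = nbformat.read("README.ipynb", as_version=4)
nb.metadata["language_info"] = {"name": "python"}
nbformat.write(nb, "README.ipynb")
```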
......@@ -61,9 +61,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import RobertaForSequenceClassification\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/nli-MiniLM2-L6-H768\")\n",
"model = RobertaForSequenceClassification.from_pretrained(\"cross-encoder/nli-MiniLM2-L6-H768\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
......@@ -61,9 +61,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import RobertaForSequenceClassification\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/nli-MiniLM2-L6-H768\")\n",
"model = RobertaForSequenceClassification.from_pretrained(\"cross-encoder/nli-MiniLM2-L6-H768\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
......@@ -156,9 +156,9 @@
],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertForSequenceClassification\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/stsb-TinyBERT-L-4\")\n",
"model = BertForSequenceClassification.from_pretrained(\"cross-encoder/stsb-TinyBERT-L-4\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
......@@ -52,9 +52,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertForSequenceClassification\n",
"\n",
"model = AutoModel.from_pretrained(\"cross-encoder/stsb-TinyBERT-L-4\")\n",
"model = BertForSequenceClassification.from_pretrained(\"cross-encoder/stsb-TinyBERT-L-4\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
......@@ -50,9 +50,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertForMaskedLM\n",
"\n",
"model = AutoModel.from_pretrained(\"emilyalsentzer/Bio_ClinicalBERT\")\n",
"model = BertForMaskedLM.from_pretrained(\"emilyalsentzer/Bio_ClinicalBERT\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
......@@ -50,9 +50,9 @@
"outputs": [],
"source": [
"import paddle\n",
"from paddlenlp.transformers import AutoModel\n",
"from paddlenlp.transformers import BertForMaskedLM\n",
"\n",
"model = AutoModel.from_pretrained(\"emilyalsentzer/Bio_ClinicalBERT\")\n",
"model = BertForMaskedLM.from_pretrained(\"emilyalsentzer/Bio_ClinicalBERT\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
]
......
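emilyalsentzer/Bio_ClinicalBERT is now loaded through BertForMaskedLM, which matches its masked-language-model pretraining objective. A minimal sketch of the corrected cell; the output scores every vocabulary token at every position:

```python
import paddle
from paddlenlp.transformers import BertForMaskedLM

# The MLM head projects each position back onto the vocabulary.
model = BertForMaskedLM.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
model.eval()

input_ids = paddle.randint(100, 200, shape=[1, 20])
with paddle.no_grad():
    prediction_scores = model(input_ids)  # shape [batch, seq_len, vocab_size]
print(prediction_scores)
```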
......@@ -66,7 +66,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-base-lm-adapt\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -66,7 +66,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-base-lm-adapt\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -74,7 +74,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-large-lm-adapt\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -74,7 +74,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-large-lm-adapt\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -74,7 +74,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-small-lm-adapt\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -74,7 +74,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-small-lm-adapt\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -65,7 +65,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-v1_1-base\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -65,7 +65,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-v1_1-base\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -65,7 +65,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-v1_1-large\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -65,7 +65,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-v1_1-large\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -67,7 +67,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-v1_1-small\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......
......@@ -67,7 +67,8 @@
"\n",
"model = AutoModel.from_pretrained(\"google/t5-v1_1-small\")\n",
"input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids))"
"decoder_input_ids = paddle.randint(100, 200, shape=[1, 20])\n",
"print(model(input_ids, decoder_input_ids=decoder_input_ids))"
]
},
{
......