Commit a773fe88 authored by MRXLT

fix imdb reader

Parent 88cbb37a
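This commit renames the IMDB reader class from IMDBReader to IMDBDataset and exports it from paddle_serving_app, so the examples no longer import it from a local imdb_reader module; the CI script also switches the batch test from benchmark_batch.py to benchmark.py. A minimal sketch of the client-side usage after this change, assuming the example's get_words_and_label helper and the "prediction" fetch name, neither of which is shown in this diff:

    import sys
    from paddle_serving_client import Client
    from paddle_serving_app import IMDBDataset  # renamed from IMDBReader, now exported by paddle_serving_app

    client = Client()
    client.load_client_config(sys.argv[1])
    client.connect(["127.0.0.1:9292"])

    # The reader ships with paddle_serving_app instead of a local imdb_reader.py.
    imdb_dataset = IMDBDataset()
    imdb_dataset.load_resource(sys.argv[2])  # e.g. imdb.vocab

    for line in sys.stdin:
        # get_words_and_label is assumed from the IMDB example; adapt to your own preprocessing.
        word_ids, label = imdb_dataset.get_words_and_label(line)
        fetch_map = client.predict(feed={"words": word_ids}, fetch=["prediction"])
        print(fetch_map)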
@@ -16,7 +16,7 @@
import sys
import time
import requests
-from imdb_reader import IMDBDataset
+from paddle_serving_app import IMDBDataset
from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args
......
@@ -13,7 +13,7 @@
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
-from paddle_serving_app import IMDBReader
+from paddle_serving_app import IMDBDataset
import sys
client = Client()
@@ -23,7 +23,7 @@ client.connect(["127.0.0.1:9292"])
# you can define any english sentence or dataset here
# This example reuses imdb reader in training, you
# can define your own data preprocessing easily.
-imdb_dataset = IMDBReader()
+imdb_dataset = IMDBDataset()
imdb_dataset.load_resource(sys.argv[2])
for line in sys.stdin:
......
@@ -14,7 +14,7 @@
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
-from imdb_reader import IMDBDataset
+from paddle_serving_app import IMDBDataset
client = Client()
# If you have more than one model, make sure that the input
......
@@ -14,7 +14,7 @@
# pylint: disable=doc-string-missing
from paddle_serving_server.web_service import WebService
-from paddle_serving_app import IMDBReader
+from paddle_serving_app import IMDBDataset
import sys
@@ -22,7 +22,7 @@ class IMDBService(WebService):
    def prepare_dict(self, args={}):
        if len(args) == 0:
            exit(-1)
-        self.dataset = IMDBReader()
+        self.dataset = IMDBDataset()
        self.dataset.load_resource(args["dict_file_path"])
    def preprocess(self, feed={}, fetch=[]):
......
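For reference, a hedged sketch of wiring the renamed reader into the web service above; the IMDBService body mirrors the hunk, while the WebService method names (load_model_config, prepare_server, run_server) and argument order are assumptions based on the Paddle Serving examples of this period and may differ across versions:

    import sys
    from paddle_serving_server.web_service import WebService
    from paddle_serving_app import IMDBDataset

    class IMDBService(WebService):
        # prepare_dict builds the renamed IMDBDataset from a vocab file, as in the hunk above.
        def prepare_dict(self, args={}):
            if len(args) == 0:
                exit(-1)
            self.dataset = IMDBDataset()
            self.dataset.load_resource(args["dict_file_path"])
        # preprocess(feed, fetch) is elided here; see the full text_classify_service.py example.

    imdb_service = IMDBService(name="imdb")
    imdb_service.load_model_config(sys.argv[1])
    imdb_service.prepare_server(workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
    imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
    imdb_service.run_server()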
@@ -15,6 +15,6 @@ from .reader.chinese_bert_reader import ChineseBertReader
from .reader.image_reader import ImageReader, File2Image, URL2Image, Sequential, Normalize, CenterCrop, Resize
from .reader.lac_reader import LACReader
from .reader.senta_reader import SentaReader
-from .reader.imdb_reader import IMDBReader
+from .reader.imdb_reader import IMDBDataset
from .models import ServingModels
from .local_predict import Debugger
@@ -22,7 +22,7 @@ import paddle.fluid.incubate.data_generator as dg
py_version = sys.version_info[0]
-class IMDBReader(dg.MultiSlotDataGenerator):
+class IMDBDataset(dg.MultiSlotDataGenerator):
    def load_resource(self, dictfile):
        self._vocab = {}
        wid = 0
@@ -87,6 +87,6 @@ class IMDBReader(dg.MultiSlotDataGenerator):
if __name__ == "__main__":
-    imdb = IMDBReader()
+    imdb = IMDBDataset()
    imdb.load_resource("imdb.vocab")
    imdb.run_from_stdin()
@@ -343,7 +343,7 @@ function python_test_imdb() {
sleep 5
check_cmd "head test_data/part-0 | python test_client.py imdb_cnn_client_conf/serving_client_conf.prototxt imdb.vocab"
# test batch predict
check_cmd "python benchmark_batch.py --thread 4 --batch_size 8 --model imdb_bow_client_conf/serving_client_conf.prototxt --request rpc --endpoint 127.0.0.1:9292"
check_cmd "python benchmark.py --thread 4 --batch_size 8 --model imdb_bow_client_conf/serving_client_conf.prototxt --request rpc --endpoint 127.0.0.1:9292"
echo "imdb CPU RPC inference pass"
kill_server_process
rm -rf work_dir1
@@ -359,7 +359,7 @@ function python_test_imdb() {
exit 1
fi
# test batch predict
check_cmd "python benchmark_batch.py --thread 4 --batch_size 8 --model imdb_bow_client_conf/serving_client_conf.prototxt --request http --endpoint 127.0.0.1:9292"
check_cmd "python benchmark.py --thread 4 --batch_size 8 --model imdb_bow_client_conf/serving_client_conf.prototxt --request http --endpoint 127.0.0.1:9292"
setproxy # recover proxy state
kill_server_process
ps -ef | grep "text_classify_service.py" | grep -v grep | awk '{print $2}' | xargs kill
......