diff --git a/demo/serving/bert_service/README.md b/demo/serving/bert_service/README.md
index ebdabfd982bd907174af44ce18c0e4d2fa47a840..57c769e1bd159245dc55c023dffd968ec4f70335 100644
--- a/demo/serving/bert_service/README.md
+++ b/demo/serving/bert_service/README.md
@@ -68,7 +68,7 @@ $ pip install ujson
 
 |模型|网络|
 |:-|:-:|
-|[ERNIE](https://paddlepaddle.org.cn/hubdetail?name=ERNIE&en_category=SemanticModel)|ERNIE|
+|[ernie](https://paddlepaddle.org.cn/hubdetail?name=ERNIE&en_category=SemanticModel)|ERNIE|
 |[ernie_tiny](https://paddlepaddle.org.cn/hubdetail?name=ernie_tiny&en_category=SemanticModel)|ERNIE|
 |[ernie_v2_eng_large](https://paddlepaddle.org.cn/hubdetail?name=ernie_v2_eng_large&en_category=SemanticModel)|ERNIE|
 |[ernie_v2_eng_base](https://paddlepaddle.org.cn/hubdetail?name=ernie_v2_eng_base&en_category=SemanticModel)|ERNIE|
@@ -225,8 +225,8 @@ Paddle Inference Server exit successfully!
 > Q : 如何在一台服务器部署多个模型?
 > A : 可通过多次启动`Bert Service`,分配不同端口实现。如果使用GPU,需要指定不同的显卡。如同时部署`ernie`和`bert_chinese_L-12_H-768_A-12`,分别执行命令如下:
 > ```shell
-> $ hub serving start bert_serving -m ernie -p 8866
-> $ hub serving start bert_serving -m bert_chinese_L-12_H-768_A-12 -p 8867
+> $ hub serving start bert_service -m ernie -p 8866
+> $ hub serving start bert_service -m bert_chinese_L-12_H-768_A-12 -p 8867
 > ```
 
 > Q : 启动时显示"Check out http://yq01-gpu-255-129-12-00.epc.baidu.com:8887 in web
diff --git a/paddlehub/commands/serving.py b/paddlehub/commands/serving.py
index ab6725dd560fdd04fa5580798d2c419a59764d7e..fd39bf90d12a17d237ee9b22a00a05636683f4c0 100644
--- a/paddlehub/commands/serving.py
+++ b/paddlehub/commands/serving.py
@@ -159,7 +159,7 @@ class ServingCommand(BaseCommand):
         module = args.modules
         if module is not None:
             use_gpu = args.use_gpu
-            port = args.port[0]
+            port = args.port
             if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
                 print("Port %s is occupied, please change it." % (port))
                 return False
@@ -206,8 +206,10 @@
         if args.sub_command == "start":
             if args.bert_service == "bert_service":
                 ServingCommand.start_bert_serving(args)
-            else:
+            elif args.bert_service is None:
                 ServingCommand.start_serving(args)
+            else:
+                ServingCommand.show_help()
         else:
             ServingCommand.show_help()
 