Commit 7643b8c7, authored by MRXLT

add mkl argument

Parent c2df6ac2
...@@ -84,6 +84,7 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po
| `model` | str | `""` | Path of paddle model directory to be served |
| `mem_optim` | bool | `False` | Enable memory / graphic memory optimization |
| `ir_optim` | bool | `False` | Enable analysis and optimization of calculation graph |
| `use_mkl` (Only for cpu version) | bool | `False` | Run inference with MKL |
Here, we use `curl` to send an HTTP POST request to the service we just started. Users can use any Python library to send HTTP POST as well, e.g., [requests](https://requests.readthedocs.io/en/master/).
</center>
...
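For reference, a minimal way to exercise the new flag is to add it to the serve command shown in the hunk header above. The `--port 9292` value is an assumption here (the original command is truncated), and `--use_mkl` only matters for the CPU package:

```shell
# Hypothetical invocation: --port 9292 is assumed, the rest mirrors the
# quick-start command above; --use_mkl has no effect in the GPU package.
python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9292 --use_mkl True
```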
...@@ -88,6 +88,7 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po
| `model` | str | `""` | Path of paddle model directory to be served |
| `mem_optim` | bool | `False` | Enable memory optimization |
| `ir_optim` | bool | `False` | Enable analysis and optimization of calculation graph |
| `use_mkl` (Only for cpu version) | bool | `False` | Run inference with MKL |
We use the `curl` command to send an HTTP POST request to the service we just started. You can also call a Python library to send HTTP POST requests; refer to the (English) documentation of [requests](https://requests.readthedocs.io/en/master/).
</center>
...
...@@ -289,8 +289,8 @@ class Server(object):
        # check config here
        # print config here

-    def use_mkl(self):
+    def use_mkl(self, flag):
-        self.mkl_flag = True
+        self.mkl_flag = flag

    def get_device_version(self):
        avx_flag = False
...@@ -305,6 +305,10 @@ class Server(object):
            else:
                device_version = "serving-cpu-avx-openblas-"
        else:
            if mkl_flag:
                print(
                    "Your CPU does not support AVX, the server will run in noavx-openblas mode."
                )
            device_version = "serving-cpu-noavx-openblas-"
        return device_version
...
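To summarize the change above: `use_mkl` is now a setter that takes an explicit flag instead of unconditionally enabling MKL, and `get_device_version` consults that flag when picking the CPU package name. Below is a minimal sketch of how these pieces fit together; the AVX probe via `/proc/cpuinfo`, the `serving-cpu-avx-mkl-` branch (not visible in the hunk), and the trimmed-down class are assumptions, not the real implementation.

```python
# Minimal sketch, not the real Server class: only the mkl-related logic
# from this commit is shown, and the AVX detection is an assumed stand-in.
class Server(object):
    def __init__(self):
        self.mkl_flag = False

    def use_mkl(self, flag):
        # After this commit the caller decides whether MKL is enabled.
        self.mkl_flag = flag

    def get_device_version(self):
        avx_flag = False
        mkl_flag = self.mkl_flag
        # Assumed AVX check (Linux only); the real code may detect it differently.
        try:
            with open("/proc/cpuinfo") as f:
                avx_flag = "avx" in f.read()
        except IOError:
            pass
        if avx_flag:
            if mkl_flag:
                # Assumed package name, inferred from the surrounding branches.
                device_version = "serving-cpu-avx-mkl-"
            else:
                device_version = "serving-cpu-avx-openblas-"
        else:
            if mkl_flag:
                print("Your CPU does not support AVX, "
                      "the server will run in noavx-openblas mode.")
            device_version = "serving-cpu-noavx-openblas-"
        return device_version
```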
...@@ -43,6 +43,7 @@ def parse_args(): # pylint: disable=doc-string-missing
"--mem_optim", type=bool, default=False, help="Memory optimize") "--mem_optim", type=bool, default=False, help="Memory optimize")
parser.add_argument( parser.add_argument(
"--ir_optim", type=bool, default=False, help="Graph optimize") "--ir_optim", type=bool, default=False, help="Graph optimize")
parser.add_argument("--use_mkl", type=bool, default=False, help="Use MKL")
parser.add_argument( parser.add_argument(
"--max_body_size", "--max_body_size",
type=int, type=int,
...@@ -61,6 +62,7 @@ def start_standard_model(): # pylint: disable=doc-string-missing
    mem_optim = args.mem_optim
    ir_optim = args.ir_optim
    max_body_size = args.max_body_size
    use_mkl = args.use_mkl
    if model == "":
        print("You must specify your serving model")
...@@ -82,6 +84,7 @@ def start_standard_model(): # pylint: disable=doc-string-missing
    server.set_num_threads(thread_num)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.use_mkl(use_mkl)
    server.set_max_body_size(max_body_size)
    server.set_port(port)
...
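Putting the serve.py pieces together, the new `--use_mkl` option simply flows from argparse into `Server.use_mkl`. A rough, self-contained sketch of that flow follows; `StubServer` is a hypothetical stand-in for the real `paddle_serving_server` class. Note also that argparse's `type=bool` treats any non-empty string as `True`, so the flag is effectively enabled by passing `--use_mkl True` and disabled by omitting it.

```python
# Rough sketch of the serve.py wiring added in this commit; StubServer
# stands in for paddle_serving_server's Server and is not the real class.
import argparse


class StubServer(object):
    def __init__(self):
        self.mkl_flag = False

    def use_mkl(self, flag):
        self.mkl_flag = flag


def parse_args():
    parser = argparse.ArgumentParser("serve")
    # Mirrors the new argument; with type=bool, "--use_mkl False" still
    # yields True, so omit the flag entirely to keep MKL disabled.
    parser.add_argument("--use_mkl", type=bool, default=False, help="Use MKL")
    return parser.parse_args()


def start_standard_model():
    args = parse_args()
    server = StubServer()
    # Same call order as the diff: set the flag on the server, which is
    # later consulted when the CPU device version is resolved.
    server.use_mkl(args.use_mkl)
    print("mkl enabled:", server.mkl_flag)


if __name__ == "__main__":
    start_standard_model()
```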