From 121d48d7fb9979932aac5bf483082a90c088636b Mon Sep 17 00:00:00 2001
From: barrierye
Date: Mon, 31 Aug 2020 15:45:59 +0800
Subject: [PATCH] add model_config intro

---
 doc/NEW_WEB_SERVICE.md    | 1 +
 doc/NEW_WEB_SERVICE_CN.md | 1 +
 2 files changed, 2 insertions(+)

diff --git a/doc/NEW_WEB_SERVICE.md b/doc/NEW_WEB_SERVICE.md
index 0fde9444..86e53b84 100644
--- a/doc/NEW_WEB_SERVICE.md
+++ b/doc/NEW_WEB_SERVICE.md
@@ -141,6 +141,7 @@ op:
   batch_size: 1 # If this field is set, Op will merge multiple request outputs into a single batch
   auto_batching_timeout: -1 # auto-batching timeout in milliseconds. The default value is -1, that is, no timeout
   local_service_conf:
+    model_config: # the path of the corresponding model file. There is no default value (None). If this item is not configured, the model file will not be loaded.
     workdir: "" # working directory of corresponding model
     thread_num: 2 # the corresponding model is started with thread_num threads
     devices: "" # on which device does the model launched. You can specify the GPU card number(such as "0,1,2"), which is CPU by default
diff --git a/doc/NEW_WEB_SERVICE_CN.md b/doc/NEW_WEB_SERVICE_CN.md
index a02e2a03..af6730a8 100644
--- a/doc/NEW_WEB_SERVICE_CN.md
+++ b/doc/NEW_WEB_SERVICE_CN.md
@@ -141,6 +141,7 @@ op:
   batch_size: 1 # auto-batching 中的 batch_size，若设置该字段则 Op 会将多个请求输出合并为一个 batch
   auto_batching_timeout: -1 # auto-batching 超时时间，单位为毫秒。默认为 -1 即不超时
   local_service_conf:
+    model_config: # 对应模型文件的路径，无默认值(None)。若不配置该项则不会加载模型文件。
     workdir: "" # 对应模型的工作目录
     thread_num: 2 # 对应模型用几个线程启动
     devices: "" # 模型启动在哪个设备上，可以指定 gpu 卡号（如 "0,1,2"），默认为 cpu
--
GitLab
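
For context, a minimal sketch of how the new `model_config` field fits into the config file documented above. The nesting and comment text follow the snippet in the diff; the concrete values, in particular the model path `./uci_housing_model`, are hypothetical placeholders and not part of this patch.

```yaml
op:
  batch_size: 1             # merge multiple request outputs into a single batch
  auto_batching_timeout: -1 # auto-batching timeout in milliseconds; -1 means no timeout
  local_service_conf:
    model_config: "./uci_housing_model"  # hypothetical path to the model files; if left unset, no model file is loaded
    workdir: ""             # working directory of the corresponding model
    thread_num: 2           # number of threads the model is started with
    devices: ""             # "" = CPU; GPU card numbers such as "0,1,2" select GPUs
```

Leaving `model_config` unset means the model file is not loaded, as noted in the comment added by the patch.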