From def8f91b83ba0e4d54f88d081c3450e8aa7ee539 Mon Sep 17 00:00:00 2001 From: barrierye Date: Mon, 30 Mar 2020 14:58:42 +0800 Subject: [PATCH] add gpu_ids description --- doc/RUN_IN_DOCKER.md | 12 ++++++++++++ doc/RUN_IN_DOCKER_CN.md | 18 ++++++++++++------ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/doc/RUN_IN_DOCKER.md b/doc/RUN_IN_DOCKER.md index edfd9141..c111f884 100644 --- a/doc/RUN_IN_DOCKER.md +++ b/doc/RUN_IN_DOCKER.md @@ -135,6 +135,18 @@ pip install paddle-serving-server-gpu ### Test example +When running the GPU Server, you need to set the GPUs used by the prediction service. By default, the GPU with index 0 is used. You can configure it in two ways: + +1. Using the `CUDA_VISIBLE_DEVICES` environment variable, the following example specifies two GPUs with an index of 0 and 1: + + ```shell + export CUDA_VISIBLE_DEVICES=0,1 + ``` + +2. Using the `--gpu_ids` option, which will override the configuration of `CUDA_VISIBLE_DEVICES`. + + + Get the trained Boston house price prediction model by the following command: ```bash diff --git a/doc/RUN_IN_DOCKER_CN.md b/doc/RUN_IN_DOCKER_CN.md index 47914a2f..cf70a8be 100644 --- a/doc/RUN_IN_DOCKER_CN.md +++ b/doc/RUN_IN_DOCKER_CN.md @@ -127,11 +127,17 @@ pip install paddle-serving-server-gpu ### 测试example -GPU版本在运行Server端代码前需要设置`CUDA_VISIBLE_DEVICES`环境变量来指定预测服务使用的GPU,下面的示例为指定索引为0和1两块GPU: +在运行GPU版Server时需要设置预测服务使用的GPU,缺省状态默认使用索引为0的GPU。可以通过下面两种方式进行配置: + +1. 使用`CUDA_VISIBLE_DEVICES`环境变量,下面的示例为指定索引为0和1两块GPU: + + ```shell + export CUDA_VISIBLE_DEVICES=0,1 + ``` + +2. 
使用`--gpu_ids`选项,该选项将覆盖`CUDA_VISIBLE_DEVICES`的配置。 + -```bash - export CUDA_VISIBLE_DEVICES=0,1 -``` 通过下面命令获取训练好的Boston房价预估模型: @@ -145,7 +151,7 @@ tar -xzf uci_housing.tar.gz 在Server端(容器内)运行: ```bash - python -m paddle_serving_server_gpu.serve --model uci_housing_model --thread 10 --port 9292 --name uci + python -m paddle_serving_server_gpu.serve --model uci_housing_model --thread 10 --port 9292 --name uci --gpu_ids 0 ``` 在Client端(容器内或容器外)运行: @@ -159,7 +165,7 @@ tar -xzf uci_housing.tar.gz 在Server端(容器内)运行: ```bash - python -m paddle_serving_server_gpu.serve --model uci_housing_model --thread 10 --port 9292 + python -m paddle_serving_server_gpu.serve --model uci_housing_model --thread 10 --port 9292 --gpu_ids 0 ``` 在Client端(容器内或容器外,需要安装`paddle-serving-client`包)运行下面Python代码: -- GitLab