Commit e398e889 authored by wangguibao

Add configure entry: specify GPU id for Serving instance

Parent ee79ea5a
@@ -32,6 +32,8 @@
 #endif
 #include "predictor/framework/infer.h"
 
+DECLARE_int32(gpuid);
+
 namespace baidu {
 namespace paddle_serving {
 namespace fluid_gpu {
@@ -129,7 +131,7 @@ class FluidGpuAnalysisCore : public FluidFamilyCore {
 paddle::AnalysisConfig analysis_config;
 analysis_config.SetParamsFile(data_path + "/__params__");
 analysis_config.SetProgFile(data_path + "/__model__");
-analysis_config.EnableUseGpu(100, 0);
+analysis_config.EnableUseGpu(100, FLAGS_gpuid);
 analysis_config.SetCpuMathLibraryNumThreads(1);
 analysis_config.SwitchSpecifyInputNames(true);
 AutoLock lock(GlobalPaddleCreateMutex::instance());
@@ -159,7 +161,7 @@ class FluidGpuNativeCore : public FluidFamilyCore {
 native_config.prog_file = data_path + "/__model__";
 native_config.use_gpu = true;
 native_config.fraction_of_gpu_memory = 0.9;
-native_config.device = 0;
+native_config.device = FLAGS_gpuid;
 AutoLock lock(GlobalPaddleCreateMutex::instance());
 _core = paddle::CreatePaddlePredictor<paddle::NativeConfig,
                                       paddle::PaddleEngineKind::kNative>(
@@ -185,7 +187,7 @@ class FluidGpuAnalysisDirCore : public FluidFamilyCore {
 paddle::AnalysisConfig analysis_config;
 analysis_config.SetModel(data_path);
-analysis_config.EnableUseGpu(100, 0);
+analysis_config.EnableUseGpu(100, FLAGS_gpuid);
 analysis_config.SwitchSpecifyInputNames(true);
 analysis_config.SetCpuMathLibraryNumThreads(1);
 AutoLock lock(GlobalPaddleCreateMutex::instance());
@@ -214,7 +216,7 @@ class FluidGpuNativeDirCore : public FluidFamilyCore {
 native_config.model_dir = data_path;
 native_config.use_gpu = true;
 native_config.fraction_of_gpu_memory = 0.9;
-native_config.device = 0;
+native_config.device = FLAGS_gpuid;
 AutoLock lock(GlobalPaddleCreateMutex::instance());
 _core = paddle::CreatePaddlePredictor<paddle::NativeConfig,
                                       paddle::PaddleEngineKind::kNative>(
@@ -464,7 +466,7 @@ class FluidGpuNativeDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
 native_config.model_dir = data_path;
 native_config.use_gpu = true;
 native_config.fraction_of_gpu_memory = 0.9;
-native_config.device = 0;
+native_config.device = FLAGS_gpuid;
 AutoLock lock(GlobalPaddleCreateMutex::instance());
 _core->_fluid_core =
     paddle::CreatePaddlePredictor<paddle::NativeConfig,
@@ -491,7 +493,7 @@ class FluidGpuAnalysisDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
 paddle::AnalysisConfig analysis_config;
 analysis_config.SetModel(data_path);
-analysis_config.EnableUseGpu(100, 0);
+analysis_config.EnableUseGpu(100, FLAGS_gpuid);
 analysis_config.SwitchSpecifyInputNames(true);
 analysis_config.SetCpuMathLibraryNumThreads(1);
 AutoLock lock(GlobalPaddleCreateMutex::instance());
...
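For reference, every hunk above routes the same flag into the Paddle inference configuration, either through analysis_config.EnableUseGpu(100, FLAGS_gpuid) or native_config.device = FLAGS_gpuid. Below is a minimal sketch of how the flag-selected device reaches an AnalysisConfig when a predictor is created; the helper name CreateGpuPredictor and the standalone layout are illustrative assumptions, not code from this commit.

// Illustrative sketch only: build an analysis predictor on the GPU chosen by
// --gpuid, mirroring the EnableUseGpu(100, FLAGS_gpuid) calls in the diff.
#include <memory>
#include <string>

#include <gflags/gflags.h>
#include "paddle_inference_api.h"  // Paddle Fluid inference API; include path may vary by build

DECLARE_int32(gpuid);  // the flag itself is defined once via DEFINE_int32 (see the .cpp hunk below)

std::unique_ptr<paddle::PaddlePredictor> CreateGpuPredictor(
    const std::string& model_dir) {
  paddle::AnalysisConfig analysis_config;
  analysis_config.SetModel(model_dir);
  // 100 MB initial GPU memory pool; device id comes from the new gflag.
  analysis_config.EnableUseGpu(100, FLAGS_gpuid);
  analysis_config.SwitchSpecifyInputNames(true);
  analysis_config.SetCpuMathLibraryNumThreads(1);
  return paddle::CreatePaddlePredictor(analysis_config);
}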
@@ -15,6 +15,8 @@
 #include "inferencer-fluid-gpu/include/fluid_gpu_engine.h"
 #include "predictor/framework/factory.h"
 
+DEFINE_int32(gpuid, 0, "GPU device id to use");
+
 namespace baidu {
 namespace paddle_serving {
 namespace fluid_gpu {
...
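The DEFINE_int32 above is a standard gflags definition, so a Serving instance can be pointed at a specific device from the command line (for example --gpuid=1), and any translation unit that declares the flag reads it through FLAGS_gpuid once flags are parsed. A self-contained sketch of that flow follows; the main() body and printed message are illustrative assumptions, not part of this commit.

// Standalone gflags sketch (illustration only, not Serving code).
#include <iostream>

#include <gflags/gflags.h>

DEFINE_int32(gpuid, 0, "GPU device id to use");

int main(int argc, char** argv) {
  // Turns "--gpuid=1" on the command line into FLAGS_gpuid == 1.
  // Older gflags builds expose this as google::ParseCommandLineFlags.
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  std::cout << "Serving instance would bind to GPU " << FLAGS_gpuid << std::endl;
  return 0;
}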