Unverified · Commit b593d588 authored by Wilber, committed by GitHub

[Inference] EnableUseGpu has higher priority than flags (#29697)

* enable_use_gpu has higher priority than FLAGS

* update.
Parent 9cbcc6ca
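
For context, a minimal usage sketch (not part of this commit) of the precedence this change establishes: the pool size handed to `EnableUseGpu()` now overwrites `FLAGS_initial_gpu_memory_in_mb`, so it takes priority over a value supplied on the command line via the `--initial_gpu_memory_in_mb` flag. The header path and the 100 MB / device 0 values are assumptions for illustration only.

```cpp
// Sketch only: illustrates the intended precedence after this change.
// EnableUseGpu() and FLAGS_initial_gpu_memory_in_mb are the symbols
// touched in the diff below; the include path is an assumption.
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  // Even if the process was launched with --initial_gpu_memory_in_mb=500,
  // this call now resets FLAGS_initial_gpu_memory_in_mb to 100, so the
  // value given to EnableUseGpu() wins over the flag.
  config.EnableUseGpu(100 /* memory_pool_init_size_mb */, 0 /* device_id */);
  return 0;
}
```
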
@@ -18,6 +18,10 @@
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/gpu_info.h"
+#ifdef PADDLE_WITH_CUDA
+DECLARE_uint64(initial_gpu_memory_in_mb);
+#endif
 namespace paddle {
 struct MkldnnQuantizerConfig;
@@ -68,6 +72,7 @@ void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
 #ifdef PADDLE_WITH_CUDA
   use_gpu_ = true;
   memory_pool_init_size_mb_ = memory_pool_init_size_mb;
+  FLAGS_initial_gpu_memory_in_mb = memory_pool_init_size_mb_;
   device_id_ = device_id;
 #else
   LOG(ERROR) << "Please compile with gpu to EnableGpu()";
@@ -482,12 +487,16 @@ float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #ifdef PADDLE_WITH_CUDA
   // Get the GPU memory details and calculate the fraction of memory for the
   // GPU memory pool.
-  size_t gpu_used, gpu_available;
+  size_t gpu_total, gpu_available;
   platform::SetDeviceId(device_id_);
-  platform::GpuMemoryUsage(&gpu_used, &gpu_available);
-  double total_gpu_memory = (gpu_used + gpu_available) / 1024. / 1024.;
+  platform::GpuMemoryUsage(&gpu_available, &gpu_total);
+  double total_gpu_memory = gpu_total / 1024. / 1024.;
   float fraction_of_gpu_memory =
       static_cast<double>(memory_pool_init_size_mb()) / total_gpu_memory;
+  VLOG(3) << "total_gpu_memory is " << total_gpu_memory
+          << "M, gpu_available is " << gpu_available / 1024. / 1024.
+          << "M, memory_pool_init_size is " << memory_pool_init_size_mb()
+          << "M.";
   return fraction_of_gpu_memory;
 #else
   return 0.;
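
To make the corrected pool-fraction arithmetic in `fraction_of_gpu_memory_for_pool()` concrete, here is a standalone sketch with made-up numbers (a card reporting 16384 MB total and the 100 MB pool requested above). It mirrors the computation in the last hunk but is not code from the commit.

```cpp
// Standalone arithmetic sketch of the corrected fraction computation.
// Numbers are illustrative: a device with 16384 MB total memory and a
// 100 MB pool give a fraction of roughly 100 / 16384 ~= 0.0061.
#include <cstdio>

int main() {
  const double memory_pool_init_size_mb = 100.0;             // value passed to EnableUseGpu()
  const double gpu_total_bytes = 16384.0 * 1024.0 * 1024.0;  // total device memory in bytes
  const double total_gpu_memory = gpu_total_bytes / 1024. / 1024.;  // back to MB, as in the hunk
  const double fraction = memory_pool_init_size_mb / total_gpu_memory;
  std::printf("fraction_of_gpu_memory_for_pool = %f\n", fraction);  // ~0.006104
  return 0;
}
```
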