From 61052cdbc6cd048410aebb0df514fba6f8931347 Mon Sep 17 00:00:00 2001
From: chenweihang
Date: Wed, 8 Aug 2018 10:22:36 +0000
Subject: [PATCH] polish high frequency enforce error message

---
 paddle/fluid/platform/enforce.h   | 10 ++++++----
 paddle/fluid/platform/gpu_info.cc | 20 ++++++++++----------
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h
index 566485cd3c..cad60275a2 100644
--- a/paddle/fluid/platform/enforce.h
+++ b/paddle/fluid/platform/enforce.h
@@ -263,7 +263,8 @@ inline void throw_on_error(T e) {
  * PADDLE_ENFORCE_EQ(a, b);
  *
  * will raise an expression described as follows:
- * "enforce a == b failed, 1 != 2" with detailed stack information.
+ * "Data check failed. Expected input a == b, but received a(1) != b(2)."
+ * with detailed stack information.
  *
  * extra messages is also supported, for example:
  * PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2)
@@ -292,9 +293,10 @@ inline void throw_on_error(T e) {
 #define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...)  \
   do {                                                                  \
     if (UNLIKELY(!((__VAL0)__CMP(__VAL1)))) {                           \
-      PADDLE_THROW("enforce %s " #__CMP " %s failed, %s " #__INV_CMP    \
-                   " %s\n%s",                                           \
-                   #__VAL0, #__VAL1, paddle::string::to_string(__VAL0), \
+      PADDLE_THROW("Data check failed. Expected %s " #__CMP             \
+                   " %s, but received %s:%s " #__INV_CMP " %s:%s.\n%s", \
+                   #__VAL0, #__VAL1, #__VAL0,                           \
+                   paddle::string::to_string(__VAL0), #__VAL1,          \
                    paddle::string::to_string(__VAL1),                   \
                    paddle::string::Sprintf("" __VA_ARGS__));            \
     }                                                                   \
diff --git a/paddle/fluid/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc
index 4cee93f3a4..f9e2e8c69d 100644
--- a/paddle/fluid/platform/gpu_info.cc
+++ b/paddle/fluid/platform/gpu_info.cc
@@ -100,25 +100,25 @@ size_t GpuMinChunkSize() {
 
 size_t GpuMaxChunkSize() {
   size_t total = 0;
-  size_t available = 0;
+  size_t available_memory = 0;
 
-  GpuMemoryUsage(&available, &total);
-  VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/"
+  GpuMemoryUsage(&available_memory, &total);
+  VLOG(10) << "GPU Usage " << available_memory / 1024 / 1024 << "M/"
            << total / 1024 / 1024 << "M";
   size_t reserving = static_cast<size_t>(0.05 * total);
   // If available less than minimum chunk size, no usable memory exists.
-  available =
-      std::min(std::max(available, GpuMinChunkSize()) - GpuMinChunkSize(),
-               total - reserving);
+  available_memory = std::min(
+      std::max(available_memory, GpuMinChunkSize()) - GpuMinChunkSize(),
+      total - reserving);
 
   // Reserving the rest memory for page tables, etc.
-  size_t allocating = static_cast<size_t>(FLAGS_fraction_of_gpu_memory_to_use *
-                                          (total - reserving));
+  size_t allocating_memory = static_cast<size_t>(
+      FLAGS_fraction_of_gpu_memory_to_use * (total - reserving));
 
-  PADDLE_ENFORCE_LE(allocating, available);
+  PADDLE_ENFORCE_LE(allocating_memory, available_memory);
 
-  return allocating;
+  return allocating_memory;
 }
 
 void GpuMemcpyAsync(void *dst, const void *src, size_t count,
--
GitLab
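
Reviewer note, appended after the signature and not part of the diff: a
minimal sketch of how the reworded PADDLE_ENFORCE_EQ failure reads after this
change. It assumes a translation unit that can include
paddle/fluid/platform/enforce.h; the variables a and b and their values are
illustrative only.

    // Illustrative sketch only; assumes enforce.h is on the include path.
    #include "paddle/fluid/platform/enforce.h"

    int main() {
      int a = 1;
      int b = 2;
      // Before this patch the failure message read roughly:
      //   enforce a == b failed, 1 != 2
      // After this patch it reads roughly:
      //   Data check failed. Expected a == b, but received a:1 != b:2.
      PADDLE_ENFORCE_EQ(a, b);
      return 0;
    }

One small inconsistency worth flagging: the doc comment added in enforce.h
renders the values as "a(1) != b(2)", while the format string in
__PADDLE_BINARY_COMPARE emits "a:1 != b:2"; the sketch above follows the
format string.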
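
A second reviewer note: the renames in GpuMaxChunkSize() do not change its
arithmetic, which this standalone sketch distills with toy numbers. Everything
here is a stand-in: min_chunk replaces GpuMinChunkSize(), fraction replaces
FLAGS_fraction_of_gpu_memory_to_use, and the totals are made up.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      std::size_t total = 8000;            // pretend total GPU memory
      std::size_t available_memory = 6000; // pretend free GPU memory
      std::size_t min_chunk = 256;         // stand-in for GpuMinChunkSize()
      double fraction = 0.5;  // stand-in for the gflags fraction

      // Hold back 5% of total memory for the system, page tables, etc.
      std::size_t reserving = static_cast<std::size_t>(0.05 * total);

      // Clamp: taking max() before subtracting min_chunk keeps the value
      // non-negative, and the min() keeps it under the non-reserved total.
      available_memory = std::min(
          std::max(available_memory, min_chunk) - min_chunk,
          total - reserving);

      // The chunk to allocate is a fraction of the non-reserved total...
      std::size_t allocating_memory =
          static_cast<std::size_t>(fraction * (total - reserving));

      // ...and, mirroring PADDLE_ENFORCE_LE(allocating_memory,
      // available_memory), it must not exceed what is actually free.
      if (allocating_memory > available_memory) {
        std::fprintf(stderr, "allocating_memory %zu > available_memory %zu\n",
                     allocating_memory, available_memory);
        return 1;
      }
      std::printf("max chunk size: %zu\n", allocating_memory);
      return 0;
    }

With these toy numbers: reserving = 400, the clamped available_memory = 5744,
and allocating_memory = 3800, so the check passes and 3800 is returned.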