Unverified · Commit 6527a7df authored by Tao Luo, committed by GitHub

replace part of PADDLE_ASSERT with PADDLE_ENFORCE (#19285)

* replace part of PADDLE_ASSERT with PADDLE_ENFORCE

test=develop

* remove unused fallback_alloc_size_

* add unit-test of CUDAPinnedAllocator

test=develop
Parent commit: 62facc7e
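For readers unfamiliar with the two macro families this commit swaps: PADDLE_ASSERT is an assert-style check (abort on failure, no structured message), while PADDLE_ENFORCE / PADDLE_ENFORCE_EQ / _NE / _GE / _NOT_NULL report which comparison failed together with the operand values, so the failure can be caught and inspected. The sketch below uses simplified stand-in macros, not the real definitions from paddle/fluid/platform/assert.h and paddle/fluid/platform/enforce.h, which are considerably richer (formatted messages, call-site information, device-side variants).

```cpp
// Minimal sketch contrasting assert-style and enforce-style checks.
// These stand-in macros only illustrate the idea; the real PADDLE_ASSERT and
// PADDLE_ENFORCE_* macros in paddle/fluid/platform/ are considerably richer.
#include <cassert>
#include <cstddef>
#include <sstream>
#include <stdexcept>

// Assert style: aborts the process on failure and is usually compiled out
// in release builds (NDEBUG), leaving no message for the caller.
#define SKETCH_ASSERT(cond) assert(cond)

// Enforce style: always evaluated; throws an exception that records the
// expression and both operand values, so it can be caught and inspected.
#define SKETCH_ENFORCE_GE(a, b)                                \
  do {                                                         \
    if (!((a) >= (b))) {                                       \
      std::ostringstream oss;                                  \
      oss << "Enforce failed: " << #a << " >= " << #b << " ("  \
          << (a) << " vs " << (b) << ")";                      \
      throw std::runtime_error(oss.str());                     \
    }                                                          \
  } while (0)

void CheckSplitFits(size_t total_size, size_t request) {
  SKETCH_ASSERT(total_size >= request);    // old style, the "-" lines below
  SKETCH_ENFORCE_GE(total_size, request);  // new style, the "+" lines below
}
```

Because a failed enforce surfaces the compared values, errors in the memory bookkeeping touched below become diagnosable instead of ending in a bare abort. The per-file changes follow.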
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/memory/detail/memory_block.h"
-#include "paddle/fluid/platform/assert.h"
+#include "paddle/fluid/platform/enforce.h"
 namespace paddle {
 namespace memory {
@@ -61,7 +61,7 @@ MemoryBlock* MemoryBlock::right_buddy(const MetadataCache& cache) const {
 void MemoryBlock::split(MetadataCache* cache, size_t size) {
   // make sure the split fits
-  PADDLE_ASSERT(total_size(*cache) >= size);
+  PADDLE_ENFORCE_GE(total_size(*cache), size);
   // bail out if there is no room for another partition
   if (total_size(*cache) - size <= sizeof(MemoryBlock::Desc)) {
@@ -102,8 +102,8 @@ void MemoryBlock::split(MetadataCache* cache, size_t size) {
 void MemoryBlock::merge(MetadataCache* cache, MemoryBlock* right_buddy) {
   // only free blocks can be merged
-  PADDLE_ASSERT(type(*cache) == FREE_CHUNK);
-  PADDLE_ASSERT(right_buddy->type(*cache) == FREE_CHUNK);
+  PADDLE_ENFORCE_EQ(type(*cache), FREE_CHUNK);
+  PADDLE_ENFORCE_EQ(right_buddy->type(*cache), FREE_CHUNK);
   auto metadata = cache->load(this);
@@ -129,8 +129,8 @@ void MemoryBlock::merge(MetadataCache* cache, MemoryBlock* right_buddy) {
 void MemoryBlock::mark_as_free(MetadataCache* cache) {
   // check for double free or corruption
-  PADDLE_ASSERT(type(*cache) != FREE_CHUNK);
-  PADDLE_ASSERT(type(*cache) != INVALID_CHUNK);
+  PADDLE_ENFORCE_NE(type(*cache), FREE_CHUNK);
+  PADDLE_ENFORCE_NE(type(*cache), INVALID_CHUNK);
   set_type(cache, FREE_CHUNK);
 }
......
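The two checks in MemoryBlock::split, together with the bail-out condition, encode the buddy-split precondition: the block must be at least as large as the request, and the remainder must still have room for its own MemoryBlock::Desc header before a second partition is created. A toy illustration outside of Paddle, with an invented header size:

```cpp
// Toy illustration of the split precondition enforced above.  kDescSize is a
// made-up stand-in for sizeof(MemoryBlock::Desc); the real value differs.
#include <cstddef>
#include <iostream>

constexpr std::size_t kDescSize = 64;

bool CanSplit(std::size_t total, std::size_t request) {
  if (total < request) return false;   // mirrors PADDLE_ENFORCE_GE(total_size, size)
  return total - request > kDescSize;  // "room for another partition"
}

int main() {
  std::cout << CanSplit(4096, 1024) << "\n";  // 1: remainder can hold a Desc
  std::cout << CanSplit(1060, 1024) << "\n";  // 0: too little left over, no split
  return 0;
}
```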
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "glog/logging.h"
 #include "paddle/fluid/memory/detail/memory_block.h"
-#include "paddle/fluid/platform/assert.h"
+#include "paddle/fluid/platform/enforce.h"
 namespace paddle {
 namespace memory {
@@ -25,12 +25,12 @@ MetadataCache::MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {}
 MemoryBlock::Desc MetadataCache::load(const MemoryBlock* block) const {
   if (uses_gpu_) {
     auto existing_desc = cache_.find(block);
-    PADDLE_ASSERT(existing_desc->second.check_guards());
+    PADDLE_ENFORCE_EQ(existing_desc->second.check_guards(), true);
     return existing_desc->second;
   } else {
     auto* desc = reinterpret_cast<const MemoryBlock::Desc*>(block);
     VLOG(10) << "Load MemoryBlock::Desc type=" << desc->type;
-    PADDLE_ASSERT(desc->check_guards());
+    PADDLE_ENFORCE_EQ(desc->check_guards(), true);
     return *reinterpret_cast<const MemoryBlock::Desc*>(block);
   }
 }
......
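Both branches of MetadataCache::load now verify the descriptor's guard words with PADDLE_ENFORCE_EQ(..., true); comparing against true is simply how a boolean predicate is expressed with the two-operand _EQ macro. check_guards() itself is declared in memory_block.h and is not shown in this diff; the following is a purely hypothetical sketch of what such a guard check looks like, with field names and magic values invented for illustration only.

```cpp
// Hypothetical guard-word check in the spirit of MemoryBlock::Desc::check_guards().
// Field names and magic values here are invented; the real Desc differs.
#include <cstdint>

struct DescSketch {
  std::uint64_t front_guard = 0x1DE5C0DE1DE5C0DEull;  // written when the desc is filled in
  // ... the real Desc carries type, index, size, total_size, etc. ...
  std::uint64_t back_guard = 0x0DDBA11C0DDBA11Cull;

  // Returns false if either guard word has been overwritten, which indicates
  // that neighbouring memory scribbled over this block's metadata.
  bool check_guards() const {
    return front_guard == 0x1DE5C0DE1DE5C0DEull &&
           back_guard == 0x0DDBA11C0DDBA11Cull;
  }
};
```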
@@ -56,7 +56,7 @@ void* AlignedMalloc(size_t size) {
   PADDLE_ENFORCE_EQ(posix_memalign(&p, alignment, size), 0, "Alloc %ld error!",
                     size);
 #endif
-  PADDLE_ENFORCE(p, "Fail to allocate CPU memory: size = %d .", size);
+  PADDLE_ENFORCE_NOT_NULL(p, "Fail to allocate CPU memory: size = %d .", size);
   return p;
 }
@@ -136,15 +136,10 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
 void GPUAllocator::Free(void* p, size_t size, size_t index) {
   cudaError_t err;
-  if (index == 0) {
-    PADDLE_ASSERT(gpu_alloc_size_ >= size);
-    gpu_alloc_size_ -= size;
-    err = cudaFree(p);
-  } else {
-    PADDLE_ASSERT(fallback_alloc_size_ >= size);
-    fallback_alloc_size_ -= size;
-    err = cudaFreeHost(p);
-  }
+  PADDLE_ENFORCE_EQ(index, 0);
+  PADDLE_ENFORCE_GE(gpu_alloc_size_, size);
+  gpu_alloc_size_ -= size;
+  err = cudaFree(p);
   // Purposefully allow cudaErrorCudartUnloading, because
   // that is returned if you ever call cudaFree after the
@@ -194,9 +189,9 @@ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
 void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
   cudaError_t err;
-  PADDLE_ASSERT(index == 1);
-  PADDLE_ASSERT(cuda_pinnd_alloc_size_ >= size);
+  PADDLE_ENFORCE_EQ(index, 1);
+  PADDLE_ENFORCE_GE(cuda_pinnd_alloc_size_, size);
   cuda_pinnd_alloc_size_ -= size;
   err = cudaFreeHost(p);
......
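The two Free() bodies above also show the index convention these allocators use: Alloc reports through *index which pool the memory came from, 0 for ordinary device memory (released with cudaFree) and 1 for pinned host memory (released with cudaFreeHost). With the fallback path deleted, GPUAllocator::Free can simply enforce index == 0. A small illustrative helper, not part of the patch:

```cpp
// Illustrative helper, not part of the patch: dispatch a free call based on
// the index convention used by GPUAllocator (0) and CUDAPinnedAllocator (1).
#include <cstddef>

#include <cuda_runtime.h>

cudaError_t FreeByIndex(void* p, std::size_t index) {
  return index == 0 ? cudaFree(p)       // device memory from GPUAllocator
                    : cudaFreeHost(p);  // pinned host memory from CUDAPinnedAllocator
}
```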
@@ -52,7 +52,6 @@ class GPUAllocator : public SystemAllocator {
  private:
   size_t gpu_alloc_size_ = 0;
-  size_t fallback_alloc_size_ = 0;
   int gpu_id_;
 };
......
@@ -62,4 +62,10 @@ TEST(GPUAllocator, Alloc) {
   TestAllocator(&a, 2048);
   TestAllocator(&a, 0);
 }
+
+TEST(CUDAPinnedAllocator, Alloc) {
+  paddle::memory::detail::CUDAPinnedAllocator a;
+  TestAllocator(&a, 2048);
+  TestAllocator(&a, 0);
+}
 #endif
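The new TEST(CUDAPinnedAllocator, Alloc) reuses the TestAllocator helper defined earlier in system_allocator_test.cc, which this diff does not show. Below is a plausible sketch of such a helper, written only against the Alloc/Free interface visible above; it is not the actual implementation.

```cpp
// Plausible sketch of a TestAllocator-style helper; the real one in
// system_allocator_test.cc may differ.  It relies only on the Alloc/Free
// signatures shown in the diff above.
#include <cstddef>

#include "gtest/gtest.h"
#include "paddle/fluid/memory/detail/system_allocator.h"

void TestAllocatorSketch(paddle::memory::detail::SystemAllocator* a,
                         std::size_t size) {
  std::size_t index = 0;
  void* p = a->Alloc(&index, size);  // the allocator fills in index
  if (size > 0) {
    EXPECT_NE(p, nullptr);           // a non-empty request should succeed
  }
  if (p != nullptr) {
    a->Free(p, size, index);         // free with the same size and index
  }
}
```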
@@ -15,6 +15,7 @@ limitations under the License. */
 #define GLOG_NO_ABBREVIATED_SEVERITIES  // msvc conflict logging with windows.h
 #include "gtest/gtest.h"
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/init.h"
 namespace paddle {
@@ -144,7 +145,7 @@ TEST(float16, lod_tensor_cpu) {
 TEST(float16, floating) {
   // compile time assert.
-  PADDLE_ASSERT(std::is_floating_point<float16>::value);
+  PADDLE_ENFORCE_EQ(std::is_floating_point<float16>::value, true);
 }
 TEST(float16, print) {
......
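One nit the patch leaves in place: the comment in TEST(float16, floating) still says "compile time assert", but PADDLE_ENFORCE_EQ, like PADDLE_ASSERT before it, only runs when the test executes. If a genuinely compile-time guarantee were wanted, it could be written as in the sketch below. This is an aside rather than part of the change, and it assumes the float16 header provides the std::is_floating_point specialization that the existing runtime test already relies on.

```cpp
// Aside, not part of the patch: a compile-time version of the same check.
// Assumes paddle/fluid/platform/float16.h specializes std::is_floating_point
// for float16, which is what the runtime test above already relies on.
#include <type_traits>

#include "paddle/fluid/platform/float16.h"

static_assert(std::is_floating_point<paddle::platform::float16>::value,
              "float16 should be treated as a floating point type");
```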
@@ -19,6 +19,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor_util.h"
+#include "paddle/fluid/platform/enforce.h"
 #define ARITHMETIC_KERNEL(op_type, sign) \
   __global__ void op_type(const half* in1, const half* in2, half* out) { \
@@ -260,8 +261,8 @@ TEST(float16, typeid) {
   int b(0);
   // compile time assert
-  PADDLE_ASSERT(functor(a) == true);
-  PADDLE_ASSERT(functor2(b) == false);
+  PADDLE_ENFORCE_EQ(functor(a), true);
+  PADDLE_ENFORCE_EQ(functor2(b), false);
 }
 // GPU test
......