diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 4d29564aeed74558b7f0ec580568f70dad0b40cc..041187665af6ad0d75a7c55fe6ed451fe6f45b73 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -391,7 +391,7 @@ class ExecutionContext {
     PADDLE_ENFORCE(
         dynamic_cast<platform::TemporaryAllocation*>(allocation_ptr) != nullptr,
         "The AllocationPtr must be TemporaryAllocation.");
-    PADDLE_ENFORCE_EQ(allocation_ptr->size(),
+    PADDLE_ENFORCE_GE(allocation_ptr->size(),
                       framework::product(dim) * sizeof(T));
 
     paddle::framework::Tensor temp_tensor(
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 131712ca88370aa977184fcb00d09f2283db110c..6854282a164773ad32a105c254b12a3bb4731e7f 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -100,14 +100,14 @@ set(OCR_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/ocr")
 if (NOT EXISTS ${OCR_INSTALL_DIR})
     inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Focr.tar.gz")
 endif()
-inference_analysis_api_test_with_refer_result(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc)
+inference_analysis_api_test_with_refer_result(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc SERIAL)
 
 # mobilenet with transpose op
 set(MOBILENET_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet")
 if (NOT EXISTS ${MOBILENET_INSTALL_DIR})
     inference_download_and_uncompress(${MOBILENET_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Fmobilenet.tar.gz")
 endif()
-inference_analysis_api_test_with_refer_result(test_analyzer_mobilenet_transpose ${MOBILENET_INSTALL_DIR} analyzer_vis_tester.cc)
+inference_analysis_api_test_with_refer_result(test_analyzer_mobilenet_transpose ${MOBILENET_INSTALL_DIR} analyzer_vis_tester.cc SERIAL)
 
 # resnet50
 inference_analysis_api_test_with_fake_data(test_analyzer_resnet50
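// Illustrative sketch only (not part of the patch): the EQ -> GE change above
// exists because a temporary allocation handed back by the allocator may now
// come from a reused block, so it can be larger than the exact number of
// bytes requested. The Allocation type below is a hypothetical stand-in used
// to show the invariant, not the Paddle class.
#include <cassert>
#include <cstddef>

struct Allocation {
  void* ptr;
  size_t size;
};

// Accept any block that is at least as large as the request; an exact-size
// check would spuriously fail once reuse is enabled.
inline void CheckTmpAllocationSize(const Allocation& a, size_t required_bytes) {
  assert(a.size >= required_bytes);
}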
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index 524b5fa0eecf8f031e05d1a70bae24cf2a52325e..7572468e323e08c3538b92715395c787026eb071 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -313,13 +313,12 @@ void CompareDeterministic(
   int num_times = FLAGS_repeat;
   auto predictor = CreateTestPredictor(config, FLAGS_use_analysis);
 
-  // warmup run
   std::vector<PaddleTensor> warmup_outputs, outputs;
-  predictor->Run(inputs[0], &warmup_outputs, batch_size);
-  // run num_times to Compare Deterministic Result.
-  for (int i = 0; i < num_times; i++) {
-    for (size_t j = 0; j < inputs.size(); j++) {
+  for (size_t j = 0; j < inputs.size(); j++) {
+    // warmup run
+    predictor->Run(inputs[j], &warmup_outputs, batch_size);
+    for (int i = 0; i < num_times; i++) {
       predictor->Run(inputs[j], &outputs, batch_size);
       CompareResult(outputs, warmup_outputs);
     }
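// Illustrative sketch only (not part of the patch): the restructured loop
// above runs one warmup pass per input batch and then compares every repeated
// run against that batch's own warmup output, instead of always comparing
// against the output of inputs[0]. Predictor/Input/Output are hypothetical
// stand-ins, not the Paddle inference API.
#include <cstddef>
#include <vector>

template <typename Predictor, typename Input, typename Output>
void CompareDeterministicSketch(Predictor* predictor,
                                const std::vector<Input>& inputs,
                                int num_times) {
  std::vector<Output> reference, outputs;
  for (size_t j = 0; j < inputs.size(); ++j) {
    predictor->Run(inputs[j], &reference);  // warmup / reference run
    for (int i = 0; i < num_times; ++i) {
      predictor->Run(inputs[j], &outputs);  // repeated run
      // A CompareResult-style check would assert outputs == reference here.
    }
  }
}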
diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc
index 25a723fc07948888ef3dc61320fb9bec026390de..f5208e7a601f4dd33b486e5840178022f66431e5 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc
@@ -137,7 +137,6 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
     // ------------------- cudnn conv algorithm ---------------------
     cudnnConvolutionFwdAlgo_t algo;
     auto handle = dev_ctx.cudnn_handle();
-    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
 
     bool half_float = false;
 #if CUDA_VERSION >= 9000 && CUDNN_VERSION_MIN(7, 0, 1)
@@ -158,6 +157,8 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
       VLOG(5) << "NOT use cudnn_tensor_op_math";
     }
 #endif
+    Tensor cudnn_workspace;
+    void* cudnn_workspace_ptr = nullptr;
 
     auto x_dims = framework::vectorize(input->dims());
     auto f_dims = framework::vectorize(filter->dims());
@@ -180,21 +181,26 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
                 .Var(kCUDNNFwdAlgoCache)
                 ->GetMutable<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>();
       }
+      cudnn_workspace =
+          ctx.AllocateTmpTensor<int8_t, platform::CUDADeviceContext>(
+              framework::make_ddim(
+                  {static_cast<int64_t>(workspace_size_limit)}),
+              dev_ctx);
+      cudnn_workspace_ptr = static_cast<void*>(cudnn_workspace.data<int8_t>());
+
       algo = algo_cache->GetAlgorithm(
           x_dims, f_dims, strides, paddings, dilations, 0, [&]() {
             int returned_algo_count;
             std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS>
                 fwd_perf_stat;
-            auto cudnn_find_func = [&](void* cudnn_workspace) {
-              CUDNN_ENFORCE(
-                  platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
-                      handle, cudnn_input_desc, input_data, cudnn_filter_desc,
-                      filter_data, cudnn_conv_desc, cudnn_output_desc,
-                      output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count,
-                      fwd_perf_stat.data(), cudnn_workspace,
-                      workspace_size_limit));
-            };
-            workspace_handle.RunFunc(cudnn_find_func, workspace_size_limit);
+
+            CUDNN_ENFORCE(
+                platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
+                    handle, cudnn_input_desc, input_data, cudnn_filter_desc,
+                    filter_data, cudnn_conv_desc, cudnn_output_desc,
+                    output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count,
+                    fwd_perf_stat.data(), cudnn_workspace_ptr,
+                    workspace_size_limit));
 
             VLOG(3) << "Perf result: (algo: stat, time, memory)";
             for (int i = 0; i < returned_algo_count; ++i) {
@@ -219,17 +225,23 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_LE(workspace_size_in_bytes, workspace_size_limit,
                       "workspace_size to be allocated exceeds the limit");
 
+    // Allocate on GPU memory
+    if (!cudnn_workspace_ptr) {
+      cudnn_workspace =
+          ctx.AllocateTmpTensor<int8_t, platform::CUDADeviceContext>(
+              framework::make_ddim(
+                  {static_cast<int64_t>(workspace_size_in_bytes)}),
+              dev_ctx);
+      cudnn_workspace_ptr = static_cast<void*>(cudnn_workspace.data<int8_t>());
+    }
     // ------------------- cudnn conv forward ---------------------
     ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
     for (int i = 0; i < groups; i++) {
-      auto cudnn_func = [&](void* cudnn_workspace) {
-        CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
-            handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in,
-            cudnn_filter_desc, filter_data + i * group_offset_filter,
-            cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes,
-            &beta, cudnn_output_desc, output_data + i * group_offset_out));
-      };
-      workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
+      CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
+          handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in,
+          cudnn_filter_desc, filter_data + i * group_offset_filter,
+          cudnn_conv_desc, algo, cudnn_workspace_ptr, workspace_size_in_bytes,
+          &beta, cudnn_output_desc, output_data + i * group_offset_out));
     }
   }
 };
@@ -353,10 +365,20 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
       workspace_size_limit = max_user_size * 1024 * 1024;
     }
 
+    Tensor cudnn_workspace;
+    void* cudnn_workspace_ptr = nullptr;
+    if ((input_data || filter_data) && exhaustive_search) {
+      cudnn_workspace =
+          ctx.AllocateTmpTensor<int8_t, platform::CUDADeviceContext>(
+              framework::make_ddim(
+                  {static_cast<int64_t>(workspace_size_limit)}),
+              dev_ctx);
+      cudnn_workspace_ptr = static_cast<void*>(cudnn_workspace.data<int8_t>());
+    }
+
     auto x_dims = framework::vectorize(input->dims());
     auto f_dims = framework::vectorize(filter->dims());
     auto handle = dev_ctx.cudnn_handle();
-    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
     if (input_grad) {
       T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
       if (exhaustive_search) {
@@ -374,25 +396,22 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
                 ->GetMutable<
                     AlgorithmsCache<cudnnConvolutionBwdDataAlgo_t>>();
       }
+
       data_algo = data_algo_cache->GetAlgorithm(
           x_dims, f_dims, strides, paddings, dilations, 0, [&]() {
             int returned_algo_count;
             std::array<cudnnConvolutionBwdDataAlgoPerf_t,
                        kNUM_CUDNN_BWD_DATA_ALGS>
                 data_perf_stat;
-            auto cudnn_find_bd_data_func = [&](void* cudnn_workspace) {
-              CUDNN_ENFORCE(
-                  platform::dynload::
-                      cudnnFindConvolutionBackwardDataAlgorithmEx(
-                          handle, cudnn_filter_desc, filter_data,
-                          cudnn_output_grad_desc, output_grad_data,
-                          cudnn_conv_desc, cudnn_input_desc, input_grad_data,
-                          kNUM_CUDNN_BWD_DATA_ALGS, &returned_algo_count,
-                          data_perf_stat.data(), cudnn_workspace,
-                          workspace_size_limit));
-            };
-            workspace_handle.RunFunc(cudnn_find_bd_data_func,
-                                     workspace_size_limit);
+
+            CUDNN_ENFORCE(platform::dynload::
+                              cudnnFindConvolutionBackwardDataAlgorithmEx(
+                                  handle, cudnn_filter_desc, filter_data,
+                                  cudnn_output_grad_desc, output_grad_data,
+                                  cudnn_conv_desc, cudnn_input_desc,
+                                  input_grad_data, kNUM_CUDNN_BWD_DATA_ALGS,
+                                  &returned_algo_count, data_perf_stat.data(),
+                                  cudnn_workspace_ptr, workspace_size_limit));
 
             VLOG(3) << "Perf result: (algo: stat, time, memory)";
             for (int i = 0; i < returned_algo_count; ++i) {
@@ -443,25 +462,23 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
                 ->GetMutable<
                     AlgorithmsCache<cudnnConvolutionBwdFilterAlgo_t>>();
       }
+
       filter_algo = f_algo_cache->GetAlgorithm(
           x_dims, f_dims, strides, paddings, dilations, 0, [&]() {
             int returned_algo_count;
             std::array<cudnnConvolutionBwdFilterAlgoPerf_t,
                        kNUM_CUDNN_BWD_FILTER_ALGS>
                 filter_perf_stat;
-            auto cudnn_find_bd_f_func = [&](void* cudnn_workspace) {
-              CUDNN_ENFORCE(
-                  platform::dynload::
-                      cudnnFindConvolutionBackwardFilterAlgorithmEx(
-                          handle, cudnn_input_desc, input_data,
-                          cudnn_output_grad_desc, output_grad_data,
-                          cudnn_conv_desc, cudnn_filter_desc,
-                          filter_grad_data, kNUM_CUDNN_BWD_FILTER_ALGS,
-                          &returned_algo_count, filter_perf_stat.data(),
-                          cudnn_workspace, workspace_size_limit));
-            };
-            workspace_handle.RunFunc(cudnn_find_bd_f_func,
-                                     workspace_size_limit);
+
+            CUDNN_ENFORCE(
+                platform::dynload::
+                    cudnnFindConvolutionBackwardFilterAlgorithmEx(
+                        handle, cudnn_input_desc, input_data,
+                        cudnn_output_grad_desc, output_grad_data,
+                        cudnn_conv_desc, cudnn_filter_desc, filter_grad_data,
+                        kNUM_CUDNN_BWD_FILTER_ALGS, &returned_algo_count,
+                        filter_perf_stat.data(), cudnn_workspace_ptr,
+                        workspace_size_limit));
             return filter_perf_stat[0].algo;
           });
         VLOG(3) << "cuDNN backward filter algo " << filter_algo;
@@ -482,6 +499,16 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
       workspace_size_in_bytes = std::max(workspace_size_in_bytes, tmp_size);
     }
 
+    // ------------------- cudnn conv workspace ---------------------
+    if (!cudnn_workspace_ptr) {
+      cudnn_workspace =
+          ctx.AllocateTmpTensor<int8_t, platform::CUDADeviceContext>(
+              framework::make_ddim(
+                  {static_cast<int64_t>(workspace_size_in_bytes)}),
+              dev_ctx);
+      cudnn_workspace_ptr = static_cast<void*>(cudnn_workspace.data<int8_t>());
+    }
+
     // ------------------- cudnn conv backward data ---------------------
     ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
     if (input_grad) {
@@ -489,15 +516,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
       // Because beta is zero, it is unnecessary to reset input_grad.
       for (int i = 0; i < groups; i++) {
-        auto cudnn_func = [&](void* cudnn_workspace) {
-          CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
-              handle, &alpha, cudnn_filter_desc,
-              filter_data + i * group_offset_filter, cudnn_output_grad_desc,
-              output_grad_data + i * group_offset_out, cudnn_conv_desc,
-              data_algo, cudnn_workspace, workspace_size_in_bytes, &beta,
-              cudnn_input_desc, input_grad_data + i * group_offset_in));
-        };
-        workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
+        CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
+            handle, &alpha, cudnn_filter_desc,
+            filter_data + i * group_offset_filter, cudnn_output_grad_desc,
+            output_grad_data + i * group_offset_out, cudnn_conv_desc, data_algo,
+            cudnn_workspace_ptr, workspace_size_in_bytes, &beta,
+            cudnn_input_desc, input_grad_data + i * group_offset_in));
       }
     }
     // ------------------- cudnn conv backward filter ---------------------
@@ -505,15 +529,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
       T* filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
       // Because beta is zero, it is unnecessary to reset filter_grad.
       for (int i = 0; i < groups; i++) {
-        auto cudnn_func = [&](void* cudnn_workspace) {
-          CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
-              handle, &alpha, cudnn_input_desc,
-              input_data + i * group_offset_in, cudnn_output_grad_desc,
-              output_grad_data + i * group_offset_out, cudnn_conv_desc,
-              filter_algo, cudnn_workspace, workspace_size_in_bytes, &beta,
-              cudnn_filter_desc, filter_grad_data + i * group_offset_filter));
-        };
-        workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
+        CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
+            handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in,
+            cudnn_output_grad_desc, output_grad_data + i * group_offset_out,
+            cudnn_conv_desc, filter_algo, cudnn_workspace_ptr,
+            workspace_size_in_bytes, &beta, cudnn_filter_desc,
+            filter_grad_data + i * group_offset_filter));
       }
     }
   }
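// Illustrative sketch only (not part of the patch): the kernels above stop
// funnelling cuDNN calls through a workspace-handle callback and instead keep
// one workspace buffer alive for the whole kernel, allocating it lazily the
// first time a size is known. The struct below is a hypothetical stand-in for
// the temporary workspace tensor, not the Paddle API.
#include <cstddef>
#include <cstdint>
#include <vector>

struct WorkspaceSketch {
  std::vector<int8_t> storage;
  void* ptr = nullptr;

  // Allocate the buffer once; later calls are no-ops, mirroring the
  // `if (!cudnn_workspace_ptr)` guard in the diff.
  void EnsureAllocated(size_t bytes) {
    if (ptr == nullptr) {
      storage.resize(bytes);
      ptr = storage.data();
    }
  }
};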
diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h
index c81d17380cf894631d06588c007c2e11ce5c7836..d376f90ad5754d70f3b9f30957eb2e2f584f8da9 100644
--- a/paddle/fluid/platform/device_context.h
+++ b/paddle/fluid/platform/device_context.h
@@ -61,7 +61,7 @@ namespace platform {
 * the allocations of temp_allocation_queue:
 *  - when the Stream calls cudaStreamSynchronize;
 *  - when the allocation size of opportunities exceeds a certain threshold
- *    (defined by FLAGS_limit_of_temporary_allocation).
+ *    (defined by FLAGS_limit_of_tmp_allocation).
 *
 * */
 class DeviceTemporaryAllocator {
diff --git a/paddle/fluid/platform/temporary_allocator.cc b/paddle/fluid/platform/temporary_allocator.cc
index 0be017f75bcc8aff5073ebb2c5179cf7250be8b9..9cbdfe46e78dc84e58eae6929c887221d9562c69 100644
--- a/paddle/fluid/platform/temporary_allocator.cc
+++ b/paddle/fluid/platform/temporary_allocator.cc
@@ -15,8 +15,15 @@
 #include "paddle/fluid/platform/temporary_allocator.h"
 #include "paddle/fluid/memory/allocation/allocator_facade.h"
 
-DEFINE_double(limit_of_temporary_allocation, -1,
-              "The up limit of temporary_allocation size.");
+DEFINE_int64(limit_of_tmp_allocation, -1,
+             "The upper limit of temporary_allocation size.");
+DEFINE_double(times_excess_than_required_tmp_allocation, 2,
+              "times_excess_than_required_tmp_allocation indicates the "
+              "max size the TemporaryAllocator can return. For example, "
+              "if the required memory size is N, and "
+              "times_excess_than_required_tmp_allocation is 2.0, "
+              "the TemporaryAllocator will return an available allocation "
+              "whose size is in the range N ~ 2*N.");
 
 namespace paddle {
 namespace platform {
@@ -29,24 +36,25 @@ TemporaryAllocation::TemporaryAllocation(
       underlying_allocation_(std::move(underlying_allocation)) {}
 
 TemporaryAllocator::TemporaryAllocator(platform::Place place) : place_(place) {
-  temp_mem_queue_.reset(new std::deque<TemporaryAllocation *>());
+  temp_mem_map_.reset(new std::multimap<size_t, TemporaryAllocation *>());
 }
 
 bool TemporaryAllocator::IsAllocThreadSafe() const { return true; }
 
 void TemporaryAllocator::Release(const std::function<void()> &callback) {
-  std::shared_ptr<std::deque<TemporaryAllocation *>> t_allocations;
+  std::unique_ptr<std::multimap<size_t, TemporaryAllocation *>> t_allocations;
   {
     std::unique_lock<std::mutex> lock(mtx_);
     callback();
-    t_allocations = temp_mem_queue_;
-    temp_mem_queue_.reset(new std::deque<TemporaryAllocation *>());
+    t_allocations.swap(temp_mem_map_);
+    temp_mem_map_.reset(new std::multimap<size_t, TemporaryAllocation *>());
     wait_delete_mem_ = 0;
   }
+
   for (auto tmp : *t_allocations) {
-    VLOG(10) << "Delete temporary allocation " << tmp->ptr()
-             << " size: " << tmp->size();
-    delete tmp;
+    VLOG(10) << "Delete temporary allocation " << tmp.second->ptr()
+             << " size: " << tmp.second->size();
+    delete tmp.second;
   }
 }
@@ -54,28 +62,34 @@ void TemporaryAllocator::Free(alloc::Allocation *allocation) {
   auto *temp_allocation = dynamic_cast<TemporaryAllocation *>(allocation);
   PADDLE_ENFORCE_NOT_NULL(temp_allocation);
   if (platform::is_gpu_place(temp_allocation->place())) {
+    PADDLE_ENFORCE(platform::is_same_place(temp_allocation->place(), place_),
+                   "The place should be the same.");
     size_t wait_delete_mem = 0;
     {
       std::unique_lock<std::mutex> lock(mtx_);
-      temp_mem_queue_->emplace_back(temp_allocation);
+      temp_mem_map_->emplace(temp_allocation->size(), temp_allocation);
       wait_delete_mem_ += temp_allocation->size();
       wait_delete_mem = wait_delete_mem_;
       VLOG(10) << "Move temporary allocation: " << temp_allocation->ptr()
                << " to delete queue: " << temp_allocation->size() << "; "
-               << "wait_delete_mem: " << wait_delete_mem_;
+               << "wait_delete_mem: " << wait_delete_mem;
     }
-    if (FLAGS_limit_of_temporary_allocation > 0 &&
-        wait_delete_mem > FLAGS_limit_of_temporary_allocation) {
+
+    if (FLAGS_limit_of_tmp_allocation > 0 &&
+        wait_delete_mem > static_cast<size_t>(FLAGS_limit_of_tmp_allocation)) {
+      PADDLE_ENFORCE(callback_ != nullptr, "The callback is non-initialized.");
       Release(callback_);
     }
     return;
   }
+  VLOG(10) << "Delete temporary allocation " << temp_allocation->ptr()
+           << " size: " << temp_allocation->size();
   delete temp_allocation;
 }
 
 size_t TemporaryAllocator::TemporaryAllocationQueueSize() {
   std::unique_lock<std::mutex> lock(mtx_);
-  return temp_mem_queue_ ? temp_mem_queue_->size() : 0;
+  return temp_mem_map_ ? temp_mem_map_->size() : 0;
 }
 
 void TemporaryAllocator::SetCallback(const std::function<void()> &callback) {
@@ -84,6 +98,27 @@ void TemporaryAllocator::SetCallback(const std::function<void()> &callback) {
 
 alloc::Allocation *TemporaryAllocator::AllocateImpl(
     size_t size, alloc::Allocator::Attr attr) {
+  {
+    // Find an available allocation in temp_mem_map.
+    std::unique_lock<std::mutex> lock(mtx_);
+    if (temp_mem_map_->size()) {
+      auto it = temp_mem_map_->lower_bound(size);
+      // FIXME(zcd): Not sure the best value of excess fraction.
+      if (it != temp_mem_map_->end() &&
+          it->first <
+              static_cast<size_t>(
+                  size * FLAGS_times_excess_than_required_tmp_allocation)) {
+        auto tmp_ptr = it->second;
+        temp_mem_map_->erase(it);
+        wait_delete_mem_ -= tmp_ptr->size();
+        VLOG(10) << "Reuse temporary allocation: " << tmp_ptr->ptr() << ": "
+                 << tmp_ptr->size();
+        return tmp_ptr;
+      }
+    }
+  }
+  // If no available allocation is found, get an allocation from
+  // AllocatorFacadeInstance.
   auto raw_allocation =
       alloc::AllocatorFacade::Instance().Alloc(place_, size, attr);
   auto temp_mem = new TemporaryAllocation(std::move(raw_allocation));
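// Illustrative sketch only (not part of the patch): a minimal model of the
// size-keyed reuse implemented in AllocateImpl above. Freed blocks live in a
// multimap ordered by size; a request takes the smallest block whose size is
// still below `excess` times the request, otherwise it falls through to a
// fresh allocation. ReusePool and its malloc/free backing are simplified
// stand-ins, not the Paddle classes.
#include <cstddef>
#include <cstdlib>
#include <map>
#include <mutex>

class ReusePool {
 public:
  explicit ReusePool(double excess) : excess_(excess) {}

  void* Allocate(size_t size) {
    {
      std::lock_guard<std::mutex> lock(mtx_);
      auto it = free_blocks_.lower_bound(size);
      if (it != free_blocks_.end() &&
          it->first < static_cast<size_t>(size * excess_)) {
        void* ptr = it->second;
        free_blocks_.erase(it);  // reuse an existing block of acceptable size
        return ptr;
      }
    }
    return std::malloc(size);  // no suitable block: allocate a new one
  }

  void Free(void* ptr, size_t size) {
    std::lock_guard<std::mutex> lock(mtx_);
    free_blocks_.emplace(size, ptr);  // keep the block around for later reuse
  }

 private:
  double excess_;
  std::multimap<size_t, void*> free_blocks_;
  std::mutex mtx_;
};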
diff --git a/paddle/fluid/platform/temporary_allocator.h b/paddle/fluid/platform/temporary_allocator.h
index 812c4a333189d8c432be398ca0ebbce11f957561..d657a14223326aa1e2cb5b154a10a56ae742f95c 100644
--- a/paddle/fluid/platform/temporary_allocator.h
+++ b/paddle/fluid/platform/temporary_allocator.h
@@ -15,6 +15,7 @@
 #pragma once
 #include <condition_variable>  // NOLINT
 #include <deque>
+#include <map>
 #include <mutex>  // NOLINT
 #include "paddle/fluid/memory/allocation/allocator.h"
 #include "paddle/fluid/platform/lock_guard_ptr.h"
@@ -39,7 +40,7 @@ class TemporaryAllocation : public memory::allocation::Allocation {
 *
 * There is one opportunity to free the allocations of temp_allocation_queue:
 *  - when the allocation size of opportunities exceeds a certain threshold
- *    (defined by FLAGS_limit_of_temporary_allocation).
+ *    (defined by FLAGS_limit_of_tmp_allocation).
 *
 * */
 class TemporaryAllocator : public memory::allocation::Allocator {
@@ -62,11 +63,10 @@ class TemporaryAllocator : public memory::allocation::Allocator {
  private:
   platform::Place place_;
 
   // When the allocation is not held by any variable, it should be placed
-  // to temp_mem_queue immediately.
-  std::shared_ptr<std::deque<TemporaryAllocation *>> temp_mem_queue_{nullptr};
-
+  // to temp_mem_map immediately.
+  std::unique_ptr<std::multimap<size_t, TemporaryAllocation *>> temp_mem_map_{
+      nullptr};
   std::mutex mtx_;
   size_t wait_delete_mem_{0};
   std::function<void()> callback_;
diff --git a/paddle/fluid/platform/temporary_allocator_test.cc b/paddle/fluid/platform/temporary_allocator_test.cc
index 35d1d929819c41b213bc51ec24ac725021a76c88..3879cd540017ea22b0cf4eee794a172e56716b74 100644
--- a/paddle/fluid/platform/temporary_allocator_test.cc
+++ b/paddle/fluid/platform/temporary_allocator_test.cc
@@ -18,7 +18,8 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/tensor_util.h"
 
-DECLARE_double(limit_of_temporary_allocation);
+DECLARE_int64(limit_of_tmp_allocation);
+DECLARE_double(times_excess_than_required_tmp_allocation);
 
 namespace paddle {
 namespace platform {
@@ -35,7 +36,7 @@ class DummyOp : public framework::OperatorBase {
                const platform::Place& place) const override {}
 };
 
-TEST(temporary_allocator, temporary_allocator) {
+TEST(temporary_allocator, test_base_function) {
   platform::CPUPlace cpu_place;
   TemporaryAllocator alloc(cpu_place);
   alloc.Allocate(100);
@@ -59,10 +60,10 @@
 #endif
 }
 
-TEST(temporary_allocator, add_callback) {
+TEST(temporary_allocator, test_flags_function) {
 #ifdef PADDLE_WITH_CUDA
-  const double limit = FLAGS_limit_of_temporary_allocation;
-  FLAGS_limit_of_temporary_allocation = 10;
+  const int64_t limit = FLAGS_limit_of_tmp_allocation;
+  FLAGS_limit_of_tmp_allocation = 10;
   platform::CUDAPlace gpu_place(0);
   TemporaryAllocator gpu_alloc(gpu_place);
@@ -78,7 +79,52 @@
   });
   { gpu_alloc.Allocate(100); }
   PADDLE_ENFORCE(deleted);
-  FLAGS_limit_of_temporary_allocation = limit;
+  FLAGS_limit_of_tmp_allocation = limit;
+#endif
+}
+
+TEST(temporary_allocator, test_reuse_tmp_allocation) {
+#ifdef PADDLE_WITH_CUDA
+  platform::CUDAPlace gpu_place(0);
+  TemporaryAllocator gpu_alloc(gpu_place);
+  gpu_alloc.SetCallback([]() {});
+
+  void* tmp_allocation_ptr1 = nullptr;
+  {
+    PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0);
+    auto tmp_allocation1 = gpu_alloc.Allocate(100);
+    tmp_allocation_ptr1 = tmp_allocation1->ptr();
+  }
+  PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 1);
+  auto tmp_allocation2 = gpu_alloc.Allocate(100);
+  void* tmp_allocation_ptr2 = tmp_allocation2->ptr();
+  PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0);
+  PADDLE_ENFORCE_EQ(tmp_allocation_ptr1, tmp_allocation_ptr2);
+
+  auto tmp_allocation3 = gpu_alloc.Allocate(100);
+  void* tmp_allocation_ptr3 = tmp_allocation2->ptr();
+  PADDLE_ENFORCE_EQ(tmp_allocation_ptr1, tmp_allocation_ptr3);
+#endif
+}
+
+TEST(temporary_allocator, test_times_excess_than_required_tmp_allocation) {
+#ifdef PADDLE_WITH_CUDA
+  platform::CUDAPlace gpu_place(0);
+  TemporaryAllocator gpu_alloc(gpu_place);
+  gpu_alloc.SetCallback([]() {});
+  double excess_fraction = FLAGS_times_excess_than_required_tmp_allocation;
+  void* tmp_allocation_ptr1 = nullptr;
+  {
+    PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0);
+    auto tmp_allocation1 =
+        gpu_alloc.Allocate(static_cast<size_t>(100 * excess_fraction - 1));
+    tmp_allocation_ptr1 = tmp_allocation1->ptr();
+  }
+  PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 1);
+  auto tmp_allocation2 = gpu_alloc.Allocate(100);
+  void* tmp_allocation_ptr2 = tmp_allocation2->ptr();
+  PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0);
+  PADDLE_ENFORCE_EQ(tmp_allocation_ptr1, tmp_allocation_ptr2);
 #endif
 }
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 2c17716500ababfab3216a5ec47fecca30065ff1..686550a3c8d7d55f06b03132124621c5d0db342f 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -155,7 +155,8 @@ def __bootstrap__():
         'fraction_of_gpu_memory_to_use', 'cudnn_deterministic',
         'enable_cublas_tensor_op_math', 'conv_workspace_size_limit',
         'cudnn_exhaustive_search', 'memory_optimize_debug', 'selected_gpus',
-        'sync_nccl_allreduce'
+        'sync_nccl_allreduce', 'limit_of_tmp_allocation',
+        'times_excess_than_required_tmp_allocation'
     ]
 
     core.init_gflags([sys.argv[0]] +