From 4c8ea492cdc95f61b279c6006ed642b4a0b063a3 Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Wed, 16 Sep 2020 10:00:01 +0800
Subject: [PATCH] use shared dev_ctx (#27313)

---
 paddle/fluid/memory/allocation/best_fit_allocator_test.cu | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/memory/allocation/best_fit_allocator_test.cu b/paddle/fluid/memory/allocation/best_fit_allocator_test.cu
index eb24ba84c88..59c14103ca6 100644
--- a/paddle/fluid/memory/allocation/best_fit_allocator_test.cu
+++ b/paddle/fluid/memory/allocation/best_fit_allocator_test.cu
@@ -16,6 +16,7 @@
 #include <random>
 #include <thread>  // NOLINT
 #include <vector>
+
 #include "gtest/gtest.h"
 #include "paddle/fluid/memory/allocation/best_fit_allocator.h"
 #include "paddle/fluid/memory/allocation/cuda_allocator.h"
@@ -41,12 +42,14 @@ TEST(BestFitAllocator, concurrent_cuda) {
   LockedAllocator concurrent_allocator(
       std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get())));
 
+  platform::CUDAPlace gpu(0);
+  platform::CUDADeviceContext dev_ctx(gpu);
+
   auto th_main = [&](std::random_device::result_type seed) {
     std::default_random_engine engine(seed);
     std::uniform_int_distribution<size_t> dist(1U, 1024U);
-    platform::CUDAPlace gpu(0);
-    platform::CUDADeviceContext dev_ctx(gpu);
     std::array<size_t, 1024> buf;
+
     for (size_t i = 0; i < 128; ++i) {
       size_t allocate_size = dist(engine);
 
-- 
GitLab
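
Note: the pattern this patch applies is to construct the costly device context once and capture it by reference in each worker thread, instead of rebuilding it inside every thread body. A minimal standalone C++ sketch of that pattern follows; it is not Paddle code, and ExpensiveContext is a hypothetical stand-in for platform::CUDADeviceContext.

#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Hypothetical stand-in for an expensive, shareable context object.
struct ExpensiveContext {
  ExpensiveContext() { std::cout << "context constructed once\n"; }
  void Use(int worker_id) {
    std::lock_guard<std::mutex> guard(mu_);  // serialize access to the shared resource
    std::cout << "worker " << worker_id << " using shared context\n";
  }
  std::mutex mu_;
};

int main() {
  ExpensiveContext ctx;  // hoisted out of the per-thread lambda, as dev_ctx is in the patch
  std::vector<std::thread> workers;
  for (int id = 0; id < 4; ++id) {
    // Capture the shared context by reference, as th_main captures dev_ctx via [&].
    workers.emplace_back([&ctx, id] { ctx.Use(id); });
  }
  for (auto& t : workers) t.join();
  return 0;
}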