Unverified · Commit 4c8ea492 authored by Leo Chen, committed by GitHub

use shared dev_ctx (#27313)

Parent: 696a39e2
@@ -16,6 +16,7 @@
 #include <random>
 #include <thread>  // NOLINT
 #include <vector>
+#include "gtest/gtest.h"
 #include "paddle/fluid/memory/allocation/best_fit_allocator.h"
 #include "paddle/fluid/memory/allocation/cuda_allocator.h"
@@ -41,12 +42,14 @@ TEST(BestFitAllocator, concurrent_cuda) {
   LockedAllocator concurrent_allocator(
       std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get())));
+  platform::CUDAPlace gpu(0);
+  platform::CUDADeviceContext dev_ctx(gpu);
   auto th_main = [&](std::random_device::result_type seed) {
     std::default_random_engine engine(seed);
     std::uniform_int_distribution<size_t> dist(1U, 1024U);
-    platform::CUDAPlace gpu(0);
-    platform::CUDADeviceContext dev_ctx(gpu);
     std::array<size_t, 1024> buf;
     for (size_t i = 0; i < 128; ++i) {
       size_t allocate_size = dist(engine);
......
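Note: the change hoists the device context out of the per-thread lambda, so every worker captures one shared dev_ctx by reference instead of constructing its own. Below is a minimal, self-contained C++ sketch of that pattern; HeavyContext, the thread count, and main are illustrative stand-ins, not part of the Paddle test.

```cpp
#include <iostream>
#include <thread>
#include <vector>

// HeavyContext is a hypothetical stand-in for an expensive-to-construct
// object such as a device context that owns streams and library handles.
struct HeavyContext {
  explicit HeavyContext(int device) : device(device) {
    std::cout << "constructing context for device " << device << "\n";
  }
  int device;
};

int main() {
  // Construct the context once, outside the worker lambda...
  HeavyContext dev_ctx(0);

  // ...and let every thread capture the same instance by reference,
  // mirroring the shared dev_ctx in the patched test above.
  auto th_main = [&dev_ctx](int tid) {
    // Each worker reads the shared context instead of building its own.
    std::cout << "thread " << tid << " uses device " << dev_ctx.device << "\n";
  };

  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back(th_main, i);
  }
  for (auto& th : threads) {
    th.join();
  }
  return 0;
}
```

The likely motivation is to pay the context construction cost once rather than once per thread; the concurrent allocations in the test are still guarded by LockedAllocator, so sharing dev_ctx should not change what the test exercises.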