Commit 7d929036 authored by G guo-ran

add LOG(ERROR) debug output around the nvJPEG allocator callbacks and the decode-worker warmup

Parent 21caffd9
@@ -175,12 +175,20 @@ DecodeHandleFactory CreateDecodeHandleFactory<DeviceType::kCPU>(int target_width
 #if defined(WITH_NVJPEG)
-int GpuDeviceMalloc(void** p, size_t s) { return (int)cudaMalloc(p, s); }
+int GpuDeviceMalloc(void** p, size_t s) {
+  int err = (int)cudaMalloc(p, s);
+  LOG(ERROR) << "cudaMalloc size: ";
+  LOG(ERROR) << s << " err: " << err;
+  return err;
+}
 int GpuDeviceFree(void* p) { return (int)cudaFree(p); }
 int GpuPinnedMalloc(void** p, size_t s, unsigned int flags) {
-  return (int)cudaHostAlloc(p, s, flags);
+  int err = (int)cudaHostAlloc(p, s, flags);
+  LOG(ERROR) << "cudaHostAlloc size: ";
+  LOG(ERROR) << "cudaHostAlloc " << s << " err: " << (int)err;
+  return (int)err;
 }
 int GpuPinnedFree(void* p) { return (int)cudaFreeHost(p); }
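
For context, the functions in this hunk match nvJPEG's device and pinned-host allocator callback signatures. Below is a minimal sketch of how such callbacks are typically registered via nvjpegCreateEx; the actual call site is outside this hunk, and the helper name CreateNvjpegHandleWithAllocators is illustrative only, not part of this commit.

#include <nvjpeg.h>

// Sketch only: wire the allocation hooks above into an nvJPEG handle so that
// every device / pinned-host allocation nvJPEG makes goes through them.
nvjpegHandle_t CreateNvjpegHandleWithAllocators() {
  // Kept static so the structs outlive the handle regardless of whether
  // nvJPEG copies them or keeps pointers to them.
  static nvjpegDevAllocator_t dev_allocator{GpuDeviceMalloc, GpuDeviceFree};
  static nvjpegPinnedAllocator_t pinned_allocator{GpuPinnedMalloc, GpuPinnedFree};
  nvjpegHandle_t handle = nullptr;
  nvjpegStatus_t status = nvjpegCreateEx(NVJPEG_BACKEND_DEFAULT, &dev_allocator,
                                         &pinned_allocator, NVJPEG_FLAGS_DEFAULT, &handle);
  CHECK_EQ(static_cast<int>(status), static_cast<int>(NVJPEG_STATUS_SUCCESS));
  return handle;
}
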
@@ -475,7 +483,11 @@ class Worker final {
     ChannelStatus status = work_queue_.Receive(&work);
     if (status == ChannelStatus::kChannelStatusErrorClosed) { break; }
     CHECK_EQ(status, ChannelStatus::kChannelStatusSuccess);
+    LOG(ERROR) << "start warmup ";
+    LOG(ERROR) << "warmup_size " << warmup_size;
+    LOG(ERROR) << "work->workspace_size " << work->workspace_size;
     handle->WarmupOnce(warmup_size, work->workspace, work->workspace_size);
+    LOG(ERROR) << "warmup done ";
     while (true) {
       const int task_id = work->task_counter->fetch_add(1, std::memory_order_relaxed);
       if (task_id >= work->tasks->size()) { break; }
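
The new logging prints the CUDA runtime's raw integer return codes. If readable messages are wanted instead, cudaGetErrorString can translate them; a small illustrative wrapper follows (not part of this commit, and the name LoggedCudaMalloc is made up).

#include <cuda_runtime.h>
#include <glog/logging.h>

// Illustrative only: same shape as the device-malloc hook in the diff, but the
// error code is logged together with its human-readable description.
int LoggedCudaMalloc(void** p, size_t s) {
  cudaError_t err = cudaMalloc(p, s);
  LOG(ERROR) << "cudaMalloc size: " << s << " err: " << static_cast<int>(err)
             << " (" << cudaGetErrorString(err) << ")";
  return static_cast<int>(err);
}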