提交 8abd1634 编写于 作者: V Vadim Pisarevsky

Merge pull request #8404 from khnaba:stream-with-custom-allocator

...@@ -507,6 +507,9 @@ public: ...@@ -507,6 +507,9 @@ public:
//! creates a new asynchronous stream //! creates a new asynchronous stream
Stream(); Stream();
//! creates a new asynchronous stream with custom allocator
Stream(const Ptr<GpuMat::Allocator>& allocator);
/** @brief Returns true if the current stream queue is finished. Otherwise, it returns false. /** @brief Returns true if the current stream queue is finished. Otherwise, it returns false.
*/ */
bool queryIfComplete() const; bool queryIfComplete() const;
......
...@@ -282,9 +282,10 @@ public: ...@@ -282,9 +282,10 @@ public:
cudaStream_t stream; cudaStream_t stream;
bool ownStream; bool ownStream;
Ptr<StackAllocator> stackAllocator; Ptr<GpuMat::Allocator> allocator;
Impl(); Impl();
Impl(const Ptr<GpuMat::Allocator>& allocator);
explicit Impl(cudaStream_t stream); explicit Impl(cudaStream_t stream);
~Impl(); ~Impl();
...@@ -295,17 +296,23 @@ cv::cuda::Stream::Impl::Impl() : stream(0), ownStream(false) ...@@ -295,17 +296,23 @@ cv::cuda::Stream::Impl::Impl() : stream(0), ownStream(false)
cudaSafeCall( cudaStreamCreate(&stream) ); cudaSafeCall( cudaStreamCreate(&stream) );
ownStream = true; ownStream = true;
stackAllocator = makePtr<StackAllocator>(stream); allocator = makePtr<StackAllocator>(stream);
}
// Creates and owns a new CUDA stream, but performs device-memory allocation
// through the caller-supplied GpuMat allocator rather than building the
// default per-stream StackAllocator (as the default ctor above does).
cv::cuda::Stream::Impl::Impl(const Ptr<GpuMat::Allocator>& allocator) : stream(0), ownStream(false), allocator(allocator)
{
cudaSafeCall( cudaStreamCreate(&stream) );
// ownStream marks the stream for release in ~Impl().
ownStream = true;
} }
cv::cuda::Stream::Impl::Impl(cudaStream_t stream_) : stream(stream_), ownStream(false) cv::cuda::Stream::Impl::Impl(cudaStream_t stream_) : stream(stream_), ownStream(false)
{ {
stackAllocator = makePtr<StackAllocator>(stream); allocator = makePtr<StackAllocator>(stream);
} }
cv::cuda::Stream::Impl::~Impl() cv::cuda::Stream::Impl::~Impl()
{ {
stackAllocator.release(); allocator.release();
if (stream && ownStream) if (stream && ownStream)
{ {
...@@ -417,6 +424,16 @@ cv::cuda::Stream::Stream() ...@@ -417,6 +424,16 @@ cv::cuda::Stream::Stream()
#endif #endif
} }
//! Creates a new asynchronous stream whose device-memory allocations go
//! through the supplied custom allocator. In builds without CUDA support
//! this constructor always reports the missing-CUDA error.
cv::cuda::Stream::Stream(const Ptr<GpuMat::Allocator>& allocator)
{
#ifdef HAVE_CUDA
    impl_ = makePtr<Impl>(allocator);
#else
    (void) allocator;   // silence unused-parameter warning in CUDA-less builds
    throw_no_cuda();
#endif
}
bool cv::cuda::Stream::queryIfComplete() const bool cv::cuda::Stream::queryIfComplete() const
{ {
#ifndef HAVE_CUDA #ifndef HAVE_CUDA
...@@ -675,7 +692,7 @@ cv::cuda::BufferPool::BufferPool(Stream& stream) ...@@ -675,7 +692,7 @@ cv::cuda::BufferPool::BufferPool(Stream& stream)
throw_no_cuda(); throw_no_cuda();
} }
#else #else
cv::cuda::BufferPool::BufferPool(Stream& stream) : allocator_(stream.impl_->stackAllocator) cv::cuda::BufferPool::BufferPool(Stream& stream) : allocator_(stream.impl_->allocator)
{ {
} }
#endif #endif
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册