Commit 9ec96597 authored by Vladislav Vinogradov

gpu version of GMG Background Subtractor

Parent 0ceb9b6a
@@ -2127,6 +2127,71 @@ private:
GpuMat samples_;
};
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*/
class CV_EXPORTS GMG_GPU
{
public:
GMG_GPU();
/**
* Validate parameters and set up data structures for appropriate frame size.
* @param frameSize Input frame size
* @param min Minimum value taken on by pixels in image sequence. Usually 0
* @param max Maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
*/
void initialize(Size frameSize, float min = 0.0f, float max = 255.0f);
/**
* Performs single-frame background subtraction and builds up a statistical background image
* model.
* @param frame Input frame
* @param fgmask Output mask image representing foreground and background pixels
* @param learningRate New learning rate in [0, 1]; pass the default -1.0f to keep the current learningRate field
* @param stream Stream for the asynchronous version
*/
void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
//! Total number of distinct colors to maintain in histogram.
int maxFeatures;
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
float learningRate;
//! Number of frames of video to use to initialize histograms.
int numInitializationFrames;
//! Number of discrete levels in each channel to be used in histograms.
int quantizationLevels;
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
float backgroundPrior;
//! Value above which a pixel is determined to be foreground.
float decisionThreshold;
//! Smoothing radius, in pixels, for cleaning up the foreground mask.
int smoothingRadius;
private:
float maxVal_, minVal_;
Size frameSize_;
int frameNum_;
GpuMat nfeatures_;
GpuMat colors_;
GpuMat weights_;
Ptr<FilterEngine_GPU> boxFilter_;
GpuMat buf_;
};
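/*
* Usage sketch (illustrative only, not part of the API): feed frames from an already-opened
* cv::VideoCapture named "cap" (an assumed source) through GMG_GPU and read back the mask.
*
*     cv::gpu::GMG_GPU gmg;
*     gmg.numInitializationFrames = 40;   // e.g. shorten the warm-up period from the default 120
*
*     cv::Mat frame, fgmask;
*     cv::gpu::GpuMat d_frame, d_fgmask;
*     while (cap.read(frame))
*     {
*         d_frame.upload(frame);
*         gmg(d_frame, d_fgmask);         // the first call sizes and initializes the internal buffers
*         d_fgmask.download(fgmask);      // 8UC1 mask: 255 = foreground, 0 = background
*     }
*/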
////////////////////////////////// Video Encoding //////////////////////////////////
// Works only under Windows
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifndef HAVE_CUDA
cv::gpu::GMG_GPU::GMG_GPU() { throw_nogpu(); }
void cv::gpu::GMG_GPU::initialize(cv::Size, float, float) { throw_nogpu(); }
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, float, cv::gpu::Stream&) { throw_nogpu(); }
#else
namespace cv { namespace gpu { namespace device {
namespace bgfg_gmg
{
void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior,
float decisionThreshold, int maxFeatures, int numInitializationFrames);
template <typename SrcT>
void update_gpu(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
}
}}}
cv::gpu::GMG_GPU::GMG_GPU()
{
maxFeatures = 64;
learningRate = 0.025f;
numInitializationFrames = 120;
quantizationLevels = 16;
backgroundPrior = 0.8f;
decisionThreshold = 0.8f;
smoothingRadius = 7;
}
void cv::gpu::GMG_GPU::initialize(cv::Size frameSize, float min, float max)
{
using namespace cv::gpu::device::bgfg_gmg;
CV_Assert(min < max);
CV_Assert(maxFeatures > 0);
CV_Assert(learningRate >= 0.0f && learningRate <= 1.0f);
CV_Assert(numInitializationFrames >= 1);
CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255);
CV_Assert(backgroundPrior >= 0.0f && backgroundPrior <= 1.0f);
minVal_ = min;
maxVal_ = max;
frameSize_ = frameSize;
frameNum_ = 0;
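// Per-pixel histograms are stored as maxFeatures stacked planes: for pixel (x, y), the color
// and weight of its i-th feature live at row (i * frameSize_.height + y) of colors_ / weights_,
// and nfeatures_(y, x) holds the number of features currently in that pixel's histogram.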
nfeatures_.create(frameSize_, CV_32SC1);
colors_.create(maxFeatures * frameSize_.height, frameSize_.width, CV_32SC1);
weights_.create(maxFeatures * frameSize_.height, frameSize_.width, CV_32FC1);
nfeatures_.setTo(cv::Scalar::all(0));
boxFilter_ = cv::gpu::createBoxFilter_GPU(CV_8UC1, CV_8UC1, cv::Size(smoothingRadius, smoothingRadius));
loadConstants(frameSize_.width, frameSize_.height, minVal_, maxVal_, quantizationLevels, backgroundPrior, decisionThreshold, maxFeatures, numInitializationFrames);
}
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float newLearningRate, cv::gpu::Stream& stream)
{
using namespace cv::gpu::device::bgfg_gmg;
typedef void (*func_t)(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures,
int frameNum, float learningRate, cudaStream_t stream);
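// dispatch table indexed by [source depth][channels - 1]; the null entries are depth/channel
// combinations that are not supported (only 8U, 16U and 32F frames with 1, 3 or 4 channels
// are accepted, as asserted below)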
static const func_t funcs[6][4] =
{
{update_gpu<uchar>, 0, update_gpu<uchar3>, update_gpu<uchar4>},
{0,0,0,0},
{update_gpu<ushort>, 0, update_gpu<ushort3>, update_gpu<ushort4>},
{0,0,0,0},
{0,0,0,0},
{update_gpu<float>, 0, update_gpu<float3>, update_gpu<float4>}
};
CV_Assert(frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F);
CV_Assert(frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4);
if (newLearningRate != -1.0f)
{
CV_Assert(newLearningRate >= 0.0f && newLearningRate <= 1.0f);
learningRate = newLearningRate;
}
if (frame.size() != frameSize_)
initialize(frame.size(), 0.0f, frame.depth() == CV_8U ? 255.0f : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0f);
fgmask.create(frameSize_, CV_8UC1);
funcs[frame.depth()][frame.channels() - 1](frame, fgmask, colors_, weights_, nfeatures_, frameNum_, learningRate, cv::gpu::StreamAccessor::getStream(stream));
// approximate median filtering: box-filter the binary mask, then keep a pixel as foreground
// only if at least half of its smoothingRadius x smoothingRadius neighborhood is foreground
boxFilter_->apply(fgmask, buf_, cv::Rect(0,0,-1,-1), stream);
int minCount = (smoothingRadius * smoothingRadius + 1) / 2;
double thresh = 255.0 * minCount / (smoothingRadius * smoothingRadius);
cv::gpu::threshold(buf_, fgmask, thresh, 255.0, cv::THRESH_BINARY, stream);
// keep track of how many frames we have processed
++frameNum_;
}
#endif
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device {
namespace bgfg_gmg
{
__constant__ int c_width;
__constant__ int c_height;
__constant__ float c_minVal;
__constant__ float c_maxVal;
__constant__ int c_quantizationLevels;
__constant__ float c_backgroundPrior;
__constant__ float c_decisionThreshold;
__constant__ int c_maxFeatures;
__constant__ int c_numInitializationFrames;
void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior,
float decisionThreshold, int maxFeatures, int numInitializationFrames)
{
cudaSafeCall( cudaMemcpyToSymbol(c_width, &width, sizeof(width)) );
cudaSafeCall( cudaMemcpyToSymbol(c_height, &height, sizeof(height)) );
cudaSafeCall( cudaMemcpyToSymbol(c_minVal, &minVal, sizeof(minVal)) );
cudaSafeCall( cudaMemcpyToSymbol(c_maxVal, &maxVal, sizeof(maxVal)) );
cudaSafeCall( cudaMemcpyToSymbol(c_quantizationLevels, &quantizationLevels, sizeof(quantizationLevels)) );
cudaSafeCall( cudaMemcpyToSymbol(c_backgroundPrior, &backgroundPrior, sizeof(backgroundPrior)) );
cudaSafeCall( cudaMemcpyToSymbol(c_decisionThreshold, &decisionThreshold, sizeof(decisionThreshold)) );
cudaSafeCall( cudaMemcpyToSymbol(c_maxFeatures, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( cudaMemcpyToSymbol(c_numInitializationFrames, &numInitializationFrames, sizeof(numInitializationFrames)) );
}
__device__ float findFeature(const int color, const PtrStepi& colors, const PtrStepf& weights, const int x, const int y, const int nfeatures)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
if (color == colors(fy, x))
return weights(fy, x);
}
// not in histogram, so return 0.
return 0.0f;
}
__device__ void normalizeHistogram(PtrStepf weights, const int x, const int y, const int nfeatures)
{
float total = 0.0f;
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
total += weights(fy, x);
if (total != 0.0f)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
weights(fy, x) /= total;
}
}
__device__ bool insertFeature(const int color, const float weight, PtrStepi colors, PtrStepf weights, const int x, const int y, int& nfeatures)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
if (color == colors(fy, x))
{
// feature in histogram
weights(fy, x) += weight;
return false;
}
}
if (nfeatures == c_maxFeatures)
{
// discard oldest feature
int idx = -1;
float minVal = numeric_limits<float>::max();
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
const float w = weights(fy, x);
if (w < minVal)
{
minVal = w;
idx = fy;
}
}
colors(idx, x) = color;
weights(idx, x) = weight;
return false;
}
colors(nfeatures * c_height + y, x) = color;
weights(nfeatures * c_height + y, x) = weight;
++nfeatures;
return true;
}
namespace detail
{
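// Quantization maps a pixel to a single integer "color" key: each channel is quantized to one
// of c_quantizationLevels values and packed into its own byte, so up to three channels fit in
// one 32-bit key (the fourth channel of 4-channel inputs is ignored).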
template <int cn> struct Quantization
{
template <typename T>
__device__ static int apply(const T& val)
{
int res = 0;
res |= static_cast<int>((val.x - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal));
res |= static_cast<int>((val.y - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 8;
res |= static_cast<int>((val.z - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 16;
return res;
}
};
template <> struct Quantization<1>
{
template <typename T>
__device__ static int apply(T val)
{
return static_cast<int>((val - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal));
}
};
}
template <typename T> struct Quantization : detail::Quantization<VecTraits<T>::cn> {};
template <typename SrcT>
__global__ void update(const PtrStep_<SrcT> frame, PtrStepb fgmask, PtrStepi colors_, PtrStepf weights_, PtrStepi nfeatures_, const int frameNum, const float learningRate)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= c_width || y >= c_height)
return;
const SrcT pix = frame(y, x);
const int newFeatureColor = Quantization<SrcT>::apply(pix);
int nfeatures = nfeatures_(y, x);
bool isForeground = false;
if (frameNum > c_numInitializationFrames)
{
// typical operation
const float weight = findFeature(newFeatureColor, colors_, weights_, x, y, nfeatures);
// see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule
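// posterior = P(background | feature)
//           = P(feature | background) * P(background) /
//             (P(feature | background) * P(background) + P(feature | foreground) * P(foreground)),
// where the histogram weight stands in for P(feature | background) and (1 - weight) serves as a
// proxy for P(feature | foreground)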
const float posterior = (weight * c_backgroundPrior) / (weight * c_backgroundPrior + (1.0f - weight) * (1.0f - c_backgroundPrior));
isForeground = ((1.0f - posterior) > c_decisionThreshold);
}
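// -isForeground is -1 (all bits set) for foreground and 0 for background, so the cast yields 255 or 0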
fgmask(y, x) = (uchar)(-isForeground);
if (frameNum <= c_numInitializationFrames + 1)
{
// training-mode update
insertFeature(newFeatureColor, 1.0f, colors_, weights_, x, y, nfeatures);
if (frameNum == c_numInitializationFrames + 1)
normalizeHistogram(weights_, x, y, nfeatures);
}
else
{
// update histogram: decay existing weights, then insert or reinforce the current feature;
// renormalize only when a brand-new feature slot was appended
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
weights_(fy, x) *= 1.0f - learningRate;
bool inserted = insertFeature(newFeatureColor, learningRate, colors_, weights_, x, y, nfeatures);
if (inserted)
{
normalizeHistogram(weights_, x, y, nfeatures);
nfeatures_(y, x) = nfeatures;
}
}
}
template <typename SrcT>
void update_gpu(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream)
{
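// one thread per pixel: 32x8 thread blocks tiled over the whole frame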
const dim3 block(32, 8);
const dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(update<SrcT>, cudaFuncCachePreferL1) );
update<SrcT><<<grid, block, 0, stream>>>((DevMem2D_<SrcT>) frame, fgmask, colors, weights, nfeatures, frameNum, learningRate);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void update_gpu<uchar >(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<uchar3 >(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<uchar4 >(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<ushort >(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<ushort3>(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<ushort4>(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<float >(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<float3 >(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
template void update_gpu<float4 >(DevMem2Db frame, PtrStepb fgmask, DevMem2Di colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, cudaStream_t stream);
}
}}}