提交 487ff4f3 编写于 作者: A Andrey Pavlenko 提交者: OpenCV Buildbot

Merge pull request #1151 from jet47:gpubgsegm-refactoring

......@@ -6,4 +6,4 @@ set(the_description "GPU-accelerated Background Segmentation")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations)
ocv_define_module(gpubgsegm opencv_video opencv_imgproc opencv_legacy opencv_gpuarithm opencv_gpufilters opencv_gpuimgproc)
ocv_define_module(gpubgsegm opencv_video OPTIONAL opencv_legacy opencv_imgproc opencv_gpuarithm opencv_gpufilters opencv_gpuimgproc)
......@@ -5,128 +5,11 @@ Background Segmentation
gpu::FGDStatModel
-----------------
.. ocv:class:: gpu::FGDStatModel
gpu::BackgroundSubtractorMOG
----------------------------
Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
Class used for background/foreground segmentation. ::
class FGDStatModel
{
public:
struct Params
{
...
};
explicit FGDStatModel(int out_cn = 3);
explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
~FGDStatModel();
void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
void release();
int update(const cv::gpu::GpuMat& curFrame);
//8UC3 or 8UC4 reference background image
cv::gpu::GpuMat background;
//8UC1 foreground image
cv::gpu::GpuMat foreground;
std::vector< std::vector<cv::Point> > foreground_regions;
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [FGD2003]_.
The results are available through the class fields:
.. ocv:member:: cv::gpu::GpuMat background
The output background image.
.. ocv:member:: cv::gpu::GpuMat foreground
The output foreground mask as an 8-bit binary image.
.. ocv:member:: cv::gpu::GpuMat foreground_regions
The output foreground regions calculated by :ocv:func:`findContours`.
gpu::FGDStatModel::FGDStatModel
-------------------------------
Constructors.
.. ocv:function:: gpu::FGDStatModel::FGDStatModel(int out_cn = 3)
.. ocv:function:: gpu::FGDStatModel::FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3)
:param firstFrame: First frame from video stream. Supports 3- and 4-channels input ( ``CV_8UC3`` and ``CV_8UC4`` ).
:param params: Algorithm's parameters. See [FGD2003]_ for explanation.
:param out_cn: Channels count in output result and inner buffers. Can be 3 or 4. 4-channels version requires more memory, but works a bit faster.
.. seealso:: :ocv:func:`gpu::FGDStatModel::create`
gpu::FGDStatModel::create
-------------------------
Initializes background model.
.. ocv:function:: void gpu::FGDStatModel::create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params())
:param firstFrame: First frame from video stream. Supports 3- and 4-channels input ( ``CV_8UC3`` and ``CV_8UC4`` ).
:param params: Algorithm's parameters. See [FGD2003]_ for explanation.
gpu::FGDStatModel::release
--------------------------
Releases all inner buffer's memory.
.. ocv:function:: void gpu::FGDStatModel::release()
gpu::FGDStatModel::update
--------------------------
Updates the background model and returns foreground regions count.
.. ocv:function:: int gpu::FGDStatModel::update(const cv::gpu::GpuMat& curFrame)
:param curFrame: Next video frame.
gpu::MOG_GPU
------------
.. ocv:class:: gpu::MOG_GPU
Gaussian Mixture-based Background/Foreground Segmentation Algorithm. ::
class MOG_GPU
{
public:
MOG_GPU(int nmixtures = -1);
void initialize(Size frameSize, int frameType);
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null());
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
void release();
int history;
float varThreshold;
float backgroundRatio;
float noiseSigma;
};
.. ocv:class:: gpu::BackgroundSubtractorMOG : public cv::BackgroundSubtractorMOG
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [MOG2001]_.
......@@ -134,275 +17,108 @@ The class discriminates between foreground and background pixels by building and
gpu::MOG_GPU::MOG_GPU
---------------------
The constructor.
.. ocv:function:: gpu::MOG_GPU::MOG_GPU(int nmixtures = -1)
:param nmixtures: Number of Gaussian mixtures.
Default constructor sets all parameters to default values.
gpu::MOG_GPU::operator()
------------------------
Updates the background model and returns the foreground mask.
.. ocv:function:: void gpu::MOG_GPU::operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null())
:param frame: Next video frame.
:param fgmask: The output foreground mask as an 8-bit binary image.
:param stream: Stream for the asynchronous version.
gpu::MOG_GPU::getBackgroundImage
--------------------------------
Computes a background image.
gpu::createBackgroundSubtractorMOG
----------------------------------
Creates mixture-of-gaussian background subtractor
.. ocv:function:: void gpu::MOG_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const
.. ocv:function:: Ptr<gpu::BackgroundSubtractorMOG> gpu::createBackgroundSubtractorMOG(int history=200, int nmixtures=5, double backgroundRatio=0.7, double noiseSigma=0)
:param backgroundImage: The output background image.
:param history: Length of the history.
:param stream: Stream for the asynchronous version.
gpu::MOG_GPU::release
---------------------
Releases all inner buffer's memory.
.. ocv:function:: void gpu::MOG_GPU::release()
gpu::MOG2_GPU
-------------
.. ocv:class:: gpu::MOG2_GPU
Gaussian Mixture-based Background/Foreground Segmentation Algorithm. ::
class MOG2_GPU
{
public:
MOG2_GPU(int nmixtures = -1);
void initialize(Size frameSize, int frameType);
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null());
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
void release();
// parameters
...
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [MOG2004]_.
Here are important members of the class that control the algorithm, which you can set after constructing the class instance:
.. ocv:member:: float backgroundRatio
Threshold defining whether the component is significant enough to be included into the background model (corresponds to ``TB=1-cf`` from the paper [MOG2004]_). ``cf=0.1 => TB=0.9`` is default. For ``alpha=0.001``, it means that the mode should exist for approximately 105 frames before it is considered foreground.
.. ocv:member:: float varThreshold
Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the existing components (corresponds to ``Tg``). If it is not close to any component, a new component is generated. ``3 sigma => Tg=3*3=9`` is default. A smaller ``Tg`` value generates more components. A higher ``Tg`` value may result in a small number of components but they can grow too large.
.. ocv:member:: float fVarInit
Initial variance for the newly generated components. It affects the speed of adaptation. The parameter value is based on your estimate of the typical standard deviation from the images. OpenCV uses 15 as a reasonable value.
.. ocv:member:: float fVarMin
Parameter used to further control the variance.
.. ocv:member:: float fVarMax
Parameter used to further control the variance.
.. ocv:member:: float fCT
:param nmixtures: Number of Gaussian mixtures.
Complexity reduction parameter. This parameter defines the number of samples needed to accept to prove the component exists. ``CT=0.05`` is a default value for all the samples. By setting ``CT=0`` you get an algorithm very similar to the standard Stauffer&Grimson algorithm.
:param backgroundRatio: Background ratio.
.. ocv:member:: uchar nShadowDetection
:param noiseSigma: Noise strength (standard deviation of the brightness or each color channel). 0 means some automatic value.
The value for marking shadow pixels in the output foreground mask. Default value is 127.
.. ocv:member:: float fTau
Shadow threshold. The shadow is detected if the pixel is a darker version of the background. ``Tau`` is a threshold defining how much darker the shadow can be. ``Tau= 0.5`` means that if a pixel is more than twice darker then it is not shadow. See [ShadowDetect2003]_.
gpu::BackgroundSubtractorMOG2
-----------------------------
Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
.. ocv:member:: bool bShadowDetection
.. ocv:class:: gpu::BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
Parameter defining whether shadow detection should be enabled.
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [MOG2004]_.
.. seealso:: :ocv:class:`BackgroundSubtractorMOG2`
gpu::MOG2_GPU::MOG2_GPU
-----------------------
The constructor.
.. ocv:function:: gpu::MOG2_GPU::MOG2_GPU(int nmixtures = -1)
:param nmixtures: Number of Gaussian mixtures.
Default constructor sets all parameters to default values.
gpu::createBackgroundSubtractorMOG2
-----------------------------------
Creates MOG2 Background Subtractor
.. ocv:function:: Ptr<gpu::BackgroundSubtractorMOG2> gpu::createBackgroundSubtractorMOG2( int history=500, double varThreshold=16, bool detectShadows=true )
gpu::MOG2_GPU::operator()
-------------------------
Updates the background model and returns the foreground mask.
:param history: Length of the history.
.. ocv:function:: void gpu::MOG2_GPU::operator()( const GpuMat& frame, GpuMat& fgmask, float learningRate=-1.0f, Stream& stream=Stream::Null() )
:param varThreshold: Threshold on the squared Mahalanobis distance between the pixel and the model to decide whether a pixel is well described by the background model. This parameter does not affect the background update.
:param frame: Next video frame.
:param detectShadows: If true, the algorithm will detect shadows and mark them. It decreases the speed a bit, so if you do not need this feature, set the parameter to false.
:param fgmask: The output foreground mask as an 8-bit binary image.
:param stream: Stream for the asynchronous version.
gpu::BackgroundSubtractorGMG
----------------------------
Background/Foreground Segmentation Algorithm.
.. ocv:class:: gpu::BackgroundSubtractorGMG : public cv::BackgroundSubtractorGMG
gpu::MOG2_GPU::getBackgroundImage
---------------------------------
Computes a background image.
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [GMG2012]_.
.. ocv:function:: void gpu::MOG2_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const
:param backgroundImage: The output background image.
:param stream: Stream for the asynchronous version.
gpu::createBackgroundSubtractorGMG
----------------------------------
Creates GMG Background Subtractor
.. ocv:function:: Ptr<gpu::BackgroundSubtractorGMG> gpu::createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8)
:param initializationFrames: Number of frames of video to use to initialize histograms.
gpu::MOG2_GPU::release
----------------------
Releases all inner buffer's memory.
:param decisionThreshold: Value above which pixel is determined to be FG.
.. ocv:function:: void gpu::MOG2_GPU::release()
gpu::BackgroundSubtractorFGD
----------------------------
gpu::GMG_GPU
------------
.. ocv:class:: gpu::GMG_GPU
.. ocv:class:: gpu::BackgroundSubtractorFGD : public cv::BackgroundSubtractor
Class used for background/foreground segmentation. ::
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [FGD2003]_. ::
class GMG_GPU
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
GMG_GPU();
void initialize(Size frameSize, float min = 0.0f, float max = 255.0f);
void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
void release();
int maxFeatures;
float learningRate;
int numInitializationFrames;
int quantizationLevels;
float backgroundPrior;
float decisionThreshold;
int smoothingRadius;
...
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [GMG2012]_.
Here are important members of the class that control the algorithm, which you can set after constructing the class instance:
.. ocv:member:: int maxFeatures
Total number of distinct colors to maintain in histogram.
.. ocv:member:: float learningRate
Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
.. ocv:member:: int numInitializationFrames
Number of frames of video to use to initialize histograms.
.. ocv:member:: int quantizationLevels
Number of discrete levels in each channel to be used in histograms.
.. ocv:member:: float backgroundPrior
Prior probability that any given pixel is a background pixel. A sensitivity parameter.
.. ocv:member:: float decisionThreshold
.. seealso:: :ocv:class:`BackgroundSubtractor`
Value above which pixel is determined to be FG.
.. ocv:member:: float smoothingRadius
Smoothing radius, in pixels, for cleaning up FG image.
gpu::BackgroundSubtractorFGD::getForegroundRegions
--------------------------------------------------
Returns the output foreground regions calculated by :ocv:func:`findContours`.
.. ocv:function:: void gpu::BackgroundSubtractorFGD::getForegroundRegions(OutputArrayOfArrays foreground_regions)
:param foreground_regions: Output array (CPU memory).
gpu::GMG_GPU::GMG_GPU
---------------------
The default constructor.
.. ocv:function:: gpu::GMG_GPU::GMG_GPU()
Default constructor sets all parameters to default values.
gpu::createBackgroundSubtractorFGD
----------------------------------
Creates FGD Background Subtractor
.. ocv:function:: Ptr<gpu::BackgroundSubtractorFGD> gpu::createBackgroundSubtractorFGD(const FGDParams& params = FGDParams())
gpu::GMG_GPU::initialize
------------------------
Initialize background model and allocates all inner buffers.
.. ocv:function:: void gpu::GMG_GPU::initialize(Size frameSize, float min = 0.0f, float max = 255.0f)
:param frameSize: Input frame size.
:param min: Minimum value taken on by pixels in image sequence. Usually 0.
:param max: Maximum value taken on by pixels in image sequence, e.g. 1.0 or 255.
gpu::GMG_GPU::operator()
------------------------
Updates the background model and returns the foreground mask
.. ocv:function:: void gpu::GMG_GPU::operator ()( const GpuMat& frame, GpuMat& fgmask, float learningRate=-1.0f, Stream& stream=Stream::Null() )
:param frame: Next video frame.
:param fgmask: The output foreground mask as an 8-bit binary image.
:param stream: Stream for the asynchronous version.
gpu::GMG_GPU::release
---------------------
Releases all inner buffer's memory.
.. ocv:function:: void gpu::GMG_GPU::release()
:param params: Algorithm's parameters. See [FGD2003]_ for explanation.
.. [FGD2003] Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. *Foreground Object Detection from Videos Containing Complex Background*. ACM MM2003 9p, 2003.
.. [MOG2001] P. KadewTraKuPong and R. Bowden. *An improved adaptive background mixture model for real-time tracking with shadow detection*. Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001
.. [MOG2004] Z. Zivkovic. *Improved adaptive Gaussian mixture model for background subtraction*. International Conference Pattern Recognition, UK, August, 2004
.. [ShadowDetect2003] Prati, Mikic, Trivedi and Cucchiarra. *Detecting Moving Shadows...*. IEEE PAMI, 2003
.. [GMG2012] A. Godbehere, A. Matsukawa and K. Goldberg. *Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive Audio Art Installation*. American Control Conference, Montreal, June 2012
......@@ -47,284 +47,106 @@
# error gpubgsegm.hpp header must be compiled as C++
#endif
#include <memory>
#include "opencv2/core/gpu.hpp"
#include "opencv2/gpufilters.hpp"
#include "opencv2/video/background_segm.hpp"
namespace cv { namespace gpu {
// Foreground Object Detection from Videos Containing Complex Background.
// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
// ACM MM2003 9p
class CV_EXPORTS FGDStatModel
////////////////////////////////////////////////////
// MOG
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractorMOG
{
public:
struct CV_EXPORTS Params
{
int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
// Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
// Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
// These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; // Affects color and color co-occurrence quantization, typically set to 2.
float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
// default Params
Params();
};
// out_cn - channels count in output result (can be 3 or 4)
// 4-channels require more memory, but a bit faster
explicit FGDStatModel(int out_cn = 3);
explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
~FGDStatModel();
void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
void release();
int update(const cv::gpu::GpuMat& curFrame);
//8UC3 or 8UC4 reference background image
cv::gpu::GpuMat background;
//8UC1 foreground image
cv::gpu::GpuMat foreground;
std::vector< std::vector<cv::Point> > foreground_regions;
using cv::BackgroundSubtractorMOG::apply;
using cv::BackgroundSubtractorMOG::getBackgroundImage;
private:
FGDStatModel(const FGDStatModel&);
FGDStatModel& operator=(const FGDStatModel&);
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
class Impl;
std::auto_ptr<Impl> impl_;
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
};
/*!
Gaussian Mixture-based Background/Foreground Segmentation Algorithm
CV_EXPORTS Ptr<gpu::BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history = 200, int nmixtures = 5,
double backgroundRatio = 0.7, double noiseSigma = 0);
The class implements the following algorithm:
"An improved adaptive background mixture model for real-time tracking with shadow detection"
P. KadewTraKuPong and R. Bowden,
Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001."
http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
*/
class CV_EXPORTS MOG_GPU
////////////////////////////////////////////////////
// MOG2
class CV_EXPORTS BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
{
public:
//! the default constructor
MOG_GPU(int nmixtures = -1);
//! re-initialization method
void initialize(Size frameSize, int frameType);
//! the update operator
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null());
//! computes a background image which are the mean of all background gaussians
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
using cv::BackgroundSubtractorMOG2::apply;
using cv::BackgroundSubtractorMOG2::getBackgroundImage;
//! releases all inner buffers
void release();
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
int history;
float varThreshold;
float backgroundRatio;
float noiseSigma;
private:
int nmixtures_;
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
};
Size frameSize_;
int frameType_;
int nframes_;
CV_EXPORTS Ptr<gpu::BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
bool detectShadows = true);
GpuMat weight_;
GpuMat sortKey_;
GpuMat mean_;
GpuMat var_;
};
////////////////////////////////////////////////////
// GMG
/*!
The class implements the following algorithm:
"Improved adaptive Gausian mixture model for background subtraction"
Z.Zivkovic
International Conference Pattern Recognition, UK, August, 2004.
http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
*/
class CV_EXPORTS MOG2_GPU
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractorGMG
{
public:
//! the default constructor
MOG2_GPU(int nmixtures = -1);
//! re-initialization method
void initialize(Size frameSize, int frameType);
//! the update operator
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
//! computes a background image which are the mean of all background gaussians
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
//! releases all inner buffers
void release();
// parameters
// you should call initialize after parameters changes
int history;
using cv::BackgroundSubtractorGMG::apply;
//! here it is the maximum allowed number of mixture components.
//! Actual number is determined dynamically per pixel
float varThreshold;
// threshold on the squared Mahalanobis distance to decide if it is well described
// by the background model or not. Related to Cthr from the paper.
// This does not influence the update of the background. A typical value could be 4 sigma
// and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
/////////////////////////
// less important parameters - things you might change but be careful
////////////////////////
float backgroundRatio;
// corresponds to fTB=1-cf from the paper
// TB - threshold when the component becomes significant enough to be included into
// the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
// For alpha=0.001 it means that the mode should exist for approximately 105 frames before
// it is considered foreground
// float noiseSigma;
float varThresholdGen;
//corresponds to Tg - threshold on the squared Mahalan. dist. to decide
//when a sample is close to the existing components. If it is not close
//to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
//Smaller Tg leads to more generated components and higher Tg might make
//lead to small number of components but they can grow too large
float fVarInit;
float fVarMin;
float fVarMax;
//initial variance for the newly generated components.
//It will influence the speed of adaptation. A good guess should be made.
//A simple way is to estimate the typical standard deviation from the images.
//I used here 10 as a reasonable value
// min and max can be used to further control the variance
float fCT; //CT - complexity reduction prior
//this is related to the number of samples needed to accept that a component
//actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
//the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
//shadow detection parameters
bool bShadowDetection; //default 1 - do shadow detection
unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value
float fTau;
// Tau - shadow threshold. The shadow is detected if the pixel is darker
//version of the background. Tau is a threshold on how much darker the shadow can be.
//Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
//See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
private:
int nmixtures_;
Size frameSize_;
int frameType_;
int nframes_;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
};
GpuMat weight_;
GpuMat variance_;
GpuMat mean_;
CV_EXPORTS Ptr<gpu::BackgroundSubtractorGMG>
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
GpuMat bgmodelUsedModes_; //keep track of number of modes per pixel
};
////////////////////////////////////////////////////
// FGD
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
* Foreground Object Detection from Videos Containing Complex Background.
* Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
* ACM MM2003 9p
*/
class CV_EXPORTS GMG_GPU
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
GMG_GPU();
/**
* Validate parameters and set up data structures for appropriate frame size.
* @param frameSize Input frame size
* @param min Minimum value taken on by pixels in image sequence. Usually 0
* @param max Maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
*/
void initialize(Size frameSize, float min = 0.0f, float max = 255.0f);
/**
* Performs single-frame background subtraction and builds up a statistical background image
* model.
* @param frame Input frame
* @param fgmask Output mask image representing foreground and background pixels
* @param stream Stream for the asynchronous version
*/
void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
//! Releases all inner buffers
void release();
//! Total number of distinct colors to maintain in histogram.
int maxFeatures;
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
float learningRate;
//! Number of frames of video to use to initialize histograms.
int numInitializationFrames;
//! Number of discrete levels in each channel to be used in histograms.
int quantizationLevels;
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
float backgroundPrior;
//! Value above which pixel is determined to be FG.
float decisionThreshold;
//! Smoothing radius, in pixels, for cleaning up FG image.
int smoothingRadius;
//! Perform background model update.
bool updateBackgroundModel;
private:
float maxVal_, minVal_;
Size frameSize_;
int frameNum_;
GpuMat nfeatures_;
GpuMat colors_;
GpuMat weights_;
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
Ptr<gpu::Filter> boxFilter_;
GpuMat buf_;
struct CV_EXPORTS FGDParams
{
int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
// Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
// Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
// These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; // Affects color and color co-occurrence quantization, typically set to 2.
float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
// default Params
FGDParams();
};
CV_EXPORTS Ptr<gpu::BackgroundSubtractorFGD>
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
}} // namespace cv { namespace gpu {
#endif /* __OPENCV_GPUBGSEGM_HPP__ */
......@@ -41,7 +41,14 @@
//M*/
#include "perf_precomp.hpp"
#include "opencv2/legacy.hpp"
#ifdef HAVE_OPENCV_LEGACY
# include "opencv2/legacy.hpp"
#endif
#ifdef HAVE_OPENCV_GPUIMGPROC
# include "opencv2/gpuimgproc.hpp"
#endif
using namespace std;
using namespace testing;
......@@ -59,6 +66,13 @@ using namespace perf;
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
#ifdef HAVE_OPENCV_LEGACY
namespace cv
{
template<> void Ptr<CvBGStatModel>::delete_obj()
......@@ -67,10 +81,7 @@ namespace cv
}
}
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
#endif
DEF_PARAM_TEST_1(Video, string);
......@@ -90,10 +101,10 @@ PERF_TEST_P(Video, FGDStatModel,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::GpuMat d_frame(frame), foreground;
cv::gpu::FGDStatModel d_model(4);
d_model.create(d_frame);
cv::Ptr<cv::gpu::BackgroundSubtractorFGD> d_fgd = cv::gpu::createBackgroundSubtractorFGD();
d_fgd->apply(d_frame, foreground);
for (int i = 0; i < 10; ++i)
{
......@@ -103,18 +114,22 @@ PERF_TEST_P(Video, FGDStatModel,
d_frame.upload(frame);
startTimer(); next();
d_model.update(d_frame);
d_fgd->apply(d_frame, foreground);
stopTimer();
}
const cv::gpu::GpuMat background = d_model.background;
const cv::gpu::GpuMat foreground = d_model.foreground;
GPU_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
#ifdef HAVE_OPENCV_GPUIMGPROC
cv::gpu::GpuMat background3, background;
d_fgd->getBackgroundImage(background3);
cv::gpu::cvtColor(background3, background, cv::COLOR_BGR2BGRA);
GPU_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
GPU_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
#endif
}
else
{
#ifdef HAVE_OPENCV_LEGACY
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
......@@ -135,6 +150,9 @@ PERF_TEST_P(Video, FGDStatModel,
CPU_SANITY_CHECK(background);
CPU_SANITY_CHECK(foreground);
#else
FAIL_NO_CPU();
#endif
}
}
......@@ -176,11 +194,12 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
if (PERF_RUN_GPU())
{
cv::Ptr<cv::BackgroundSubtractor> d_mog = cv::gpu::createBackgroundSubtractorMOG();
cv::gpu::GpuMat d_frame(frame);
cv::gpu::MOG_GPU d_mog;
cv::gpu::GpuMat foreground;
d_mog(d_frame, foreground, learningRate);
d_mog->apply(d_frame, foreground, learningRate);
for (int i = 0; i < 10; ++i)
{
......@@ -200,7 +219,7 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
d_frame.upload(frame);
startTimer(); next();
d_mog(d_frame, foreground, learningRate);
d_mog->apply(d_frame, foreground, learningRate);
stopTimer();
}
......@@ -273,13 +292,13 @@ PERF_TEST_P(Video_Cn, MOG2,
if (PERF_RUN_GPU())
{
cv::gpu::MOG2_GPU d_mog2;
d_mog2.bShadowDetection = false;
cv::Ptr<cv::BackgroundSubtractorMOG2> d_mog2 = cv::gpu::createBackgroundSubtractorMOG2();
d_mog2->setDetectShadows(false);
cv::gpu::GpuMat d_frame(frame);
cv::gpu::GpuMat foreground;
d_mog2(d_frame, foreground);
d_mog2->apply(d_frame, foreground);
for (int i = 0; i < 10; ++i)
{
......@@ -299,7 +318,7 @@ PERF_TEST_P(Video_Cn, MOG2,
d_frame.upload(frame);
startTimer(); next();
d_mog2(d_frame, foreground);
d_mog2->apply(d_frame, foreground);
stopTimer();
}
......@@ -307,8 +326,8 @@ PERF_TEST_P(Video_Cn, MOG2,
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
mog2->set("detectShadows", false);
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
mog2->setDetectShadows(false);
cv::Mat foreground;
......@@ -359,8 +378,9 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage,
if (PERF_RUN_GPU())
{
cv::Ptr<cv::BackgroundSubtractor> d_mog2 = cv::gpu::createBackgroundSubtractorMOG2();
cv::gpu::GpuMat d_frame;
cv::gpu::MOG2_GPU d_mog2;
cv::gpu::GpuMat d_foreground;
for (int i = 0; i < 10; ++i)
......@@ -380,12 +400,12 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage,
d_frame.upload(frame);
d_mog2(d_frame, d_foreground);
d_mog2->apply(d_frame, d_foreground);
}
cv::gpu::GpuMat background;
TEST_CYCLE() d_mog2.getBackgroundImage(background);
TEST_CYCLE() d_mog2->getBackgroundImage(background);
GPU_SANITY_CHECK(background, 1);
}
......@@ -460,10 +480,10 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
cv::gpu::GpuMat d_frame(frame);
cv::gpu::GpuMat foreground;
cv::gpu::GMG_GPU d_gmg;
d_gmg.maxFeatures = maxFeatures;
cv::Ptr<cv::BackgroundSubtractorGMG> d_gmg = cv::gpu::createBackgroundSubtractorGMG();
d_gmg->setMaxFeatures(maxFeatures);
d_gmg(d_frame, foreground);
d_gmg->apply(d_frame, foreground);
for (int i = 0; i < 150; ++i)
{
......@@ -488,7 +508,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
d_frame.upload(frame);
startTimer(); next();
d_gmg(d_frame, foreground);
d_gmg->apply(d_frame, foreground);
stopTimer();
}
......@@ -499,9 +519,8 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
cv::Mat foreground;
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
cv::Ptr<cv::BackgroundSubtractor> gmg = cv::createBackgroundSubtractorGMG();
gmg->set("maxFeatures", maxFeatures);
//gmg.initialize(frame.size(), 0.0, 255.0);
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::createBackgroundSubtractorGMG();
gmg->setMaxFeatures(maxFeatures);
gmg->apply(frame, foreground);
......
......@@ -57,6 +57,8 @@
#include "opencv2/gpubgsegm.hpp"
#include "opencv2/video.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
......
......@@ -53,7 +53,7 @@
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace bgfg
namespace fgd
{
////////////////////////////////////////////////////////////////////////////
// calcDiffHistogram
......
......@@ -45,7 +45,7 @@
#include "opencv2/core/gpu_types.hpp"
namespace bgfg
namespace fgd
{
struct BGPixelStat
{
......
......@@ -47,7 +47,7 @@
#include "opencv2/core/cuda/limits.hpp"
namespace cv { namespace gpu { namespace cudev {
namespace bgfg_gmg
namespace gmg
{
__constant__ int c_width;
__constant__ int c_height;
......
......@@ -111,14 +111,6 @@ namespace cv { namespace gpu { namespace cudev
0.0f);
}
template <class Ptr2D>
__device__ __forceinline__ void swap(Ptr2D& ptr, int x, int y, int k, int rows)
{
typename Ptr2D::elem_type val = ptr(k * rows + y, x);
ptr(k * rows + y, x) = ptr((k + 1) * rows + y, x);
ptr((k + 1) * rows + y, x) = val;
}
///////////////////////////////////////////////////////////////
// MOG without learning
......@@ -426,337 +418,6 @@ namespace cv { namespace gpu { namespace cudev
funcs[cn](weight, mean, dst, nmixtures, backgroundRatio, stream);
}
///////////////////////////////////////////////////////////////
// MOG2
__constant__ int c_nmixtures;
__constant__ float c_Tb;
__constant__ float c_TB;
__constant__ float c_Tg;
__constant__ float c_varInit;
__constant__ float c_varMin;
__constant__ float c_varMax;
__constant__ float c_tau;
__constant__ unsigned char c_shadowVal;
void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal)
{
varMin = ::fminf(varMin, varMax);
varMax = ::fmaxf(varMin, varMax);
cudaSafeCall( cudaMemcpyToSymbol(c_nmixtures, &nmixtures, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_Tb, &Tb, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(c_TB, &TB, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(c_Tg, &Tg, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(c_varInit, &varInit, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(c_varMin, &varMin, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(c_varMax, &varMax, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(c_tau, &tau, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(c_shadowVal, &shadowVal, sizeof(unsigned char)) );
}
template <bool detectShadows, typename SrcT, typename WorkT>
__global__ void mog2(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStepb modesUsed,
PtrStepf gmm_weight, PtrStepf gmm_variance, PtrStep<WorkT> gmm_mean,
const float alphaT, const float alpha1, const float prune)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
WorkT pix = cvt(frame(y, x));
//calculate distances to the modes (+ sort)
//here we need to go in descending order!!!
bool background = false; // true - the pixel classified as background
//internal:
bool fitsPDF = false; //if it remains zero a new GMM mode will be added
int nmodes = modesUsed(y, x);
int nNewModes = nmodes; //current number of modes in GMM
float totalWeight = 0.0f;
//go through all modes
for (int mode = 0; mode < nmodes; ++mode)
{
//need only weight if fit is found
float weight = alpha1 * gmm_weight(mode * frame.rows + y, x) + prune;
//fit not found yet
if (!fitsPDF)
{
//check if it belongs to some of the remaining modes
float var = gmm_variance(mode * frame.rows + y, x);
WorkT mean = gmm_mean(mode * frame.rows + y, x);
//calculate difference and distance
WorkT diff = mean - pix;
float dist2 = sqr(diff);
//background? - Tb - usually larger than Tg
if (totalWeight < c_TB && dist2 < c_Tb * var)
background = true;
//check fit
if (dist2 < c_Tg * var)
{
//belongs to the mode
fitsPDF = true;
//update distribution
//update weight
weight += alphaT;
float k = alphaT / weight;
//update mean
gmm_mean(mode * frame.rows + y, x) = mean - k * diff;
//update variance
float varnew = var + k * (dist2 - var);
//limit the variance
varnew = ::fmaxf(varnew, c_varMin);
varnew = ::fminf(varnew, c_varMax);
gmm_variance(mode * frame.rows + y, x) = varnew;
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
for (int i = mode; i > 0; --i)
{
//check one up
if (weight < gmm_weight((i - 1) * frame.rows + y, x))
break;
//swap one up
swap(gmm_weight, x, y, i - 1, frame.rows);
swap(gmm_variance, x, y, i - 1, frame.rows);
swap(gmm_mean, x, y, i - 1, frame.rows);
}
//belongs to the mode - bFitsPDF becomes 1
}
} // !fitsPDF
//check prune
if (weight < -prune)
{
weight = 0.0;
nmodes--;
}
gmm_weight(mode * frame.rows + y, x) = weight; //update weight by the calculated value
totalWeight += weight;
}
//renormalize weights
totalWeight = 1.f / totalWeight;
for (int mode = 0; mode < nmodes; ++mode)
gmm_weight(mode * frame.rows + y, x) *= totalWeight;
nmodes = nNewModes;
//make new mode if needed and exit
if (!fitsPDF)
{
// replace the weakest or add a new one
int mode = nmodes == c_nmixtures ? c_nmixtures - 1 : nmodes++;
if (nmodes == 1)
gmm_weight(mode * frame.rows + y, x) = 1.f;
else
{
gmm_weight(mode * frame.rows + y, x) = alphaT;
// renormalize all other weights
for (int i = 0; i < nmodes - 1; ++i)
gmm_weight(i * frame.rows + y, x) *= alpha1;
}
// init
gmm_mean(mode * frame.rows + y, x) = pix;
gmm_variance(mode * frame.rows + y, x) = c_varInit;
//sort
//find the new place for it
for (int i = nmodes - 1; i > 0; --i)
{
// check one up
if (alphaT < gmm_weight((i - 1) * frame.rows + y, x))
break;
//swap one up
swap(gmm_weight, x, y, i - 1, frame.rows);
swap(gmm_variance, x, y, i - 1, frame.rows);
swap(gmm_mean, x, y, i - 1, frame.rows);
}
}
//set the number of modes
modesUsed(y, x) = nmodes;
bool isShadow = false;
if (detectShadows && !background)
{
float tWeight = 0.0f;
// check all the components marked as background:
for (int mode = 0; mode < nmodes; ++mode)
{
WorkT mean = gmm_mean(mode * frame.rows + y, x);
WorkT pix_mean = pix * mean;
float numerator = sum(pix_mean);
float denominator = sqr(mean);
// no division by zero allowed
if (denominator == 0)
break;
// if tau < a < 1 then also check the color distortion
if (numerator <= denominator && numerator >= c_tau * denominator)
{
float a = numerator / denominator;
WorkT dD = a * mean - pix;
if (sqr(dD) < c_Tb * gmm_variance(mode * frame.rows + y, x) * a * a)
{
isShadow = true;
break;
}
};
tWeight += gmm_weight(mode * frame.rows + y, x);
if (tWeight > c_TB)
break;
}
}
fgmask(y, x) = background ? 0 : isShadow ? c_shadowVal : 255;
}
template <typename SrcT, typename WorkT>
void mog2_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
const float alpha1 = 1.0f - alphaT;
if (detectShadows)
{
cudaSafeCall( cudaFuncSetCacheConfig(mog2<true, SrcT, WorkT>, cudaFuncCachePreferL1) );
mog2<true, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
weight, variance, (PtrStepSz<WorkT>) mean,
alphaT, alpha1, prune);
}
else
{
cudaSafeCall( cudaFuncSetCacheConfig(mog2<false, SrcT, WorkT>, cudaFuncCachePreferL1) );
mog2<false, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
weight, variance, (PtrStepSz<WorkT>) mean,
alphaT, alpha1, prune);
}
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);
static const func_t funcs[] =
{
0, mog2_caller<uchar, float>, 0, mog2_caller<uchar3, float3>, mog2_caller<uchar4, float4>
};
funcs[cn](frame, fgmask, modesUsed, weight, variance, mean, alphaT, prune, detectShadows, stream);
}
template <typename WorkT, typename OutT>
__global__ void getBackgroundImage2(const PtrStepSzb modesUsed, const PtrStepf gmm_weight, const PtrStep<WorkT> gmm_mean, PtrStep<OutT> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= modesUsed.cols || y >= modesUsed.rows)
return;
int nmodes = modesUsed(y, x);
WorkT meanVal = VecTraits<WorkT>::all(0.0f);
float totalWeight = 0.0f;
for (int mode = 0; mode < nmodes; ++mode)
{
float weight = gmm_weight(mode * modesUsed.rows + y, x);
WorkT mean = gmm_mean(mode * modesUsed.rows + y, x);
meanVal = meanVal + weight * mean;
totalWeight += weight;
if(totalWeight > c_TB)
break;
}
meanVal = meanVal * (1.f / totalWeight);
dst(y, x) = saturate_cast<OutT>(meanVal);
}
template <typename WorkT, typename OutT>
void getBackgroundImage2_caller(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(modesUsed.cols, block.x), divUp(modesUsed.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(getBackgroundImage2<WorkT, OutT>, cudaFuncCachePreferL1) );
getBackgroundImage2<WorkT, OutT><<<grid, block, 0, stream>>>(modesUsed, weight, (PtrStepSz<WorkT>) mean, (PtrStepSz<OutT>) dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0, getBackgroundImage2_caller<float, uchar>, 0, getBackgroundImage2_caller<float3, uchar3>, getBackgroundImage2_caller<float4, uchar4>
};
funcs[cn](modesUsed, weight, mean, dst, stream);
}
}
}}}
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/limits.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace mog2
{
///////////////////////////////////////////////////////////////
// Utility
// Promote an 8-bit pixel value to the floating-point working type used by
// the GMM computations below. One overload per supported channel count
// (1, 3, 4); cvt() is what lets the mog2 kernel be written generically
// over SrcT/WorkT pairs.
__device__ __forceinline__ float cvt(uchar val)
{
return val;
}
__device__ __forceinline__ float3 cvt(const uchar3& val)
{
return make_float3(val.x, val.y, val.z);
}
__device__ __forceinline__ float4 cvt(const uchar4& val)
{
return make_float4(val.x, val.y, val.z, val.w);
}
// Squared Euclidean norm of a working-type value; used as the squared
// distance between a pixel and a mode mean.
__device__ __forceinline__ float sqr(float val)
{
return val * val;
}
__device__ __forceinline__ float sqr(const float3& val)
{
return val.x * val.x + val.y * val.y + val.z * val.z;
}
// NOTE: the float4 overload sums only x/y/z — the fourth component is
// excluded (presumably an alpha/padding channel of 4-channel input;
// confirm against the uchar4 code path before relying on this).
__device__ __forceinline__ float sqr(const float4& val)
{
return val.x * val.x + val.y * val.y + val.z * val.z;
}
// Component sum of a working-type value; used by the shadow-detection
// branch of the mog2 kernel to form dot-product numerators.
__device__ __forceinline__ float sum(float val)
{
return val;
}
__device__ __forceinline__ float sum(const float3& val)
{
return val.x + val.y + val.z;
}
// NOTE: as with sqr(float4), the fourth component is intentionally left
// out of the sum for 4-channel data.
__device__ __forceinline__ float sum(const float4& val)
{
return val.x + val.y + val.z;
}
// Swap the values stored for modes k and k+1 of pixel (x, y).
//
// The per-mode GMM arrays (weight / variance / mean) are stored
// "mode-major": the data for mode k lives at row (k * rows + y),
// column x — i.e. one full image plane per mode, planes stacked
// vertically. This helper is used by the insertion sorts in the mog2
// kernel that keep modes ordered by weight.
template <class Ptr2D>
__device__ __forceinline__ void swap(Ptr2D& ptr, int x, int y, int k, int rows)
{
typename Ptr2D::elem_type val = ptr(k * rows + y, x);
ptr(k * rows + y, x) = ptr((k + 1) * rows + y, x);
ptr((k + 1) * rows + y, x) = val;
}
///////////////////////////////////////////////////////////////
// MOG2
__constant__ int c_nmixtures;
__constant__ float c_Tb;
__constant__ float c_TB;
__constant__ float c_Tg;
__constant__ float c_varInit;
__constant__ float c_varMin;
__constant__ float c_varMax;
__constant__ float c_tau;
__constant__ unsigned char c_shadowVal;
// Upload the MOG2 algorithm parameters into constant memory before a
// kernel launch.
//
// nmixtures     - maximum number of Gaussian modes kept per pixel
// Tb, TB, Tg    - squared-distance / cumulative-weight thresholds used by
//                 the mog2 kernel (background test, background weight
//                 fraction, mode-fit test respectively)
// varInit       - variance assigned to a newly created mode
// varMin/varMax - clamping bounds applied to per-mode variance updates
// tau           - shadow-detection threshold
// shadowVal     - value written to the foreground mask for shadow pixels
void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal)
{
    // Normalize the variance bounds from the ORIGINAL argument values so a
    // caller who passes them swapped still gets a proper [min, max] pair.
    // (The previous code overwrote varMin before computing varMax, which
    // collapsed both values to the original varMax whenever
    // varMin > varMax.)
    const float vmin = ::fminf(varMin, varMax);
    const float vmax = ::fmaxf(varMin, varMax);

    cudaSafeCall( cudaMemcpyToSymbol(c_nmixtures, &nmixtures, sizeof(int)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_Tb, &Tb, sizeof(float)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_TB, &TB, sizeof(float)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_Tg, &Tg, sizeof(float)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_varInit, &varInit, sizeof(float)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_varMin, &vmin, sizeof(float)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_varMax, &vmax, sizeof(float)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_tau, &tau, sizeof(float)) );
    cudaSafeCall( cudaMemcpyToSymbol(c_shadowVal, &shadowVal, sizeof(unsigned char)) );
}
// Per-pixel MOG2 (Gaussian-mixture) background model update.
//
// One thread handles one pixel. The per-mode arrays gmm_weight /
// gmm_variance / gmm_mean are stored mode-major: mode k of pixel (x, y)
// lives at row (k * frame.rows + y), column x. Modes are kept sorted by
// descending weight. modesUsed(y, x) holds the current number of active
// modes for the pixel.
//
// alphaT is the learning rate, alpha1 == 1 - alphaT (precomputed by the
// caller), prune is the mode-pruning bias. Constant-memory parameters
// (c_Tb, c_TB, c_Tg, c_varInit, c_varMin, c_varMax, c_tau, c_shadowVal,
// c_nmixtures) are uploaded by loadConstants().
//
// Output: fgmask(y, x) = 0 for background, c_shadowVal for shadow (only
// when detectShadows), 255 for foreground.
template <bool detectShadows, typename SrcT, typename WorkT>
__global__ void mog2(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStepb modesUsed,
PtrStepf gmm_weight, PtrStepf gmm_variance, PtrStep<WorkT> gmm_mean,
const float alphaT, const float alpha1, const float prune)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
WorkT pix = cvt(frame(y, x));
//calculate distances to the modes (+ sort)
//here we need to go in descending order!!!
bool background = false; // true - the pixel classified as background
//internal:
bool fitsPDF = false; //if it remains zero a new GMM mode will be added
int nmodes = modesUsed(y, x);
int nNewModes = nmodes; //current number of modes in GMM
float totalWeight = 0.0f;
//go through all modes
for (int mode = 0; mode < nmodes; ++mode)
{
//need only weight if fit is found
// decay the weight towards zero; prune (<= 0) biases weak modes out
float weight = alpha1 * gmm_weight(mode * frame.rows + y, x) + prune;
//fit not found yet
if (!fitsPDF)
{
//check if it belongs to some of the remaining modes
float var = gmm_variance(mode * frame.rows + y, x);
WorkT mean = gmm_mean(mode * frame.rows + y, x);
//calculate difference and distance
WorkT diff = mean - pix;
float dist2 = sqr(diff);
//background? - Tb - usually larger than Tg
if (totalWeight < c_TB && dist2 < c_Tb * var)
background = true;
//check fit
if (dist2 < c_Tg * var)
{
//belongs to the mode
fitsPDF = true;
//update distribution
//update weight
weight += alphaT;
float k = alphaT / weight;
//update mean
gmm_mean(mode * frame.rows + y, x) = mean - k * diff;
//update variance
float varnew = var + k * (dist2 - var);
//limit the variance
varnew = ::fmaxf(varnew, c_varMin);
varnew = ::fminf(varnew, c_varMax);
gmm_variance(mode * frame.rows + y, x) = varnew;
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
// insertion step: bubble the matched mode up while its weight exceeds
// the mode above it, keeping the descending-weight order invariant
for (int i = mode; i > 0; --i)
{
//check one up
if (weight < gmm_weight((i - 1) * frame.rows + y, x))
break;
//swap one up
swap(gmm_weight, x, y, i - 1, frame.rows);
swap(gmm_variance, x, y, i - 1, frame.rows);
swap(gmm_mean, x, y, i - 1, frame.rows);
}
//belongs to the mode - bFitsPDF becomes 1
}
} // !fitsPDF
//check prune
if (weight < -prune)
{
weight = 0.0;
nmodes--;
}
gmm_weight(mode * frame.rows + y, x) = weight; //update weight by the calculated value
totalWeight += weight;
}
//renormalize weights
// NOTE: if every weight decayed/pruned to zero this divides by zero;
// presumably ruled out by the update rules above — confirm before reuse.
totalWeight = 1.f / totalWeight;
for (int mode = 0; mode < nmodes; ++mode)
gmm_weight(mode * frame.rows + y, x) *= totalWeight;
nmodes = nNewModes;
//make new mode if needed and exit
if (!fitsPDF)
{
// replace the weakest or add a new one
int mode = nmodes == c_nmixtures ? c_nmixtures - 1 : nmodes++;
if (nmodes == 1)
gmm_weight(mode * frame.rows + y, x) = 1.f;
else
{
gmm_weight(mode * frame.rows + y, x) = alphaT;
// renormalize all other weights
for (int i = 0; i < nmodes - 1; ++i)
gmm_weight(i * frame.rows + y, x) *= alpha1;
}
// init
gmm_mean(mode * frame.rows + y, x) = pix;
gmm_variance(mode * frame.rows + y, x) = c_varInit;
//sort
//find the new place for it
for (int i = nmodes - 1; i > 0; --i)
{
// check one up
if (alphaT < gmm_weight((i - 1) * frame.rows + y, x))
break;
//swap one up
swap(gmm_weight, x, y, i - 1, frame.rows);
swap(gmm_variance, x, y, i - 1, frame.rows);
swap(gmm_mean, x, y, i - 1, frame.rows);
}
}
//set the number of modes
modesUsed(y, x) = nmodes;
bool isShadow = false;
if (detectShadows && !background)
{
float tWeight = 0.0f;
// check all the components marked as background:
for (int mode = 0; mode < nmodes; ++mode)
{
WorkT mean = gmm_mean(mode * frame.rows + y, x);
WorkT pix_mean = pix * mean;
float numerator = sum(pix_mean);
float denominator = sqr(mean);
// no division by zero allowed
if (denominator == 0)
break;
// if tau < a < 1 then also check the color distortion
if (numerator <= denominator && numerator >= c_tau * denominator)
{
float a = numerator / denominator;
WorkT dD = a * mean - pix;
if (sqr(dD) < c_Tb * gmm_variance(mode * frame.rows + y, x) * a * a)
{
isShadow = true;
break;
}
};
tWeight += gmm_weight(mode * frame.rows + y, x);
if (tWeight > c_TB)
break;
}
}
fgmask(y, x) = background ? 0 : isShadow ? c_shadowVal : 255;
}
// Host-side launcher for the mog2 kernel: picks the shadow/no-shadow
// template instantiation, precomputes alpha1 = 1 - alphaT, launches one
// thread per pixel (32x8 blocks), and synchronizes when running on the
// default stream.
template <typename SrcT, typename WorkT>
void mog2_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
const float alpha1 = 1.0f - alphaT;
if (detectShadows)
{
// prefer L1 cache over shared memory — the kernel uses no shared memory
cudaSafeCall( cudaFuncSetCacheConfig(mog2<true, SrcT, WorkT>, cudaFuncCachePreferL1) );
mog2<true, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
weight, variance, (PtrStepSz<WorkT>) mean,
alphaT, alpha1, prune);
}
else
{
cudaSafeCall( cudaFuncSetCacheConfig(mog2<false, SrcT, WorkT>, cudaFuncCachePreferL1) );
mog2<false, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
weight, variance, (PtrStepSz<WorkT>) mean,
alphaT, alpha1, prune);
}
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Public entry point: dispatch on the channel count cn to the matching
// typed mog2_caller instantiation.
//
// funcs[] is indexed directly by cn; only cn == 1, 3, 4 are supported —
// entries 0 and 2 are null, so the caller must validate cn beforehand
// (a 2-channel frame would call through a null pointer here).
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);
static const func_t funcs[] =
{
0, mog2_caller<uchar, float>, 0, mog2_caller<uchar3, float3>, mog2_caller<uchar4, float4>
};
funcs[cn](frame, fgmask, modesUsed, weight, variance, mean, alphaT, prune, detectShadows, stream);
}
// Compute the background image: for each pixel, take the weighted mean of
// the highest-weight modes, accumulating until the cumulative weight
// exceeds c_TB (modes are kept sorted by descending weight by the mog2
// kernel), then renormalize by the accumulated weight.
//
// Same mode-major storage layout as the update kernel: mode k of pixel
// (x, y) lives at row (k * rows + y), column x.
template <typename WorkT, typename OutT>
__global__ void getBackgroundImage2(const PtrStepSzb modesUsed, const PtrStepf gmm_weight, const PtrStep<WorkT> gmm_mean, PtrStep<OutT> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= modesUsed.cols || y >= modesUsed.rows)
return;
int nmodes = modesUsed(y, x);
WorkT meanVal = VecTraits<WorkT>::all(0.0f);
float totalWeight = 0.0f;
for (int mode = 0; mode < nmodes; ++mode)
{
float weight = gmm_weight(mode * modesUsed.rows + y, x);
WorkT mean = gmm_mean(mode * modesUsed.rows + y, x);
meanVal = meanVal + weight * mean;
totalWeight += weight;
if(totalWeight > c_TB)
break;
}
// NOTE: totalWeight is 0 when nmodes == 0, making this a divide by zero;
// presumably every pixel has at least one mode after the first update —
// confirm before reusing this kernel standalone.
meanVal = meanVal * (1.f / totalWeight);
dst(y, x) = saturate_cast<OutT>(meanVal);
}
// Host-side launcher for getBackgroundImage2: one thread per pixel
// (32x8 blocks), L1-preferring cache config, synchronous on the default
// stream.
template <typename WorkT, typename OutT>
void getBackgroundImage2_caller(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(modesUsed.cols, block.x), divUp(modesUsed.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(getBackgroundImage2<WorkT, OutT>, cudaFuncCachePreferL1) );
getBackgroundImage2<WorkT, OutT><<<grid, block, 0, stream>>>(modesUsed, weight, (PtrStepSz<WorkT>) mean, (PtrStepSz<OutT>) dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Public entry point: dispatch on channel count cn to the matching typed
// background-image caller. As with mog2_gpu, only cn == 1, 3, 4 are
// supported — funcs[0] and funcs[2] are null, so cn must be validated by
// the caller.
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0, getBackgroundImage2_caller<float, uchar>, 0, getBackgroundImage2_caller<float3, uchar3>, getBackgroundImage2_caller<float4, uchar4>
};
funcs[cn](modesUsed, weight, mean, dst, stream);
}
}
}}}
#endif /* CUDA_DISABLER */
This diff is collapsed.
......@@ -42,17 +42,17 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
cv::gpu::GMG_GPU::GMG_GPU() { throw_no_cuda(); }
void cv::gpu::GMG_GPU::initialize(cv::Size, float, float) { throw_no_cuda(); }
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, float, cv::gpu::Stream&) { throw_no_cuda(); }
void cv::gpu::GMG_GPU::release() {}
Ptr<gpu::BackgroundSubtractorGMG> cv::gpu::createBackgroundSubtractorGMG(int, double) { throw_no_cuda(); return Ptr<gpu::BackgroundSubtractorGMG>(); }
#else
namespace cv { namespace gpu { namespace cudev {
namespace bgfg_gmg
namespace gmg
{
void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior,
float decisionThreshold, int maxFeatures, int numInitializationFrames);
......@@ -63,103 +63,215 @@ namespace cv { namespace gpu { namespace cudev {
}
}}}
cv::gpu::GMG_GPU::GMG_GPU()
namespace
{
maxFeatures = 64;
learningRate = 0.025f;
numInitializationFrames = 120;
quantizationLevels = 16;
backgroundPrior = 0.8f;
decisionThreshold = 0.8f;
smoothingRadius = 7;
updateBackgroundModel = true;
}
class GMGImpl : public gpu::BackgroundSubtractorGMG
{
public:
GMGImpl(int initializationFrames, double decisionThreshold);
void cv::gpu::GMG_GPU::initialize(cv::Size frameSize, float min, float max)
{
using namespace cv::gpu::cudev::bgfg_gmg;
void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream);
CV_Assert(min < max);
CV_Assert(maxFeatures > 0);
CV_Assert(learningRate >= 0.0f && learningRate <= 1.0f);
CV_Assert(numInitializationFrames >= 1);
CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255);
CV_Assert(backgroundPrior >= 0.0f && backgroundPrior <= 1.0f);
void getBackgroundImage(OutputArray backgroundImage) const;
minVal_ = min;
maxVal_ = max;
int getMaxFeatures() const { return maxFeatures_; }
void setMaxFeatures(int maxFeatures) { maxFeatures_ = maxFeatures; }
frameSize_ = frameSize;
double getDefaultLearningRate() const { return learningRate_; }
void setDefaultLearningRate(double lr) { learningRate_ = (float) lr; }
frameNum_ = 0;
int getNumFrames() const { return numInitializationFrames_; }
void setNumFrames(int nframes) { numInitializationFrames_ = nframes; }
nfeatures_.create(frameSize_, CV_32SC1);
colors_.create(maxFeatures * frameSize_.height, frameSize_.width, CV_32SC1);
weights_.create(maxFeatures * frameSize_.height, frameSize_.width, CV_32FC1);
int getQuantizationLevels() const { return quantizationLevels_; }
void setQuantizationLevels(int nlevels) { quantizationLevels_ = nlevels; }
nfeatures_.setTo(cv::Scalar::all(0));
double getBackgroundPrior() const { return backgroundPrior_; }
void setBackgroundPrior(double bgprior) { backgroundPrior_ = (float) bgprior; }
if (smoothingRadius > 0)
boxFilter_ = cv::gpu::createBoxFilter(CV_8UC1, -1, cv::Size(smoothingRadius, smoothingRadius));
int getSmoothingRadius() const { return smoothingRadius_; }
void setSmoothingRadius(int radius) { smoothingRadius_ = radius; }
loadConstants(frameSize_.width, frameSize_.height, minVal_, maxVal_, quantizationLevels, backgroundPrior, decisionThreshold, maxFeatures, numInitializationFrames);
}
double getDecisionThreshold() const { return decisionThreshold_; }
void setDecisionThreshold(double thresh) { decisionThreshold_ = (float) thresh; }
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float newLearningRate, cv::gpu::Stream& stream)
{
using namespace cv::gpu::cudev::bgfg_gmg;
bool getUpdateBackgroundModel() const { return updateBackgroundModel_; }
void setUpdateBackgroundModel(bool update) { updateBackgroundModel_ = update; }
typedef void (*func_t)(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
static const func_t funcs[6][4] =
{
{update_gpu<uchar>, 0, update_gpu<uchar3>, update_gpu<uchar4>},
{0,0,0,0},
{update_gpu<ushort>, 0, update_gpu<ushort3>, update_gpu<ushort4>},
{0,0,0,0},
{0,0,0,0},
{update_gpu<float>, 0, update_gpu<float3>, update_gpu<float4>}
double getMinVal() const { return minVal_; }
void setMinVal(double val) { minVal_ = (float) val; }
double getMaxVal() const { return maxVal_; }
void setMaxVal(double val) { maxVal_ = (float) val; }
private:
void initialize(Size frameSize, float min, float max);
//! Total number of distinct colors to maintain in histogram.
int maxFeatures_;
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
float learningRate_;
//! Number of frames of video to use to initialize histograms.
int numInitializationFrames_;
//! Number of discrete levels in each channel to be used in histograms.
int quantizationLevels_;
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
float backgroundPrior_;
//! Smoothing radius, in pixels, for cleaning up FG image.
int smoothingRadius_;
//! Value above which pixel is determined to be FG.
float decisionThreshold_;
//! Perform background model update.
bool updateBackgroundModel_;
float minVal_, maxVal_;
Size frameSize_;
int frameNum_;
GpuMat nfeatures_;
GpuMat colors_;
GpuMat weights_;
#if defined(HAVE_OPENCV_GPUFILTERS) && defined(HAVE_OPENCV_GPUARITHM)
Ptr<gpu::Filter> boxFilter_;
GpuMat buf_;
#endif
};
CV_Assert(frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F);
CV_Assert(frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4);
// Construct a GMG background-subtractor implementation.
// Only the number of initialization frames and the decision threshold are
// caller-supplied; every other tuning knob starts at its default and can be
// changed later through the corresponding setter.
GMGImpl::GMGImpl(int initializationFrames, double decisionThreshold)
{
    // caller-provided parameters
    numInitializationFrames_ = initializationFrames;
    decisionThreshold_ = (float) decisionThreshold;

    // histogram model defaults
    maxFeatures_ = 64;
    quantizationLevels_ = 16;
    learningRate_ = 0.025f;
    backgroundPrior_ = 0.8f;
    smoothingRadius_ = 7;

    // model bookkeeping defaults
    updateBackgroundModel_ = true;
    minVal_ = 0;
    maxVal_ = 0;
}
if (newLearningRate != -1.0f)
void GMGImpl::apply(InputArray image, OutputArray fgmask, double learningRate)
{
CV_Assert(newLearningRate >= 0.0f && newLearningRate <= 1.0f);
learningRate = newLearningRate;
apply(image, fgmask, learningRate, Stream::Null());
}
if (frame.size() != frameSize_)
initialize(frame.size(), 0.0f, frame.depth() == CV_8U ? 255.0f : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0f);
void GMGImpl::apply(InputArray _frame, OutputArray _fgmask, double newLearningRate, Stream& stream)
{
using namespace cv::gpu::cudev::gmg;
fgmask.create(frameSize_, CV_8UC1);
fgmask.setTo(cv::Scalar::all(0), stream);
typedef void (*func_t)(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
static const func_t funcs[6][4] =
{
{update_gpu<uchar>, 0, update_gpu<uchar3>, update_gpu<uchar4>},
{0,0,0,0},
{update_gpu<ushort>, 0, update_gpu<ushort3>, update_gpu<ushort4>},
{0,0,0,0},
{0,0,0,0},
{update_gpu<float>, 0, update_gpu<float3>, update_gpu<float4>}
};
funcs[frame.depth()][frame.channels() - 1](frame, fgmask, colors_, weights_, nfeatures_, frameNum_, learningRate, updateBackgroundModel, cv::gpu::StreamAccessor::getStream(stream));
GpuMat frame = _frame.getGpuMat();
// medianBlur
if (smoothingRadius > 0)
CV_Assert( frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F );
CV_Assert( frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4 );
if (newLearningRate != -1.0)
{
CV_Assert( newLearningRate >= 0.0 && newLearningRate <= 1.0 );
learningRate_ = (float) newLearningRate;
}
if (frame.size() != frameSize_)
{
double minVal = minVal_;
double maxVal = maxVal_;
if (minVal_ == 0 && maxVal_ == 0)
{
minVal = 0;
maxVal = frame.depth() == CV_8U ? 255.0 : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0;
}
initialize(frame.size(), (float) minVal, (float) maxVal);
}
_fgmask.create(frameSize_, CV_8UC1);
GpuMat fgmask = _fgmask.getGpuMat();
fgmask.setTo(Scalar::all(0), stream);
funcs[frame.depth()][frame.channels() - 1](frame, fgmask, colors_, weights_, nfeatures_, frameNum_,
learningRate_, updateBackgroundModel_, StreamAccessor::getStream(stream));
#if defined(HAVE_OPENCV_GPUFILTERS) && defined(HAVE_OPENCV_GPUARITHM)
// medianBlur
if (smoothingRadius_ > 0)
{
boxFilter_->apply(fgmask, buf_, stream);
const int minCount = (smoothingRadius_ * smoothingRadius_ + 1) / 2;
const double thresh = 255.0 * minCount / (smoothingRadius_ * smoothingRadius_);
gpu::threshold(buf_, fgmask, thresh, 255.0, THRESH_BINARY, stream);
}
#endif
// keep track of how many frames we have processed
++frameNum_;
}
void GMGImpl::getBackgroundImage(OutputArray backgroundImage) const
{
boxFilter_->apply(fgmask, buf_, stream);
int minCount = (smoothingRadius * smoothingRadius + 1) / 2;
double thresh = 255.0 * minCount / (smoothingRadius * smoothingRadius);
cv::gpu::threshold(buf_, fgmask, thresh, 255.0, cv::THRESH_BINARY, stream);
(void) backgroundImage;
CV_Error(Error::StsNotImplemented, "Not implemented");
}
// keep track of how many frames we have processed
++frameNum_;
// (Re)allocate the per-pixel feature/weight model for a new frame size and
// pixel value range [min, max), and reset the processed-frame counter.
// Called lazily from apply() when the incoming frame geometry changes.
void GMGImpl::initialize(Size frameSize, float min, float max)
{
    using namespace cv::gpu::cudev::gmg;

    // Validate the tunable parameters before building the model.
    CV_Assert( maxFeatures_ > 0 );
    CV_Assert( learningRate_ >= 0.0f && learningRate_ <= 1.0f);
    CV_Assert( numInitializationFrames_ >= 1);
    CV_Assert( quantizationLevels_ >= 1 && quantizationLevels_ <= 255);
    CV_Assert( backgroundPrior_ >= 0.0f && backgroundPrior_ <= 1.0f);

    minVal_ = min;
    maxVal_ = max;
    CV_Assert( minVal_ < maxVal_ );

    frameSize_ = frameSize;
    frameNum_ = 0; // restart frame counting for the fresh model

    // Per-pixel tables: maxFeatures_ stacked planes of colors/weights per row.
    nfeatures_.create(frameSize_, CV_32SC1);
    colors_.create(maxFeatures_ * frameSize_.height, frameSize_.width, CV_32SC1);
    weights_.create(maxFeatures_ * frameSize_.height, frameSize_.width, CV_32FC1);

    nfeatures_.setTo(Scalar::all(0));

#if defined(HAVE_OPENCV_GPUFILTERS) && defined(HAVE_OPENCV_GPUARITHM)
    // Box filter used by apply() to smooth the raw foreground mask; only
    // available when the optional gpufilters/gpuarithm modules are built.
    if (smoothingRadius_ > 0)
        boxFilter_ = gpu::createBoxFilter(CV_8UC1, -1, Size(smoothingRadius_, smoothingRadius_));
#endif

    // Hand the model parameters to the CUDA side (presumably device constant
    // storage -- see the gmg kernel sources).
    loadConstants(frameSize_.width, frameSize_.height, minVal_, maxVal_,
                  quantizationLevels_, backgroundPrior_, decisionThreshold_, maxFeatures_, numInitializationFrames_);
}
}
void cv::gpu::GMG_GPU::release()
Ptr<gpu::BackgroundSubtractorGMG> cv::gpu::createBackgroundSubtractorGMG(int initializationFrames, double decisionThreshold)
{
frameSize_ = Size();
nfeatures_.release();
colors_.release();
weights_.release();
boxFilter_.release();
buf_.release();
return new GMGImpl(initializationFrames, decisionThreshold);
}
#endif
......@@ -42,19 +42,12 @@
#include "precomp.hpp"
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
using namespace cv;
using namespace cv::gpu;
cv::gpu::MOG_GPU::MOG_GPU(int) { throw_no_cuda(); }
void cv::gpu::MOG_GPU::initialize(cv::Size, int) { throw_no_cuda(); }
void cv::gpu::MOG_GPU::operator()(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, float, Stream&) { throw_no_cuda(); }
void cv::gpu::MOG_GPU::getBackgroundImage(GpuMat&, Stream&) const { throw_no_cuda(); }
void cv::gpu::MOG_GPU::release() {}
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
cv::gpu::MOG2_GPU::MOG2_GPU(int) { throw_no_cuda(); }
void cv::gpu::MOG2_GPU::initialize(cv::Size, int) { throw_no_cuda(); }
void cv::gpu::MOG2_GPU::operator()(const GpuMat&, GpuMat&, float, Stream&) { throw_no_cuda(); }
void cv::gpu::MOG2_GPU::getBackgroundImage(GpuMat&, Stream&) const { throw_no_cuda(); }
void cv::gpu::MOG2_GPU::release() {}
Ptr<gpu::BackgroundSubtractorMOG> cv::gpu::createBackgroundSubtractorMOG(int, int, double, double) { throw_no_cuda(); return Ptr<gpu::BackgroundSubtractorMOG>(); }
#else
......@@ -66,14 +59,10 @@ namespace cv { namespace gpu { namespace cudev
int nmixtures, float varThreshold, float learningRate, float backgroundRatio, float noiseSigma,
cudaStream_t stream);
void getBackgroundImage_gpu(int cn, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, int nmixtures, float backgroundRatio, cudaStream_t stream);
void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal);
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
}
}}}
namespace mog
namespace
{
const int defaultNMixtures = 5;
const int defaultHistory = 200;
......@@ -81,199 +70,140 @@ namespace mog
const float defaultVarThreshold = 2.5f * 2.5f;
const float defaultNoiseSigma = 30.0f * 0.5f;
const float defaultInitialWeight = 0.05f;
}
cv::gpu::MOG_GPU::MOG_GPU(int nmixtures) :
frameSize_(0, 0), frameType_(0), nframes_(0)
{
nmixtures_ = std::min(nmixtures > 0 ? nmixtures : mog::defaultNMixtures, 8);
history = mog::defaultHistory;
varThreshold = mog::defaultVarThreshold;
backgroundRatio = mog::defaultBackgroundRatio;
noiseSigma = mog::defaultNoiseSigma;
}
void cv::gpu::MOG_GPU::initialize(cv::Size frameSize, int frameType)
{
CV_Assert(frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4);
frameSize_ = frameSize;
frameType_ = frameType;
int ch = CV_MAT_CN(frameType);
int work_ch = ch;
// for each gaussian mixture of each pixel bg model we store
// the mixture sort key (w/sum_of_variances), the mixture weight (w),
// the mean (nchannels values) and
// the diagonal covariance matrix (another nchannels values)
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
sortKey_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
var_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
class MOGImpl : public gpu::BackgroundSubtractorMOG
{
public:
MOGImpl(int history, int nmixtures, double backgroundRatio, double noiseSigma);
weight_.setTo(cv::Scalar::all(0));
sortKey_.setTo(cv::Scalar::all(0));
mean_.setTo(cv::Scalar::all(0));
var_.setTo(cv::Scalar::all(0));
void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream);
nframes_ = 0;
}
void getBackgroundImage(OutputArray backgroundImage) const;
void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const;
void cv::gpu::MOG_GPU::operator()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float learningRate, Stream& stream)
{
using namespace cv::gpu::cudev::mog;
int getHistory() const { return history_; }
void setHistory(int nframes) { history_ = nframes; }
CV_Assert(frame.depth() == CV_8U);
int getNMixtures() const { return nmixtures_; }
void setNMixtures(int nmix) { nmixtures_ = nmix; }
int ch = frame.channels();
int work_ch = ch;
double getBackgroundRatio() const { return backgroundRatio_; }
void setBackgroundRatio(double backgroundRatio) { backgroundRatio_ = (float) backgroundRatio; }
if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
initialize(frame.size(), frame.type());
double getNoiseSigma() const { return noiseSigma_; }
void setNoiseSigma(double noiseSigma) { noiseSigma_ = (float) noiseSigma; }
fgmask.create(frameSize_, CV_8UC1);
private:
//! re-initiaization method
void initialize(Size frameSize, int frameType);
++nframes_;
learningRate = learningRate >= 0.0f && nframes_ > 1 ? learningRate : 1.0f / std::min(nframes_, history);
CV_Assert(learningRate >= 0.0f);
int history_;
int nmixtures_;
float backgroundRatio_;
float noiseSigma_;
mog_gpu(frame, ch, fgmask, weight_, sortKey_, mean_, var_, nmixtures_,
varThreshold, learningRate, backgroundRatio, noiseSigma,
StreamAccessor::getStream(stream));
}
float varThreshold_;
void cv::gpu::MOG_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream) const
{
using namespace cv::gpu::cudev::mog;
Size frameSize_;
int frameType_;
int nframes_;
backgroundImage.create(frameSize_, frameType_);
GpuMat weight_;
GpuMat sortKey_;
GpuMat mean_;
GpuMat var_;
};
getBackgroundImage_gpu(backgroundImage.channels(), weight_, mean_, backgroundImage, nmixtures_, backgroundRatio, StreamAccessor::getStream(stream));
}
void cv::gpu::MOG_GPU::release()
{
frameSize_ = Size(0, 0);
frameType_ = 0;
nframes_ = 0;
weight_.release();
sortKey_.release();
mean_.release();
var_.release();
}
/////////////////////////////////////////////////////////////////
// MOG2
MOGImpl::MOGImpl(int history, int nmixtures, double backgroundRatio, double noiseSigma) :
frameSize_(0, 0), frameType_(0), nframes_(0)
{
history_ = history > 0 ? history : defaultHistory;
nmixtures_ = std::min(nmixtures > 0 ? nmixtures : defaultNMixtures, 8);
backgroundRatio_ = backgroundRatio > 0 ? (float) backgroundRatio : defaultBackgroundRatio;
noiseSigma_ = noiseSigma > 0 ? (float) noiseSigma : defaultNoiseSigma;
namespace mog2
{
// default parameters of gaussian background detection algorithm
const int defaultHistory = 500; // Learning rate; alpha = 1/defaultHistory2
const float defaultVarThreshold = 4.0f * 4.0f;
const int defaultNMixtures = 5; // maximal number of Gaussians in mixture
const float defaultBackgroundRatio = 0.9f; // threshold sum of weights for background test
const float defaultVarThresholdGen = 3.0f * 3.0f;
const float defaultVarInit = 15.0f; // initial variance for new components
const float defaultVarMax = 5.0f * defaultVarInit;
const float defaultVarMin = 4.0f;
// additional parameters
const float defaultfCT = 0.05f; // complexity reduction prior constant 0 - no reduction of number of components
const unsigned char defaultnShadowDetection = 127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
}
varThreshold_ = defaultVarThreshold;
}
cv::gpu::MOG2_GPU::MOG2_GPU(int nmixtures) :
frameSize_(0, 0), frameType_(0), nframes_(0)
{
nmixtures_ = nmixtures > 0 ? nmixtures : mog2::defaultNMixtures;
// Convenience overload: update the model on the default (null) CUDA stream.
void MOGImpl::apply(InputArray image, OutputArray fgmask, double learningRate)
{
    apply(image, fgmask, learningRate, Stream::Null());
}
history = mog2::defaultHistory;
varThreshold = mog2::defaultVarThreshold;
bShadowDetection = true;
void MOGImpl::apply(InputArray _frame, OutputArray _fgmask, double learningRate, Stream& stream)
{
using namespace cv::gpu::cudev::mog;
backgroundRatio = mog2::defaultBackgroundRatio;
fVarInit = mog2::defaultVarInit;
fVarMax = mog2::defaultVarMax;
fVarMin = mog2::defaultVarMin;
GpuMat frame = _frame.getGpuMat();
varThresholdGen = mog2::defaultVarThresholdGen;
fCT = mog2::defaultfCT;
nShadowDetection = mog2::defaultnShadowDetection;
fTau = mog2::defaultfTau;
}
CV_Assert( frame.depth() == CV_8U );
void cv::gpu::MOG2_GPU::initialize(cv::Size frameSize, int frameType)
{
using namespace cv::gpu::cudev::mog;
int ch = frame.channels();
int work_ch = ch;
CV_Assert(frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4);
if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
initialize(frame.size(), frame.type());
frameSize_ = frameSize;
frameType_ = frameType;
nframes_ = 0;
_fgmask.create(frameSize_, CV_8UC1);
GpuMat fgmask = _fgmask.getGpuMat();
int ch = CV_MAT_CN(frameType);
int work_ch = ch;
++nframes_;
learningRate = learningRate >= 0 && nframes_ > 1 ? learningRate : 1.0 / std::min(nframes_, history_);
CV_Assert( learningRate >= 0 );
// for each gaussian mixture of each pixel bg model we store ...
// the mixture weight (w),
// the mean (nchannels values) and
// the covariance
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
variance_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
mog_gpu(frame, ch, fgmask, weight_, sortKey_, mean_, var_, nmixtures_,
varThreshold_, (float) learningRate, backgroundRatio_, noiseSigma_,
StreamAccessor::getStream(stream));
}
//make the array for keeping track of the used modes per pixel - all zeros at start
bgmodelUsedModes_.create(frameSize_, CV_8UC1);
bgmodelUsedModes_.setTo(cv::Scalar::all(0));
// Convenience overload: compose the background image on the default (null) stream.
void MOGImpl::getBackgroundImage(OutputArray backgroundImage) const
{
    getBackgroundImage(backgroundImage, Stream::Null());
}
loadConstants(nmixtures_, varThreshold, backgroundRatio, varThresholdGen, fVarInit, fVarMin, fVarMax, fTau, nShadowDetection);
}
void MOGImpl::getBackgroundImage(OutputArray _backgroundImage, Stream& stream) const
{
using namespace cv::gpu::cudev::mog;
void cv::gpu::MOG2_GPU::operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate, Stream& stream)
{
using namespace cv::gpu::cudev::mog;
_backgroundImage.create(frameSize_, frameType_);
GpuMat backgroundImage = _backgroundImage.getGpuMat();
int ch = frame.channels();
int work_ch = ch;
getBackgroundImage_gpu(backgroundImage.channels(), weight_, mean_, backgroundImage, nmixtures_, backgroundRatio_, StreamAccessor::getStream(stream));
}
if (nframes_ == 0 || learningRate >= 1.0f || frame.size() != frameSize_ || work_ch != mean_.channels())
initialize(frame.size(), frame.type());
void MOGImpl::initialize(Size frameSize, int frameType)
{
CV_Assert( frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4 );
fgmask.create(frameSize_, CV_8UC1);
fgmask.setTo(cv::Scalar::all(0));
frameSize_ = frameSize;
frameType_ = frameType;
++nframes_;
learningRate = learningRate >= 0.0f && nframes_ > 1 ? learningRate : 1.0f / std::min(2 * nframes_, history);
CV_Assert(learningRate >= 0.0f);
int ch = CV_MAT_CN(frameType);
int work_ch = ch;
mog2_gpu(frame, frame.channels(), fgmask, bgmodelUsedModes_, weight_, variance_, mean_, learningRate, -learningRate * fCT, bShadowDetection, StreamAccessor::getStream(stream));
}
// for each gaussian mixture of each pixel bg model we store
// the mixture sort key (w/sum_of_variances), the mixture weight (w),
// the mean (nchannels values) and
// the diagonal covariance matrix (another nchannels values)
void cv::gpu::MOG2_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream) const
{
using namespace cv::gpu::cudev::mog;
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
sortKey_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
var_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
backgroundImage.create(frameSize_, frameType_);
weight_.setTo(cv::Scalar::all(0));
sortKey_.setTo(cv::Scalar::all(0));
mean_.setTo(cv::Scalar::all(0));
var_.setTo(cv::Scalar::all(0));
getBackgroundImage2_gpu(backgroundImage.channels(), bgmodelUsedModes_, weight_, mean_, backgroundImage, StreamAccessor::getStream(stream));
nframes_ = 0;
}
}
void cv::gpu::MOG2_GPU::release()
Ptr<gpu::BackgroundSubtractorMOG> cv::gpu::createBackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma)
{
frameSize_ = Size(0, 0);
frameType_ = 0;
nframes_ = 0;
weight_.release();
variance_.release();
mean_.release();
bgmodelUsedModes_.release();
return new MOGImpl(history, nmixtures, backgroundRatio, noiseSigma);
}
#endif
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
Ptr<gpu::BackgroundSubtractorMOG2> cv::gpu::createBackgroundSubtractorMOG2(int, double, bool) { throw_no_cuda(); return Ptr<gpu::BackgroundSubtractorMOG2>(); }
#else
namespace cv { namespace gpu { namespace cudev
{
    // Host-side declarations of the MOG2 CUDA entry points (implemented in
    // the module's .cu sources).
    namespace mog2
    {
        // Uploads the algorithm parameters to the device; must run before the
        // kernels below (done in MOG2Impl::initialize).
        void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal);

        // Updates the per-pixel mixture model with one frame and writes the
        // foreground mask.
        void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);

        // Composes the current background estimate from the mixture means.
        void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
    }
}}}
namespace
{
// default parameters of gaussian background detection algorithm
const int defaultHistory = 500; // Learning rate; alpha = 1/defaultHistory2
const float defaultVarThreshold = 4.0f * 4.0f;
const int defaultNMixtures = 5; // maximal number of Gaussians in mixture
const float defaultBackgroundRatio = 0.9f; // threshold sum of weights for background test
const float defaultVarThresholdGen = 3.0f * 3.0f;
const float defaultVarInit = 15.0f; // initial variance for new components
const float defaultVarMax = 5.0f * defaultVarInit;
const float defaultVarMin = 4.0f;
// additional parameters
const float defaultCT = 0.05f; // complexity reduction prior constant 0 - no reduction of number of components
const unsigned char defaultShadowValue = 127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
const float defaultShadowThreshold = 0.5f; // Tau - shadow threshold, see the paper for explanation
    // GPU implementation of the MOG2 (Gaussian-mixture) background subtractor.
    // The per-pixel model is (re)allocated lazily by apply() whenever the
    // incoming frame geometry/type changes; setter changes therefore take full
    // effect on the next re-initialization.
    class MOG2Impl : public gpu::BackgroundSubtractorMOG2
    {
    public:
        MOG2Impl(int history, double varThreshold, bool detectShadows);

        // Update the model with one frame and produce the 8-bit foreground mask.
        void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
        void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream);

        // Compose the current background estimate.
        void getBackgroundImage(OutputArray backgroundImage) const;
        void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const;

        // --- parameter accessors (stored as float internally) ---
        int getHistory() const { return history_; }
        void setHistory(int history) { history_ = history; }

        int getNMixtures() const { return nmixtures_; }
        void setNMixtures(int nmixtures) { nmixtures_ = nmixtures; }

        double getBackgroundRatio() const { return backgroundRatio_; }
        void setBackgroundRatio(double ratio) { backgroundRatio_ = (float) ratio; }

        double getVarThreshold() const { return varThreshold_; }
        void setVarThreshold(double varThreshold) { varThreshold_ = (float) varThreshold; }

        double getVarThresholdGen() const { return varThresholdGen_; }
        void setVarThresholdGen(double varThresholdGen) { varThresholdGen_ = (float) varThresholdGen; }

        double getVarInit() const { return varInit_; }
        void setVarInit(double varInit) { varInit_ = (float) varInit; }

        double getVarMin() const { return varMin_; }
        void setVarMin(double varMin) { varMin_ = (float) varMin; }

        double getVarMax() const { return varMax_; }
        void setVarMax(double varMax) { varMax_ = (float) varMax; }

        double getComplexityReductionThreshold() const { return ct_; }
        void setComplexityReductionThreshold(double ct) { ct_ = (float) ct; }

        bool getDetectShadows() const { return detectShadows_; }
        void setDetectShadows(bool detectShadows) { detectShadows_ = detectShadows; }

        int getShadowValue() const { return shadowValue_; }
        void setShadowValue(int value) { shadowValue_ = (uchar) value; }

        double getShadowThreshold() const { return shadowThreshold_; }
        void setShadowThreshold(double threshold) { shadowThreshold_ = (float) threshold; }

    private:
        //! re-initiaization method
        void initialize(Size frameSize, int frameType);

        // algorithm parameters
        int history_;
        int nmixtures_;
        float backgroundRatio_;
        float varThreshold_;
        float varThresholdGen_;
        float varInit_;
        float varMin_;
        float varMax_;
        float ct_;
        bool detectShadows_;
        uchar shadowValue_;
        float shadowThreshold_;

        // current model geometry and frame counter
        Size frameSize_;
        int frameType_;
        int nframes_;

        // per-pixel mixture state (nmixtures_ stacked planes per image row)
        GpuMat weight_;
        GpuMat variance_;
        GpuMat mean_;

        //keep track of number of modes per pixel
        GpuMat bgmodelUsedModes_;
    };
MOG2Impl::MOG2Impl(int history, double varThreshold, bool detectShadows) :
frameSize_(0, 0), frameType_(0), nframes_(0)
{
history_ = history > 0 ? history : defaultHistory;
varThreshold_ = varThreshold > 0 ? (float) varThreshold : defaultVarThreshold;
detectShadows_ = detectShadows;
nmixtures_ = defaultNMixtures;
backgroundRatio_ = defaultBackgroundRatio;
varInit_ = defaultVarInit;
varMax_ = defaultVarMax;
varMin_ = defaultVarMin;
varThresholdGen_ = defaultVarThresholdGen;
ct_ = defaultCT;
shadowValue_ = defaultShadowValue;
shadowThreshold_ = defaultShadowThreshold;
}
    // Convenience overload: update the model on the default (null) CUDA stream.
    void MOG2Impl::apply(InputArray image, OutputArray fgmask, double learningRate)
    {
        apply(image, fgmask, learningRate, Stream::Null());
    }
void MOG2Impl::apply(InputArray _frame, OutputArray _fgmask, double learningRate, Stream& stream)
{
using namespace cv::gpu::cudev::mog2;
GpuMat frame = _frame.getGpuMat();
int ch = frame.channels();
int work_ch = ch;
if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
initialize(frame.size(), frame.type());
_fgmask.create(frameSize_, CV_8UC1);
GpuMat fgmask = _fgmask.getGpuMat();
fgmask.setTo(Scalar::all(0), stream);
++nframes_;
learningRate = learningRate >= 0 && nframes_ > 1 ? learningRate : 1.0 / std::min(2 * nframes_, history_);
CV_Assert( learningRate >= 0 );
mog2_gpu(frame, frame.channels(), fgmask, bgmodelUsedModes_, weight_, variance_, mean_,
(float) learningRate, static_cast<float>(-learningRate * ct_), detectShadows_, StreamAccessor::getStream(stream));
}
    // Convenience overload: compose the background image on the default (null) stream.
    void MOG2Impl::getBackgroundImage(OutputArray backgroundImage) const
    {
        getBackgroundImage(backgroundImage, Stream::Null());
    }
void MOG2Impl::getBackgroundImage(OutputArray _backgroundImage, Stream& stream) const
{
using namespace cv::gpu::cudev::mog2;
_backgroundImage.create(frameSize_, frameType_);
GpuMat backgroundImage = _backgroundImage.getGpuMat();
getBackgroundImage2_gpu(backgroundImage.channels(), bgmodelUsedModes_, weight_, mean_, backgroundImage, StreamAccessor::getStream(stream));
}
void MOG2Impl::initialize(cv::Size frameSize, int frameType)
{
using namespace cv::gpu::cudev::mog2;
CV_Assert( frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4 );
frameSize_ = frameSize;
frameType_ = frameType;
nframes_ = 0;
int ch = CV_MAT_CN(frameType);
int work_ch = ch;
// for each gaussian mixture of each pixel bg model we store ...
// the mixture weight (w),
// the mean (nchannels values) and
// the covariance
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
variance_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
//make the array for keeping track of the used modes per pixel - all zeros at start
bgmodelUsedModes_.create(frameSize_, CV_8UC1);
bgmodelUsedModes_.setTo(Scalar::all(0));
loadConstants(nmixtures_, varThreshold_, backgroundRatio_, varThresholdGen_, varInit_, varMin_, varMax_, shadowThreshold_, shadowValue_);
}
}
// Factory: create a GPU MOG2 background subtractor with the given settings,
// wrapped in the reference-counted smart pointer callers expect.
Ptr<gpu::BackgroundSubtractorMOG2> cv::gpu::createBackgroundSubtractorMOG2(int history, double varThreshold, bool detectShadows)
{
    return Ptr<gpu::BackgroundSubtractorMOG2>(new MOG2Impl(history, varThreshold, detectShadows));
}
#endif
......@@ -46,10 +46,21 @@
#include <limits>
#include "opencv2/gpubgsegm.hpp"
#include "opencv2/gpuarithm.hpp"
#include "opencv2/gpufilters.hpp"
#include "opencv2/gpuimgproc.hpp"
#include "opencv2/core/private.gpu.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPUARITHM
# include "opencv2/gpuarithm.hpp"
#endif
#ifdef HAVE_OPENCV_GPUFILTERS
# include "opencv2/gpufilters.hpp"
#endif
#ifdef HAVE_OPENCV_GPUIMGPROC
# include "opencv2/gpuimgproc.hpp"
#endif
#endif /* __OPENCV_PRECOMP_H__ */
......@@ -41,7 +41,10 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/legacy.hpp"
#ifdef HAVE_OPENCV_LEGACY
# include "opencv2/legacy.hpp"
#endif
#ifdef HAVE_CUDA
......@@ -62,7 +65,7 @@ using namespace cvtest;
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined(HAVE_OPENCV_LEGACY)
namespace cv
{
......@@ -72,11 +75,10 @@ namespace cv
}
}
PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
int out_cn;
virtual void SetUp()
{
......@@ -84,8 +86,6 @@ PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
out_cn = GET_PARAM(2);
}
};
......@@ -102,15 +102,10 @@ GPU_TEST_P(FGDStatModel, Update)
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(out_cn);
d_model.create(d_frame);
cv::Mat h_background;
cv::Mat h_foreground;
cv::Mat h_background3;
cv::Mat backgroundDiff;
cv::Mat foregroundDiff;
cv::Ptr<cv::gpu::BackgroundSubtractorFGD> d_fgd = cv::gpu::createBackgroundSubtractorFGD();
cv::gpu::GpuMat d_foreground, d_background;
std::vector< std::vector<cv::Point> > foreground_regions;
d_fgd->apply(d_frame, d_foreground);
for (int i = 0; i < 5; ++i)
{
......@@ -121,32 +116,23 @@ GPU_TEST_P(FGDStatModel, Update)
int gold_count = cvUpdateBGStatModel(&ipl_frame, model);
d_frame.upload(frame);
int count = d_model.update(d_frame);
ASSERT_EQ(gold_count, count);
d_fgd->apply(d_frame, d_foreground);
d_fgd->getBackgroundImage(d_background);
d_fgd->getForegroundRegions(foreground_regions);
int count = (int) foreground_regions.size();
cv::Mat gold_background = cv::cvarrToMat(model->background);
cv::Mat gold_foreground = cv::cvarrToMat(model->foreground);
if (out_cn == 3)
d_model.background.download(h_background3);
else
{
d_model.background.download(h_background);
cv::cvtColor(h_background, h_background3, cv::COLOR_BGRA2BGR);
}
d_model.foreground.download(h_foreground);
ASSERT_MAT_NEAR(gold_background, h_background3, 1.0);
ASSERT_MAT_NEAR(gold_foreground, h_foreground, 0.0);
ASSERT_MAT_NEAR(gold_background, d_background, 1.0);
ASSERT_MAT_NEAR(gold_foreground, d_foreground, 0.0);
ASSERT_EQ(gold_count, count);
}
}
INSTANTIATE_TEST_CASE_P(GPU_BgSegm, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(Channels(3), Channels(4))));
testing::Values(std::string("768x576.avi"))));
#endif
......@@ -193,7 +179,7 @@ GPU_TEST_P(MOG, Update)
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::gpu::MOG_GPU mog;
cv::Ptr<cv::BackgroundSubtractorMOG> mog = cv::gpu::createBackgroundSubtractorMOG();
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
cv::Ptr<cv::BackgroundSubtractorMOG> mog_gold = cv::createBackgroundSubtractorMOG();
......@@ -211,7 +197,7 @@ GPU_TEST_P(MOG, Update)
cv::swap(temp, frame);
}
mog(loadMat(frame, useRoi), foreground, (float)learningRate);
mog->apply(loadMat(frame, useRoi), foreground, learningRate);
mog_gold->apply(frame, foreground_gold, learningRate);
......@@ -267,8 +253,8 @@ GPU_TEST_P(MOG2, Update)
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::gpu::MOG2_GPU mog2;
mog2.bShadowDetection = detectShadow;
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::gpu::createBackgroundSubtractorMOG2();
mog2->setDetectShadows(detectShadow);
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
......@@ -287,7 +273,7 @@ GPU_TEST_P(MOG2, Update)
cv::swap(temp, frame);
}
mog2(loadMat(frame, useRoi), foreground);
mog2->apply(loadMat(frame, useRoi), foreground);
mog2_gold->apply(frame, foreground_gold);
......@@ -312,8 +298,8 @@ GPU_TEST_P(MOG2, getBackgroundImage)
cv::Mat frame;
cv::gpu::MOG2_GPU mog2;
mog2.bShadowDetection = detectShadow;
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::gpu::createBackgroundSubtractorMOG2();
mog2->setDetectShadows(detectShadow);
cv::gpu::GpuMat foreground;
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
......@@ -325,13 +311,13 @@ GPU_TEST_P(MOG2, getBackgroundImage)
cap >> frame;
ASSERT_FALSE(frame.empty());
mog2(loadMat(frame, useRoi), foreground);
mog2->apply(loadMat(frame, useRoi), foreground);
mog2_gold->apply(frame, foreground_gold);
}
cv::gpu::GpuMat background = createMat(frame.size(), frame.type(), useRoi);
mog2.getBackgroundImage(background);
mog2->getBackgroundImage(background);
cv::Mat background_gold;
mog2_gold->getBackgroundImage(background_gold);
......@@ -372,16 +358,15 @@ GPU_TEST_P(GMG, Accuracy)
cv::Mat frame = randomMat(size, type, 0, 100);
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
cv::gpu::GMG_GPU gmg;
gmg.numInitializationFrames = 5;
gmg.smoothingRadius = 0;
gmg.initialize(d_frame.size(), 0, 255);
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::gpu::createBackgroundSubtractorGMG();
gmg->setNumFrames(5);
gmg->setSmoothingRadius(0);
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
for (int i = 0; i < gmg.numInitializationFrames; ++i)
for (int i = 0; i < gmg->getNumFrames(); ++i)
{
gmg(d_frame, d_fgmask);
gmg->apply(d_frame, d_fgmask);
// fgmask should be entirely background during training
ASSERT_MAT_NEAR(zeros, d_fgmask, 0);
......@@ -389,7 +374,7 @@ GPU_TEST_P(GMG, Accuracy)
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
gmg(d_frame, d_fgmask);
gmg->apply(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
......
......@@ -59,4 +59,6 @@
#include "opencv2/gpubgsegm.hpp"
#include "opencv2/video.hpp"
#include "opencv2/opencv_modules.hpp"
#endif
......@@ -18,10 +18,10 @@ using namespace cv::gpu;
enum Method
{
FGD_STAT,
MOG,
MOG2,
GMG
GMG,
FGD_STAT
};
int main(int argc, const char** argv)
......@@ -29,7 +29,7 @@ int main(int argc, const char** argv)
cv::CommandLineParser cmd(argc, argv,
"{ c camera | | use camera }"
"{ f file | 768x576.avi | input video file }"
"{ m method | mog | method (fgd, mog, mog2, gmg) }"
"{ m method | mog | method (mog, mog2, gmg, fgd) }"
"{ h help | | print help message }");
if (cmd.has("help") || !cmd.check())
......@@ -43,18 +43,18 @@ int main(int argc, const char** argv)
string file = cmd.get<string>("file");
string method = cmd.get<string>("method");
if (method != "fgd"
&& method != "mog"
if (method != "mog"
&& method != "mog2"
&& method != "gmg")
&& method != "gmg"
&& method != "fgd")
{
cerr << "Incorrect method" << endl;
return -1;
}
Method m = method == "fgd" ? FGD_STAT :
method == "mog" ? MOG :
Method m = method == "mog" ? MOG :
method == "mog2" ? MOG2 :
method == "fgd" ? FGD_STAT :
GMG;
VideoCapture cap;
......@@ -75,11 +75,10 @@ int main(int argc, const char** argv)
GpuMat d_frame(frame);
FGDStatModel fgd_stat;
MOG_GPU mog;
MOG2_GPU mog2;
GMG_GPU gmg;
gmg.numInitializationFrames = 40;
Ptr<BackgroundSubtractor> mog = gpu::createBackgroundSubtractorMOG();
Ptr<BackgroundSubtractor> mog2 = gpu::createBackgroundSubtractorMOG2();
Ptr<BackgroundSubtractor> gmg = gpu::createBackgroundSubtractorGMG(40);
Ptr<BackgroundSubtractor> fgd = gpu::createBackgroundSubtractorFGD();
GpuMat d_fgmask;
GpuMat d_fgimg;
......@@ -91,20 +90,20 @@ int main(int argc, const char** argv)
switch (m)
{
case FGD_STAT:
fgd_stat.create(d_frame);
break;
case MOG:
mog(d_frame, d_fgmask, 0.01f);
mog->apply(d_frame, d_fgmask, 0.01);
break;
case MOG2:
mog2(d_frame, d_fgmask);
mog2->apply(d_frame, d_fgmask);
break;
case GMG:
gmg.initialize(d_frame.size());
gmg->apply(d_frame, d_fgmask);
break;
case FGD_STAT:
fgd->apply(d_frame, d_fgmask);
break;
}
......@@ -128,24 +127,23 @@ int main(int argc, const char** argv)
//update the model
switch (m)
{
case FGD_STAT:
fgd_stat.update(d_frame);
d_fgmask = fgd_stat.foreground;
d_bgimg = fgd_stat.background;
break;
case MOG:
mog(d_frame, d_fgmask, 0.01f);
mog.getBackgroundImage(d_bgimg);
mog->apply(d_frame, d_fgmask, 0.01);
mog->getBackgroundImage(d_bgimg);
break;
case MOG2:
mog2(d_frame, d_fgmask);
mog2.getBackgroundImage(d_bgimg);
mog2->apply(d_frame, d_fgmask);
mog2->getBackgroundImage(d_bgimg);
break;
case GMG:
gmg(d_frame, d_fgmask);
gmg->apply(d_frame, d_fgmask);
break;
case FGD_STAT:
fgd->apply(d_frame, d_fgmask);
fgd->getBackgroundImage(d_bgimg);
break;
}
......
......@@ -1271,14 +1271,14 @@ TEST(FGDStatModel)
{
const std::string inputFile = abspath("768x576.avi");
cv::VideoCapture cap(inputFile);
VideoCapture cap(inputFile);
if (!cap.isOpened()) throw runtime_error("can't open 768x576.avi");
cv::Mat frame;
Mat frame;
cap >> frame;
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
while (!TestSystem::instance().stop())
{
......@@ -1297,8 +1297,10 @@ TEST(FGDStatModel)
cap >> frame;
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(d_frame);
gpu::GpuMat d_frame(frame), d_fgmask;
Ptr<BackgroundSubtractor> d_fgd = gpu::createBackgroundSubtractorFGD();
d_fgd->apply(d_frame, d_fgmask);
while (!TestSystem::instance().stop())
{
......@@ -1307,7 +1309,7 @@ TEST(FGDStatModel)
TestSystem::instance().gpuOn();
d_model.update(d_frame);
d_fgd->apply(d_frame, d_fgmask);
TestSystem::instance().gpuOff();
}
......@@ -1346,10 +1348,10 @@ TEST(MOG)
cap >> frame;
cv::gpu::GpuMat d_frame(frame);
cv::gpu::MOG_GPU d_mog;
cv::Ptr<cv::BackgroundSubtractor> d_mog = cv::gpu::createBackgroundSubtractorMOG();
cv::gpu::GpuMat d_foreground;
d_mog(d_frame, d_foreground, 0.01f);
d_mog->apply(d_frame, d_foreground, 0.01);
while (!TestSystem::instance().stop())
{
......@@ -1358,7 +1360,7 @@ TEST(MOG)
TestSystem::instance().gpuOn();
d_mog(d_frame, d_foreground, 0.01f);
d_mog->apply(d_frame, d_foreground, 0.01);
TestSystem::instance().gpuOff();
}
......@@ -1399,13 +1401,13 @@ TEST(MOG2)
cap >> frame;
cv::Ptr<cv::BackgroundSubtractor> d_mog2 = cv::gpu::createBackgroundSubtractorMOG2();
cv::gpu::GpuMat d_frame(frame);
cv::gpu::MOG2_GPU d_mog2;
cv::gpu::GpuMat d_foreground;
cv::gpu::GpuMat d_background;
d_mog2(d_frame, d_foreground);
d_mog2.getBackgroundImage(d_background);
d_mog2->apply(d_frame, d_foreground);
d_mog2->getBackgroundImage(d_background);
while (!TestSystem::instance().stop())
{
......@@ -1414,8 +1416,8 @@ TEST(MOG2)
TestSystem::instance().gpuOn();
d_mog2(d_frame, d_foreground);
d_mog2.getBackgroundImage(d_background);
d_mog2->apply(d_frame, d_foreground);
d_mog2->getBackgroundImage(d_background);
TestSystem::instance().gpuOff();
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册