Commit 9786e0a7 authored by Vladislav Vinogradov

fix cudabgsegm module compilation

Parent 8392296e
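The hunks below rework the CUDA background subtractors so they derive from cv::BackgroundSubtractor instead of the CPU classes that are no longer available, and declare the tuning accessors directly on the CUDA interfaces. As a rough usage sketch (not part of this commit), the reworked cv::cuda::BackgroundSubtractorMOG interface can be driven as follows; this assumes OpenCV 3.x built with the cudabgsegm module and a CUDA device, and the video file name is the sample clip referenced by the tests in this patch.

// Sketch only: drive the CUDA MOG background subtractor through the new interface.
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudabgsegm.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap("768x576.avi");                   // sample clip used by the tests below
    cv::Ptr<cv::cuda::BackgroundSubtractorMOG> mog =
        cv::cuda::createBackgroundSubtractorMOG();          // history/nmixtures/backgroundRatio defaults
    cv::cuda::Stream stream;
    cv::Mat frame;
    cv::cuda::GpuMat d_frame, d_fgmask, d_background;
    while (cap.read(frame))
    {
        d_frame.upload(frame, stream);
        mog->apply(d_frame, d_fgmask, 0.01, stream);         // Stream overload declared on the CUDA class
        mog->getBackgroundImage(d_background, stream);       // Stream overload declared on the CUDA class
        stream.waitForCompletion();
        // mog->apply(d_frame, d_fgmask, 0.01);              // 3-arg overload inherited from cv::BackgroundSubtractor via the using-declaration
    }
    return 0;
}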
......@@ -55,15 +55,27 @@ namespace cv { namespace cuda {
////////////////////////////////////////////////////
// MOG
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractorMOG
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractorMOG::apply;
using cv::BackgroundSubtractorMOG::getBackgroundImage;
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
using cv::BackgroundSubtractor::getBackgroundImage;
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
virtual int getHistory() const = 0;
virtual void setHistory(int nframes) = 0;
virtual int getNMixtures() const = 0;
virtual void setNMixtures(int nmix) = 0;
virtual double getBackgroundRatio() const = 0;
virtual void setBackgroundRatio(double backgroundRatio) = 0;
virtual double getNoiseSigma() const = 0;
virtual void setNoiseSigma(double noiseSigma) = 0;
};
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>
......@@ -91,12 +103,41 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
////////////////////////////////////////////////////
// GMG
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractorGMG
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractorGMG::apply;
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
virtual int getMaxFeatures() const = 0;
virtual void setMaxFeatures(int maxFeatures) = 0;
virtual double getDefaultLearningRate() const = 0;
virtual void setDefaultLearningRate(double lr) = 0;
virtual int getNumFrames() const = 0;
virtual void setNumFrames(int nframes) = 0;
virtual int getQuantizationLevels() const = 0;
virtual void setQuantizationLevels(int nlevels) = 0;
virtual double getBackgroundPrior() const = 0;
virtual void setBackgroundPrior(double bgprior) = 0;
virtual int getSmoothingRadius() const = 0;
virtual void setSmoothingRadius(int radius) = 0;
virtual double getDecisionThreshold() const = 0;
virtual void setDecisionThreshold(double thresh) = 0;
virtual bool getUpdateBackgroundModel() const = 0;
virtual void setUpdateBackgroundModel(bool update) = 0;
virtual double getMinVal() const = 0;
virtual void setMinVal(double val) = 0;
virtual double getMaxVal() const = 0;
virtual void setMaxVal(double val) = 0;
};
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
......
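For the GMG interface above, a comparable sketch (again, not part of the patch) configures the detector through the accessors that are now declared on the CUDA class; the parameter values here are illustrative only.

// Sketch only: configure and run the CUDA GMG background subtractor.
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudabgsegm.hpp>

void segmentForeground(const cv::cuda::GpuMat& d_frame, cv::cuda::GpuMat& d_fgmask)
{
    static cv::Ptr<cv::cuda::BackgroundSubtractorGMG> gmg =
        cv::cuda::createBackgroundSubtractorGMG(120, 0.8);  // initializationFrames, decisionThreshold
    gmg->setMaxFeatures(64);        // accessors are now part of the CUDA interface itself
    gmg->setSmoothingRadius(4);
    gmg->apply(d_frame, d_fgmask);  // default learning rate, overload inherited from cv::BackgroundSubtractor
}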
......@@ -239,58 +239,7 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
cv::Mat foreground;
mog->apply(frame, foreground, learningRate);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer();
if(!next())
break;
mog->apply(frame, foreground, learningRate);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
mog->apply(frame, foreground, learningRate);
}
CPU_SANITY_CHECK(foreground);
FAIL_NO_CPU();
}
}
......@@ -576,7 +525,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
cv::cuda::GpuMat d_frame(frame);
cv::cuda::GpuMat foreground;
cv::Ptr<cv::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
cv::Ptr<cv::cuda::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
d_gmg->setMaxFeatures(maxFeatures);
d_gmg->apply(d_frame, foreground);
......@@ -645,71 +594,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
}
else
{
cv::Mat foreground;
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::createBackgroundSubtractorGMG();
gmg->setMaxFeatures(maxFeatures);
gmg->apply(frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer();
if(!next())
break;
gmg->apply(frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
gmg->apply(frame, foreground);
}
CPU_SANITY_CHECK(foreground);
FAIL_NO_CPU();
}
}
......
......@@ -59,91 +59,14 @@ using namespace cvtest;
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////
// MOG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
namespace
{
IMPLEMENT_PARAM_CLASS(UseGray, bool)
IMPLEMENT_PARAM_CLASS(LearningRate, double)
}
PARAM_TEST_CASE(MOG, cv::cuda::DeviceInfo, std::string, UseGray, LearningRate, UseRoi)
{
cv::cuda::DeviceInfo devInfo;
std::string inputFile;
bool useGray;
double learningRate;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::cuda::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
useGray = GET_PARAM(2);
learningRate = GET_PARAM(3);
useRoi = GET_PARAM(4);
}
};
CUDA_TEST_P(MOG, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::Ptr<cv::BackgroundSubtractorMOG> mog = cv::cuda::createBackgroundSubtractorMOG();
cv::cuda::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
cv::Ptr<cv::BackgroundSubtractorMOG> mog_gold = cv::createBackgroundSubtractorMOG();
cv::Mat foreground_gold;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (useGray)
{
cv::Mat temp;
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
cv::swap(temp, frame);
}
mog->apply(loadMat(frame, useRoi), foreground, learningRate);
mog_gold->apply(frame, foreground_gold, learningRate);
ASSERT_MAT_NEAR(foreground_gold, foreground, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, MOG, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(UseGray(true), UseGray(false)),
testing::Values(LearningRate(0.0), LearningRate(0.01)),
WHOLE_SUBMAT));
#endif
//////////////////////////////////////////////////////
// MOG2
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
namespace
{
IMPLEMENT_PARAM_CLASS(UseGray, bool)
IMPLEMENT_PARAM_CLASS(DetectShadow, bool)
}
......@@ -257,57 +180,4 @@ INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, MOG2, testing::Combine(
#endif
//////////////////////////////////////////////////////
// GMG
PARAM_TEST_CASE(GMG, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
};
CUDA_TEST_P(GMG, Accuracy)
{
const cv::cuda::DeviceInfo devInfo = GET_PARAM(0);
cv::cuda::setDevice(devInfo.deviceID());
const cv::Size size = GET_PARAM(1);
const int depth = GET_PARAM(2);
const int channels = GET_PARAM(3);
const bool useRoi = GET_PARAM(4);
const int type = CV_MAKE_TYPE(depth, channels);
const cv::Mat zeros(size, CV_8UC1, cv::Scalar::all(0));
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
cv::Mat frame = randomMat(size, type, 0, 100);
cv::cuda::GpuMat d_frame = loadMat(frame, useRoi);
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::cuda::createBackgroundSubtractorGMG();
gmg->setNumFrames(5);
gmg->setSmoothingRadius(0);
cv::cuda::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
for (int i = 0; i < gmg->getNumFrames(); ++i)
{
gmg->apply(d_frame, d_fgmask);
// fgmask should be entirely background during training
ASSERT_MAT_NEAR(zeros, d_fgmask, 0);
}
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
gmg->apply(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
}
INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, GMG, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8U), MatType(CV_16U), MatType(CV_32F)),
testing::Values(Channels(1), Channels(3), Channels(4)),
WHOLE_SUBMAT));
#endif // HAVE_CUDA