提交 0428dce2 编写于 作者: A Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

......@@ -37,7 +37,7 @@ J_{12} & J_{22}
where \f$J_{11} = M[Z_{x}^{2}]\f$, \f$J_{22} = M[Z_{y}^{2}]\f$, \f$J_{12} = M[Z_{x}Z_{y}]\f$ - components of the tensor, \f$M[]\f$ is a symbol of mathematical expectation (we can consider this operation as averaging in a window w), \f$Z_{x}\f$ and \f$Z_{y}\f$ are partial derivatives of an image \f$Z\f$ with respect to \f$x\f$ and \f$y\f$.
The eigenvalues of the tensor can be found in the below formula:
\f[\lambda_{1,2} = J_{11} + J_{22} \pm \sqrt{(J_{11} - J_{22})^{2} + 4J_{12}^{2}}\f]
\f[\lambda_{1,2} = \frac{1}{2} \left [ J_{11} + J_{22} \pm \sqrt{(J_{11} - J_{22})^{2} + 4J_{12}^{2}} \right ] \f]
where \f$\lambda_1\f$ - largest eigenvalue, \f$\lambda_2\f$ - smallest eigenvalue.
### How to estimate orientation and coherency of an anisotropic image by gradient structure tensor?
......
......@@ -40,6 +40,11 @@
//M*/
#include "precomp.hpp"
#ifndef HAVE_OPENCL
#include "ocl_disabled.impl.hpp"
#else // HAVE_OPENCL
#include <list>
#include <map>
#include <deque>
......@@ -106,23 +111,7 @@
#include "opencv2/core/opencl/runtime/opencl_clamdblas.hpp"
#include "opencv2/core/opencl/runtime/opencl_clamdfft.hpp"
#ifdef HAVE_OPENCL
#include "opencv2/core/opencl/runtime/opencl_core.hpp"
#else
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4100)
#pragma warning(disable : 4702)
#elif defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
// TODO FIXIT: This file can't be build without OPENCL
#include "ocl_deprecated.hpp"
#endif // HAVE_OPENCL
#ifdef HAVE_OPENCL_SVM
#include "opencv2/core/opencl/runtime/opencl_svm_20.hpp"
......@@ -147,31 +136,6 @@ cv::utils::AllocatorStatisticsInterface& getOpenCLAllocatorStatistics()
return opencl_allocator_stats;
}
#ifndef HAVE_OPENCL
#define CV_OPENCL_NO_SUPPORT() CV_Error(cv::Error::OpenCLApiCallError, "OpenCV build without OpenCL support")
namespace {
struct DummyImpl
{
DummyImpl() { CV_OPENCL_NO_SUPPORT(); }
~DummyImpl() { /* do not throw in desctructors */ }
IMPLEMENT_REFCOUNTABLE();
};
} // namespace
// TODO Replace to empty body (without HAVE_OPENCL)
#define CV_OCL_TRACE_CHECK_RESULT(status, message) /* nothing */
#define CV_OCL_API_ERROR_MSG(check_result, msg) cv::String()
#define CV_OCL_CHECK_RESULT(check_result, msg) (void)check_result
#define CV_OCL_CHECK_(expr, check_result) expr; (void)check_result
#define CV_OCL_CHECK(expr) do { cl_int __cl_result = (expr); CV_OCL_CHECK_RESULT(__cl_result, #expr); } while (0)
#define CV_OCL_DBG_CHECK_RESULT(check_result, msg) (void)check_result
#define CV_OCL_DBG_CHECK_(expr, check_result) expr; (void)check_result
#define CV_OCL_DBG_CHECK(expr) do { cl_int __cl_result = (expr); CV_OCL_CHECK_RESULT(__cl_result, #expr); } while (0)
static const bool CV_OPENCL_DISABLE_BUFFER_RECT_OPERATIONS = false;
#else // HAVE_OPENCL
#ifndef _DEBUG
static bool isRaiseError()
{
......@@ -270,7 +234,6 @@ static const String getBuildExtraOptions()
static const bool CV_OPENCL_ENABLE_MEM_USE_HOST_PTR = utils::getConfigurationParameterBool("OPENCV_OPENCL_ENABLE_MEM_USE_HOST_PTR", true);
static const size_t CV_OPENCL_ALIGNMENT_MEM_USE_HOST_PTR = utils::getConfigurationParameterSizeT("OPENCV_OPENCL_ALIGNMENT_MEM_USE_HOST_PTR", 4);
#endif // HAVE_OPENCL
struct UMat2D
{
......@@ -331,7 +294,7 @@ static uint64 crc64( const uchar* data, size_t size, uint64 crc0=0 )
return ~crc;
}
#if defined HAVE_OPENCL && OPENCV_HAVE_FILESYSTEM_SUPPORT
#if OPENCV_HAVE_FILESYSTEM_SUPPORT
struct OpenCLBinaryCacheConfigurator
{
cv::String cache_path_;
......@@ -872,7 +835,6 @@ static bool g_isOpenCVActivated = false;
bool haveOpenCL()
{
CV_TRACE_FUNCTION();
#ifdef HAVE_OPENCL
static bool g_isOpenCLInitialized = false;
static bool g_isOpenCLAvailable = false;
......@@ -902,9 +864,6 @@ bool haveOpenCL()
g_isOpenCLInitialized = true;
}
return g_isOpenCLAvailable;
#else
return false;
#endif
}
bool useOpenCL()
......@@ -924,14 +883,12 @@ bool useOpenCL()
return data.useOpenCL > 0;
}
#ifdef HAVE_OPENCL
bool isOpenCLActivated()
{
if (!g_isOpenCVActivated)
return false; // prevent unnecessary OpenCL activation via useOpenCL()->haveOpenCL() calls
return useOpenCL();
}
#endif
void setUseOpenCL(bool flag)
{
......@@ -1953,7 +1910,6 @@ static unsigned int getSVMCapabilitiesMask()
} // namespace
#endif
#ifdef HAVE_OPENCL
static size_t getProgramCountLimit()
{
static bool initialized = false;
......@@ -1965,7 +1921,6 @@ static size_t getProgramCountLimit()
}
return count;
}
#endif
struct Context::Impl
{
......@@ -3548,8 +3503,6 @@ internal::ProgramEntry::operator ProgramSource&() const
/////////////////////////////////////////// Program /////////////////////////////////////////////
#ifdef HAVE_OPENCL
static
cv::String joinBuildOptions(const cv::String& a, const cv::String& b)
{
......@@ -3963,10 +3916,6 @@ struct Program::Impl
String sourceName_;
};
#else // HAVE_OPENCL
struct Program::Impl : public DummyImpl {};
#endif // HAVE_OPENCL
Program::Program() { p = 0; }
......@@ -4009,7 +3958,6 @@ bool Program::create(const ProgramSource& src,
p->release();
p = NULL;
}
#ifdef HAVE_OPENCL
p = new Impl(src, buildflags, errmsg);
if(!p->handle)
{
......@@ -4017,18 +3965,11 @@ bool Program::create(const ProgramSource& src,
p = 0;
}
return p != 0;
#else
CV_OPENCL_NO_SUPPORT();
#endif
}
void* Program::ptr() const
{
#ifdef HAVE_OPENCL
return p ? p->handle : 0;
#else
CV_OPENCL_NO_SUPPORT();
#endif
}
#ifndef OPENCV_REMOVE_DEPRECATED_API
......@@ -4051,44 +3992,30 @@ bool Program::write(String& bin) const
String Program::getPrefix() const
{
#ifdef HAVE_OPENCL
if(!p)
return String();
Context::Impl* ctx_ = Context::getDefault().getImpl();
CV_Assert(ctx_);
return cv::format("opencl=%s\nbuildflags=%s", ctx_->getPrefixString().c_str(), p->buildflags.c_str());
#else
CV_OPENCL_NO_SUPPORT();
#endif
}
String Program::getPrefix(const String& buildflags)
{
#ifdef HAVE_OPENCL
Context::Impl* ctx_ = Context::getDefault().getImpl();
CV_Assert(ctx_);
return cv::format("opencl=%s\nbuildflags=%s", ctx_->getPrefixString().c_str(), buildflags.c_str());
#else
CV_OPENCL_NO_SUPPORT();
#endif
}
#endif
#endif // OPENCV_REMOVE_DEPRECATED_API
void Program::getBinary(std::vector<char>& binary) const
{
#ifdef HAVE_OPENCL
CV_Assert(p && "Empty program");
p->getProgramBinary(binary);
#else
binary.clear();
CV_OPENCL_NO_SUPPORT();
#endif
}
Program Context::Impl::getProg(const ProgramSource& src,
const String& buildflags, String& errmsg)
{
#ifdef HAVE_OPENCL
size_t limit = getProgramCountLimit();
const ProgramSource::Impl* src_ = src.getImpl();
CV_Assert(src_);
......@@ -4140,9 +4067,6 @@ Program Context::Impl::getProg(const ProgramSource& src,
cacheList.push_front(key);
}
return prog;
#else
CV_OPENCL_NO_SUPPORT();
#endif
}
......@@ -4703,9 +4627,6 @@ public:
bool allocate(UMatData* u, AccessFlag accessFlags, UMatUsageFlags usageFlags) const CV_OVERRIDE
{
#ifndef HAVE_OPENCL
return false;
#else
if(!u)
return false;
......@@ -4825,7 +4746,6 @@ public:
u->markHostCopyObsolete(true);
opencl_allocator_stats.onAllocate(u->size);
return true;
#endif // HAVE_OPENCL
}
/*void sync(UMatData* u) const
......@@ -6705,27 +6625,19 @@ struct Timer::Impl
void start()
{
#ifdef HAVE_OPENCL
CV_OCL_DBG_CHECK(clFinish((cl_command_queue)queue.ptr()));
timer.start();
#endif
}
void stop()
{
#ifdef HAVE_OPENCL
CV_OCL_DBG_CHECK(clFinish((cl_command_queue)queue.ptr()));
timer.stop();
#endif
}
uint64 durationNS() const
{
#ifdef HAVE_OPENCL
return (uint64)(timer.getTimeSec() * 1e9);
#else
return 0;
#endif
}
TickMeter timer;
......@@ -6752,13 +6664,6 @@ uint64 Timer::durationNS() const
return p->durationNS();
}
#ifndef HAVE_OPENCL
#if defined(_MSC_VER)
#pragma warning(pop)
#elif defined(__clang__)
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#endif
}} // namespace
#endif // HAVE_OPENCL
此差异已折叠。
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "opencv2/core/ocl_genbase.hpp"
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4100)
#pragma warning(disable : 4702)
#elif defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
namespace cv { namespace ocl {
// Single failure path for this OpenCL-disabled build: every stub below that
// has no meaningful "not available" return value funnels through this thrower.
static
CV_NORETURN void throw_no_ocl()
{
CV_Error(Error::OpenCLApiCallError, "OpenCV build without OpenCL support");
}
// Shorthand used by the API stubs to report that OpenCL support is compiled out.
#define OCL_NOT_AVAILABLE() throw_no_ocl();
// Capability queries: in a build without OpenCL, nothing is ever available and
// the enable/flush entry points are deliberate no-ops (they must not throw,
// since generic code calls them unconditionally).
CV_EXPORTS_W bool haveOpenCL() { return false; }
CV_EXPORTS_W bool useOpenCL() { return false; }
CV_EXPORTS_W bool haveAmdBlas() { return false; }
CV_EXPORTS_W bool haveAmdFft() { return false; }
CV_EXPORTS_W void setUseOpenCL(bool flag) { /* nothing */ }
CV_EXPORTS_W void finish() { /* nothing */ }
CV_EXPORTS bool haveSVM() { return false; }
// cv::ocl::Device stubs. Construction/copy/destruction are safe no-ops so a
// Device object can exist (e.g. Device::getDefault() below); every property
// accessor throws via OCL_NOT_AVAILABLE(), except the few that generic code
// probes unconditionally (ptr(), localMemType()).
Device::Device() : p(NULL) { }
Device::Device(void* d) : p(NULL) { OCL_NOT_AVAILABLE(); }
Device::Device(const Device& d) : p(NULL) { }
Device& Device::operator=(const Device& d) { return *this; }
Device::~Device() { }
void Device::set(void* d) { OCL_NOT_AVAILABLE(); }
String Device::name() const { OCL_NOT_AVAILABLE(); }
String Device::extensions() const { OCL_NOT_AVAILABLE(); }
bool Device::isExtensionSupported(const String& extensionName) const { OCL_NOT_AVAILABLE(); }
String Device::version() const { OCL_NOT_AVAILABLE(); }
String Device::vendorName() const { OCL_NOT_AVAILABLE(); }
String Device::OpenCL_C_Version() const { OCL_NOT_AVAILABLE(); }
String Device::OpenCLVersion() const { OCL_NOT_AVAILABLE(); }
int Device::deviceVersionMajor() const { OCL_NOT_AVAILABLE(); }
int Device::deviceVersionMinor() const { OCL_NOT_AVAILABLE(); }
String Device::driverVersion() const { OCL_NOT_AVAILABLE(); }
// ptr() intentionally returns NULL instead of throwing: callers use it as an
// "is there a real device?" probe.
void* Device::ptr() const { /*OCL_NOT_AVAILABLE();*/ return NULL; }
int Device::type() const { OCL_NOT_AVAILABLE(); }
int Device::addressBits() const { OCL_NOT_AVAILABLE(); }
bool Device::available() const { OCL_NOT_AVAILABLE(); }
bool Device::compilerAvailable() const { OCL_NOT_AVAILABLE(); }
bool Device::linkerAvailable() const { OCL_NOT_AVAILABLE(); }
int Device::doubleFPConfig() const { OCL_NOT_AVAILABLE(); }
int Device::singleFPConfig() const { OCL_NOT_AVAILABLE(); }
int Device::halfFPConfig() const { OCL_NOT_AVAILABLE(); }
bool Device::endianLittle() const { OCL_NOT_AVAILABLE(); }
bool Device::errorCorrectionSupport() const { OCL_NOT_AVAILABLE(); }
int Device::executionCapabilities() const { OCL_NOT_AVAILABLE(); }
size_t Device::globalMemCacheSize() const { OCL_NOT_AVAILABLE(); }
int Device::globalMemCacheType() const { OCL_NOT_AVAILABLE(); }
int Device::globalMemCacheLineSize() const { OCL_NOT_AVAILABLE(); }
size_t Device::globalMemSize() const { OCL_NOT_AVAILABLE(); }
size_t Device::localMemSize() const { OCL_NOT_AVAILABLE(); }
// Non-throwing: NO_LOCAL_MEM is the honest answer when there is no device.
int Device::localMemType() const { return NO_LOCAL_MEM; }
bool Device::hostUnifiedMemory() const { OCL_NOT_AVAILABLE(); }
bool Device::imageSupport() const { OCL_NOT_AVAILABLE(); }
bool Device::imageFromBufferSupport() const { OCL_NOT_AVAILABLE(); }
uint Device::imagePitchAlignment() const { OCL_NOT_AVAILABLE(); }
uint Device::imageBaseAddressAlignment() const { OCL_NOT_AVAILABLE(); }
bool Device::intelSubgroupsSupport() const { OCL_NOT_AVAILABLE(); }
size_t Device::image2DMaxWidth() const { OCL_NOT_AVAILABLE(); }
size_t Device::image2DMaxHeight() const { OCL_NOT_AVAILABLE(); }
size_t Device::image3DMaxWidth() const { OCL_NOT_AVAILABLE(); }
size_t Device::image3DMaxHeight() const { OCL_NOT_AVAILABLE(); }
size_t Device::image3DMaxDepth() const { OCL_NOT_AVAILABLE(); }
size_t Device::imageMaxBufferSize() const { OCL_NOT_AVAILABLE(); }
size_t Device::imageMaxArraySize() const { OCL_NOT_AVAILABLE(); }
int Device::vendorID() const { OCL_NOT_AVAILABLE(); }
int Device::maxClockFrequency() const { OCL_NOT_AVAILABLE(); }
int Device::maxComputeUnits() const { OCL_NOT_AVAILABLE(); }
int Device::maxConstantArgs() const { OCL_NOT_AVAILABLE(); }
size_t Device::maxConstantBufferSize() const { OCL_NOT_AVAILABLE(); }
size_t Device::maxMemAllocSize() const { OCL_NOT_AVAILABLE(); }
size_t Device::maxParameterSize() const { OCL_NOT_AVAILABLE(); }
int Device::maxReadImageArgs() const { OCL_NOT_AVAILABLE(); }
int Device::maxWriteImageArgs() const { OCL_NOT_AVAILABLE(); }
int Device::maxSamplers() const { OCL_NOT_AVAILABLE(); }
size_t Device::maxWorkGroupSize() const { OCL_NOT_AVAILABLE(); }
int Device::maxWorkItemDims() const { OCL_NOT_AVAILABLE(); }
void Device::maxWorkItemSizes(size_t*) const { OCL_NOT_AVAILABLE(); }
int Device::memBaseAddrAlign() const { OCL_NOT_AVAILABLE(); }
int Device::nativeVectorWidthChar() const { OCL_NOT_AVAILABLE(); }
int Device::nativeVectorWidthShort() const { OCL_NOT_AVAILABLE(); }
int Device::nativeVectorWidthInt() const { OCL_NOT_AVAILABLE(); }
int Device::nativeVectorWidthLong() const { OCL_NOT_AVAILABLE(); }
int Device::nativeVectorWidthFloat() const { OCL_NOT_AVAILABLE(); }
int Device::nativeVectorWidthDouble() const { OCL_NOT_AVAILABLE(); }
int Device::nativeVectorWidthHalf() const { OCL_NOT_AVAILABLE(); }
int Device::preferredVectorWidthChar() const { OCL_NOT_AVAILABLE(); }
int Device::preferredVectorWidthShort() const { OCL_NOT_AVAILABLE(); }
int Device::preferredVectorWidthInt() const { OCL_NOT_AVAILABLE(); }
int Device::preferredVectorWidthLong() const { OCL_NOT_AVAILABLE(); }
int Device::preferredVectorWidthFloat() const { OCL_NOT_AVAILABLE(); }
int Device::preferredVectorWidthDouble() const { OCL_NOT_AVAILABLE(); }
int Device::preferredVectorWidthHalf() const { OCL_NOT_AVAILABLE(); }
size_t Device::printfBufferSize() const { OCL_NOT_AVAILABLE(); }
size_t Device::profilingTimerResolution() const { OCL_NOT_AVAILABLE(); }
/* static */
// Returns a process-wide empty Device (never throws); its ptr() is NULL.
const Device& Device::getDefault()
{
static Device dummy;
return dummy;
}
// cv::ocl::Context stubs: a Context can be created and queried, but it never
// holds devices (create() == false, ndevices() == 0, ptr() == NULL).
Context::Context() : p(NULL) { }
Context::Context(int dtype) : p(NULL) { }
Context::~Context() { }
Context::Context(const Context& c) : p(NULL) { }
Context& Context::operator=(const Context& c) { return *this; }
bool Context::create() { return false; }
bool Context::create(int dtype) { return false; }
size_t Context::ndevices() const { return 0; }
const Device& Context::device(size_t idx) const { OCL_NOT_AVAILABLE(); }
Program Context::getProg(const ProgramSource& prog, const String& buildopt, String& errmsg) { OCL_NOT_AVAILABLE(); }
void Context::unloadProg(Program& prog) { }
/* static */
// The 'initialize' flag is ignored: there is nothing to initialize here.
Context& Context::getDefault(bool initialize)
{
static Context dummy;
return dummy;
}
void* Context::ptr() const { return NULL; }
bool Context::useSVM() const { return false; }
void Context::setUseSVM(bool enabled) { }
// cv::ocl::Platform stubs: inert value type, ptr() always NULL.
Platform::Platform() : p(NULL) { }
Platform::~Platform() { }
Platform::Platform(const Platform&) : p(NULL) { }
Platform& Platform::operator=(const Platform&) { return *this; }
void* Platform::ptr() const { return NULL; }
/* static */
Platform& Platform::getDefault()
{
static Platform dummy;
return dummy;
}
// OpenCL interop entry points: attaching external contexts/buffers/images is
// impossible without OpenCL, so these always throw.
void attachContext(const String& platformName, void* platformID, void* context, void* deviceID) { OCL_NOT_AVAILABLE(); }
void convertFromBuffer(void* cl_mem_buffer, size_t step, int rows, int cols, int type, UMat& dst) { OCL_NOT_AVAILABLE(); }
void convertFromImage(void* cl_mem_image, UMat& dst) { OCL_NOT_AVAILABLE(); }
void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device) { OCL_NOT_AVAILABLE(); }
// cv::ocl::Queue stubs: default construction / copy / finish() are safe no-ops
// so generic code can hold Queue objects; anything that would require a real
// command queue throws via OCL_NOT_AVAILABLE().
Queue::Queue() : p(NULL) { }
Queue::Queue(const Context& c, const Device& d) : p(NULL) { OCL_NOT_AVAILABLE(); }
Queue::~Queue() { }
// Fix: initialize 'p' in the copy constructor as well — the original left it
// uninitialized, inconsistent with every other copy constructor in this file.
Queue::Queue(const Queue& q) : p(NULL) {}
Queue& Queue::operator=(const Queue& q) { return *this; }
bool Queue::create(const Context& c, const Device& d) { OCL_NOT_AVAILABLE(); }
void Queue::finish() {}
void* Queue::ptr() const { return NULL; }
/* static */
Queue& Queue::getDefault()
{
static Queue dummy;
return dummy;
}
/// @brief Returns the OpenCL command queue with profiling enabled (stub: always throws).
const Queue& Queue::getProfilingQueue() const { OCL_NOT_AVAILABLE(); }
// cv::ocl::KernelArg stubs: the default constructor yields an inert (empty)
// argument; building a real argument is impossible and throws.
KernelArg::KernelArg()
: flags(0), m(0), obj(0), sz(0), wscale(1), iwscale(1)
{
}
KernelArg::KernelArg(int _flags, UMat* _m, int _wscale, int _iwscale, const void* _obj, size_t _sz)
: flags(_flags), m(_m), obj(_obj), sz(_sz), wscale(_wscale), iwscale(_iwscale)
{
OCL_NOT_AVAILABLE();
}
KernelArg KernelArg::Constant(const Mat& m)
{
OCL_NOT_AVAILABLE();
}
// cv::ocl::Kernel stubs: a Kernel is always empty(); creating, configuring or
// running one throws. empty()/ptr() stay non-throwing so callers can probe.
Kernel::Kernel() : p(NULL) { }
Kernel::Kernel(const char* kname, const Program& prog) : p(NULL) { OCL_NOT_AVAILABLE(); }
Kernel::Kernel(const char* kname, const ProgramSource& prog, const String& buildopts, String* errmsg) : p(NULL) { OCL_NOT_AVAILABLE(); }
Kernel::~Kernel() { }
Kernel::Kernel(const Kernel& k) : p(NULL) { }
Kernel& Kernel::operator=(const Kernel& k) { return *this; }
bool Kernel::empty() const { return true; }
bool Kernel::create(const char* kname, const Program& prog) { OCL_NOT_AVAILABLE(); }
bool Kernel::create(const char* kname, const ProgramSource& prog, const String& buildopts, String* errmsg) { OCL_NOT_AVAILABLE(); }
int Kernel::set(int i, const void* value, size_t sz) { OCL_NOT_AVAILABLE(); }
int Kernel::set(int i, const Image2D& image2D) { OCL_NOT_AVAILABLE(); }
int Kernel::set(int i, const UMat& m) { OCL_NOT_AVAILABLE(); }
int Kernel::set(int i, const KernelArg& arg) { OCL_NOT_AVAILABLE(); }
bool Kernel::run(int dims, size_t globalsize[], size_t localsize[], bool sync, const Queue& q) { OCL_NOT_AVAILABLE(); }
bool Kernel::runTask(bool sync, const Queue& q) { OCL_NOT_AVAILABLE(); }
int64 Kernel::runProfiling(int dims, size_t globalsize[], size_t localsize[], const Queue& q) { OCL_NOT_AVAILABLE(); }
size_t Kernel::workGroupSize() const { OCL_NOT_AVAILABLE(); }
size_t Kernel::preferedWorkGroupSizeMultiple() const { OCL_NOT_AVAILABLE(); }
bool Kernel::compileWorkGroupSize(size_t wsz[]) const { OCL_NOT_AVAILABLE(); }
size_t Kernel::localMemSize() const { OCL_NOT_AVAILABLE(); }
void* Kernel::ptr() const { return NULL; }
// cv::ocl::Program stubs: programs cannot be built, read or serialized.
Program::Program() : p(NULL) { }
Program::Program(const ProgramSource& src, const String& buildflags, String& errmsg) : p(NULL) { OCL_NOT_AVAILABLE(); }
Program::Program(const Program& prog) : p(NULL) { }
Program& Program::operator=(const Program& prog) { return *this; }
Program::~Program() { }
bool Program::create(const ProgramSource& src, const String& buildflags, String& errmsg) { OCL_NOT_AVAILABLE(); }
void* Program::ptr() const { return NULL; }
void Program::getBinary(std::vector<char>& binary) const { OCL_NOT_AVAILABLE(); }
bool Program::read(const String& buf, const String& buildflags) { OCL_NOT_AVAILABLE(); }
bool Program::write(String& buf) const { OCL_NOT_AVAILABLE(); }
const ProgramSource& Program::source() const { OCL_NOT_AVAILABLE(); }
String Program::getPrefix() const { OCL_NOT_AVAILABLE(); }
/* static */ String Program::getPrefix(const String& buildflags) { OCL_NOT_AVAILABLE(); }
// cv::ocl::ProgramSource stubs: constructible (the auto-generated kernel
// wrappers instantiate these at startup, so constructors must not throw), but
// the source text and hash are never materialized.
ProgramSource::ProgramSource() : p(NULL) { }
ProgramSource::ProgramSource(const String& module, const String& name, const String& codeStr, const String& codeHash) : p(NULL) { }
ProgramSource::ProgramSource(const String& prog) : p(NULL) { }
ProgramSource::ProgramSource(const char* prog) : p(NULL) { }
ProgramSource::~ProgramSource() { }
ProgramSource::ProgramSource(const ProgramSource& prog) : p(NULL) { }
ProgramSource& ProgramSource::operator=(const ProgramSource& prog) { return *this; }
const String& ProgramSource::source() const { OCL_NOT_AVAILABLE(); }
ProgramSource::hash_t ProgramSource::hash() const { OCL_NOT_AVAILABLE(); }
/* static */ ProgramSource ProgramSource::fromBinary(const String& module, const String& name, const unsigned char* binary, const size_t size, const cv::String& buildOptions) { OCL_NOT_AVAILABLE(); }
/* static */ ProgramSource ProgramSource::fromSPIR(const String& module, const String& name, const unsigned char* binary, const size_t size, const cv::String& buildOptions) { OCL_NOT_AVAILABLE(); }
// cv::ocl::PlatformInfo stubs: default-constructible only; all queries throw.
PlatformInfo::PlatformInfo() : p(NULL) { }
PlatformInfo::PlatformInfo(void* id) : p(NULL) { OCL_NOT_AVAILABLE(); }
PlatformInfo::~PlatformInfo() { }
PlatformInfo::PlatformInfo(const PlatformInfo& i) : p(NULL) { }
PlatformInfo& PlatformInfo::operator=(const PlatformInfo& i) { return *this; }
String PlatformInfo::name() const { OCL_NOT_AVAILABLE(); }
String PlatformInfo::vendor() const { OCL_NOT_AVAILABLE(); }
String PlatformInfo::version() const { OCL_NOT_AVAILABLE(); }
int PlatformInfo::deviceNumber() const { OCL_NOT_AVAILABLE(); }
void PlatformInfo::getDevice(Device& device, int d) const { OCL_NOT_AVAILABLE(); }
// Kernel-source helper utilities (type-name mangling, build-option helpers,
// vector-width heuristics): only meaningful when kernels can be compiled, so
// every one of them throws in this build.
const char* convertTypeStr(int sdepth, int ddepth, int cn, char* buf) { OCL_NOT_AVAILABLE(); }
const char* typeToStr(int t) { OCL_NOT_AVAILABLE(); }
const char* memopTypeToStr(int t) { OCL_NOT_AVAILABLE(); }
const char* vecopTypeToStr(int t) { OCL_NOT_AVAILABLE(); }
const char* getOpenCLErrorString(int errorCode) { OCL_NOT_AVAILABLE(); }
String kernelToStr(InputArray _kernel, int ddepth, const char* name) { OCL_NOT_AVAILABLE(); }
// NOTE: "getPlatfomsInfo" (sic) is the public API spelling; do not "fix" it here.
void getPlatfomsInfo(std::vector<PlatformInfo>& platform_info) { OCL_NOT_AVAILABLE(); }
int predictOptimalVectorWidth(InputArray src1, InputArray src2, InputArray src3,
InputArray src4, InputArray src5, InputArray src6,
InputArray src7, InputArray src8, InputArray src9,
OclVectorStrategy strat)
{ OCL_NOT_AVAILABLE(); }
int checkOptimalVectorWidth(const int *vectorWidths,
InputArray src1, InputArray src2, InputArray src3,
InputArray src4, InputArray src5, InputArray src6,
InputArray src7, InputArray src8, InputArray src9,
OclVectorStrategy strat)
{ OCL_NOT_AVAILABLE(); }
int predictOptimalVectorWidthMax(InputArray src1, InputArray src2, InputArray src3,
InputArray src4, InputArray src5, InputArray src6,
InputArray src7, InputArray src8, InputArray src9)
{ OCL_NOT_AVAILABLE(); }
void buildOptionsAddMatrixDescription(String& buildOptions, const String& name, InputArray _m) { OCL_NOT_AVAILABLE(); }
// cv::ocl::Image2D stubs: default/empty instances only; anything involving a
// real cl_image throws.
Image2D::Image2D() : p(NULL) { }
Image2D::Image2D(const UMat &src, bool norm, bool alias) { OCL_NOT_AVAILABLE(); }
Image2D::Image2D(const Image2D & i) : p(NULL) { OCL_NOT_AVAILABLE(); }
Image2D::~Image2D() { }
Image2D& Image2D::operator=(const Image2D & i) { return *this; }
/* static */ bool Image2D::canCreateAlias(const UMat &u) { OCL_NOT_AVAILABLE(); }
/* static */ bool Image2D::isFormatSupported(int depth, int cn, bool norm) { OCL_NOT_AVAILABLE(); }
void* Image2D::ptr() const { return NULL; }
// cv::ocl::Timer stubs plus the remaining free functions: timing a queue is
// impossible without OpenCL; the OpenCL allocator simply does not exist
// (NULL tells the UMat machinery to fall back to the CPU allocator).
Timer::Timer(const Queue& q) : p(NULL) {}
Timer::~Timer() {}
void Timer::start() { OCL_NOT_AVAILABLE(); }
void Timer::stop() { OCL_NOT_AVAILABLE();}
uint64 Timer::durationNS() const { OCL_NOT_AVAILABLE(); }
MatAllocator* getOpenCLAllocator() { return NULL; }
internal::ProgramEntry::operator ProgramSource&() const { OCL_NOT_AVAILABLE(); }
}}
#if defined(_MSC_VER)
#pragma warning(pop)
#elif defined(__clang__)
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
......@@ -983,7 +983,11 @@ void ORB_Impl::detectAndCompute( InputArray _image, InputArray _mask,
int descPatchSize = cvCeil(halfPatchSize*sqrt(2.0));
int border = std::max(edgeThreshold, std::max(descPatchSize, HARRIS_BLOCK_SIZE/2))+1;
#ifdef HAVE_OPENCL
bool useOCL = ocl::isOpenCLActivated() && OCL_FORCE_CHECK(_image.isUMat() || _descriptors.isUMat());
#else
bool useOCL = false;
#endif
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.type() != CV_8UC1 )
......
......@@ -50,6 +50,9 @@
#include "logger.h"
#define BITS_PER_CHAR 8
#define BITS_PER_BASE 2 // for DNA/RNA sequences
#define BASE_PER_CHAR (BITS_PER_CHAR/BITS_PER_BASE)
#define HISTOS_PER_BASE (1<<BITS_PER_BASE)
namespace cvflann
......@@ -762,7 +765,7 @@ private:
memoryCounter_ += int(veclen_*sizeof(CentersType));
unsigned int* mean_accumulator = new unsigned int[accumulator_veclen];
memset(mean_accumulator, 0, accumulator_veclen);
memset(mean_accumulator, 0, sizeof(unsigned int)*accumulator_veclen);
for (unsigned int i=0; i<indices_length; ++i) {
variance += static_cast<unsigned long long>( ensureSquareDistance<Distance>(
......@@ -814,6 +817,73 @@ private:
}
// Computes the statistics (consensus center, variance, radius) of a cluster
// node over DNA-encoded vectors, where each byte packs four 2-bit bases.
// For every 2-bit base position a 4-bin histogram is accumulated over all
// member points, and the center's base is the histogram argmax; variance is
// E[d(x,0)^2] - d(mean,0)^2 and radius is the max distance to the mean.
// The freshly allocated 'mean' becomes node->pivot (owned by the tree;
// presumably freed when the node is destroyed — not visible here).
void computeDnaNodeStatistics(KMeansNodePtr node, int* indices,
unsigned int indices_length)
{
// 16 histogram bins (4 bases x 4 base-positions) per payload byte.
const unsigned int histos_veclen = static_cast<unsigned int>(
veclen_*sizeof(CentersType)*(HISTOS_PER_BASE*BASE_PER_CHAR));
unsigned long long variance = 0ull;
unsigned int* histograms = new unsigned int[histos_veclen];
memset(histograms, 0, sizeof(unsigned int)*histos_veclen);
for (unsigned int i=0; i<indices_length; ++i) {
variance += static_cast<unsigned long long>( ensureSquareDistance<Distance>(
distance_(dataset_[indices[i]], ZeroIterator<ElementType>(), veclen_)));
// k walks the histogram bins (16 per byte), l walks the payload bytes.
unsigned char* vec = (unsigned char*)dataset_[indices[i]];
for (size_t k=0, l=0; k<histos_veclen; k+=HISTOS_PER_BASE*BASE_PER_CHAR, ++l) {
histograms[k + ((vec[l]) & 0x03)]++;
histograms[k + 4 + ((vec[l]>>2) & 0x03)]++;
histograms[k + 8 + ((vec[l]>>4) & 0x03)]++;
histograms[k +12 + ((vec[l]>>6) & 0x03)]++;
}
}
CentersType* mean = new CentersType[veclen_];
memoryCounter_ += int(veclen_*sizeof(CentersType));
unsigned char* char_mean = (unsigned char*)mean;
unsigned int* h = histograms;
// Argmax over each 4-bin histogram, re-packed into 2-bit fields of the byte.
// NOTE(review): the hex literals below (0x10, 0x11, 0x1000, 0x100000, ...)
// look like binary values written as hex (0b10 -> 0x10); several of them
// exceed the range of unsigned char and truncate to 0 on assignment.
// Suspected upstream bug — confirm against the decoding above, which expects
// plain 2-bit fields (0..3) at shifts 0/2/4/6.
for (size_t k=0, l=0; k<histos_veclen; k+=HISTOS_PER_BASE*BASE_PER_CHAR, ++l) {
char_mean[l] = (h[k] > h[k+1] ? h[k+2] > h[k+3] ? h[k] > h[k+2] ? 0x00 : 0x10
: h[k] > h[k+3] ? 0x00 : 0x11
: h[k+2] > h[k+3] ? h[k+1] > h[k+2] ? 0x01 : 0x10
: h[k+1] > h[k+3] ? 0x01 : 0x11)
| (h[k+4]>h[k+5] ? h[k+6] > h[k+7] ? h[k+4] > h[k+6] ? 0x00 : 0x1000
: h[k+4] > h[k+7] ? 0x00 : 0x1100
: h[k+6] > h[k+7] ? h[k+5] > h[k+6] ? 0x0100 : 0x1000
: h[k+5] > h[k+7] ? 0x0100 : 0x1100)
| (h[k+8]>h[k+9] ? h[k+10]>h[k+11] ? h[k+8] >h[k+10] ? 0x00 : 0x100000
: h[k+8] >h[k+11] ? 0x00 : 0x110000
: h[k+10]>h[k+11] ? h[k+9] >h[k+10] ? 0x010000 : 0x100000
: h[k+9] >h[k+11] ? 0x010000 : 0x110000)
| (h[k+12]>h[k+13] ? h[k+14]>h[k+15] ? h[k+12] >h[k+14] ? 0x00 : 0x10000000
: h[k+12] >h[k+15] ? 0x00 : 0x11000000
: h[k+14]>h[k+15] ? h[k+13] >h[k+14] ? 0x01000000 : 0x10000000
: h[k+13] >h[k+15] ? 0x01000000 : 0x11000000);
}
// Average the accumulated squared distances (rounded), then subtract the
// mean's own squared norm.
// NOTE(review): 'variance' is unsigned; if d(mean,0)^2 exceeded the average,
// this subtraction would wrap — verify that cannot happen here.
variance = static_cast<unsigned long long>(
0.5 + static_cast<double>(variance) / static_cast<double>(indices_length));
variance -= static_cast<unsigned long long>(
ensureSquareDistance<Distance>(
distance_(mean, ZeroIterator<ElementType>(), veclen_)));
// Radius = distance from the center to the farthest member point.
DistanceType radius = 0;
for (unsigned int i=0; i<indices_length; ++i) {
DistanceType tmp = distance_(mean, dataset_[indices[i]], veclen_);
if (tmp>radius) {
radius = tmp;
}
}
node->variance = static_cast<DistanceType>(variance);
node->radius = radius;
node->pivot = mean;
delete[] histograms;
}
template<typename DistType>
void computeNodeStatistics(KMeansNodePtr node, int* indices,
unsigned int indices_length,
......@@ -847,6 +917,22 @@ private:
computeBitfieldNodeStatistics(node, indices, indices_length);
}
// Tag-dispatch overload: when the distance functor is DNAmmingLUT, node
// statistics use the DNA (2-bit base) histogram path. 'identifier' only
// selects the overload; (void) silences the unused-parameter warning.
void computeNodeStatistics(KMeansNodePtr node, int* indices,
unsigned int indices_length,
const cvflann::DNAmmingLUT* identifier)
{
(void)identifier;
computeDnaNodeStatistics(node, indices, indices_length);
}
// Tag-dispatch overload for the popcount-based DNAmming2<unsigned char>
// distance; same DNA statistics path as the DNAmmingLUT overload.
void computeNodeStatistics(KMeansNodePtr node, int* indices,
unsigned int indices_length,
const cvflann::DNAmming2<unsigned char>* identifier)
{
(void)identifier;
computeDnaNodeStatistics(node, indices, indices_length);
}
void refineClustering(int* indices, int indices_length, int branching, CentersType** centers,
std::vector<DistanceType>& radiuses, int* belongs_to, int* count)
......@@ -1040,6 +1126,112 @@ private:
}
// Lloyd-style k-means refinement for DNA-encoded (2-bit base) vectors:
// repeatedly (1) recompute each cluster center as the per-base histogram
// argmax, (2) reassign points in parallel, (3) repair empty clusters; stops
// on convergence or after iterations_ rounds. On entry belongs_to/count hold
// an initial assignment; on exit centers/radiuses/belongs_to/count are final.
// Note: centers[i] is allocated here and ownership passes to the caller.
void refineDnaClustering(int* indices, int indices_length, int branching, CentersType** centers,
std::vector<DistanceType>& radiuses, int* belongs_to, int* count)
{
for (int i=0; i<branching; ++i) {
centers[i] = new CentersType[veclen_];
memoryCounter_ += (int)(veclen_*sizeof(CentersType));
}
// 16 histogram bins (4 bases x 4 base-positions) per payload byte, per cluster.
const unsigned int histos_veclen = static_cast<unsigned int>(
veclen_*sizeof(CentersType)*(HISTOS_PER_BASE*BASE_PER_CHAR));
cv::AutoBuffer<unsigned int> histos_buf(branching*histos_veclen);
Matrix<unsigned int> histos(histos_buf.data(), branching, histos_veclen);
bool converged = false;
int iteration = 0;
while (!converged && iteration<iterations_) {
converged = true;
iteration++;
// compute the new cluster centers
for (int i=0; i<branching; ++i) {
memset(histos[i],0,sizeof(unsigned int)*histos_veclen);
radiuses[i] = 0;
}
// Accumulate per-base histograms for each cluster's current members.
for (int i=0; i<indices_length; ++i) {
unsigned char* vec = (unsigned char*)dataset_[indices[i]];
unsigned int* h = histos[belongs_to[i]];
for (size_t k=0, l=0; k<histos_veclen; k+=HISTOS_PER_BASE*BASE_PER_CHAR, ++l) {
h[k + ((vec[l]) & 0x03)]++;
h[k + 4 + ((vec[l]>>2) & 0x03)]++;
h[k + 8 + ((vec[l]>>4) & 0x03)]++;
h[k +12 + ((vec[l]>>6) & 0x03)]++;
}
}
// Re-pack each histogram argmax into the center byte.
// NOTE(review): same suspicious hex-as-binary literals as in
// computeDnaNodeStatistics (0x10, 0x1000, ... truncate to 0 when
// assigned to unsigned char) — verify against the 2-bit decoding above.
for (int i=0; i<branching; ++i) {
unsigned int* h = histos[i];
unsigned char* charCenter = (unsigned char*)centers[i];
for (size_t k=0, l=0; k<histos_veclen; k+=HISTOS_PER_BASE*BASE_PER_CHAR, ++l) {
charCenter[l]= (h[k] > h[k+1] ? h[k+2] > h[k+3] ? h[k] > h[k+2] ? 0x00 : 0x10
: h[k] > h[k+3] ? 0x00 : 0x11
: h[k+2] > h[k+3] ? h[k+1] > h[k+2] ? 0x01 : 0x10
: h[k+1] > h[k+3] ? 0x01 : 0x11)
| (h[k+4]>h[k+5] ? h[k+6] > h[k+7] ? h[k+4] > h[k+6] ? 0x00 : 0x1000
: h[k+4] > h[k+7] ? 0x00 : 0x1100
: h[k+6] > h[k+7] ? h[k+5] > h[k+6] ? 0x0100 : 0x1000
: h[k+5] > h[k+7] ? 0x0100 : 0x1100)
| (h[k+8]>h[k+9] ? h[k+10]>h[k+11] ? h[k+8] >h[k+10] ? 0x00 : 0x100000
: h[k+8] >h[k+11] ? 0x00 : 0x110000
: h[k+10]>h[k+11] ? h[k+9] >h[k+10] ? 0x010000 : 0x100000
: h[k+9] >h[k+11] ? 0x010000 : 0x110000)
| (h[k+12]>h[k+13] ? h[k+14]>h[k+15] ? h[k+12] >h[k+14] ? 0x00 : 0x10000000
: h[k+12] >h[k+15] ? 0x00 : 0x11000000
: h[k+14]>h[k+15] ? h[k+13] >h[k+14] ? 0x01000000 : 0x10000000
: h[k+13] >h[k+15] ? 0x01000000 : 0x11000000);
}
}
std::vector<int> new_centroids(indices_length);
std::vector<DistanceType> dists(indices_length);
// reassign points to clusters
KMeansDistanceComputer<ElementType**> invoker(
distance_, dataset_, branching, indices, centers, veclen_, new_centroids, dists);
parallel_for_(cv::Range(0, (int)indices_length), invoker);
// Apply the (parallel-computed) assignment serially, updating radii and
// membership counts; any change keeps the loop running.
for (int i=0; i < indices_length; ++i) {
DistanceType dist(dists[i]);
int new_centroid(new_centroids[i]);
if (dist > radiuses[new_centroid]) {
radiuses[new_centroid] = dist;
}
if (new_centroid != belongs_to[i]) {
count[belongs_to[i]]--;
count[new_centroid]++;
belongs_to[i] = new_centroid;
converged = false;
}
}
for (int i=0; i<branching; ++i) {
// if one cluster converges to an empty cluster,
// move an element into that cluster
if (count[i]==0) {
// Donor cluster j: the next cluster (cyclically) with >1 member.
int j = (i+1)%branching;
while (count[j]<=1) {
j = (j+1)%branching;
}
for (int k=0; k<indices_length; ++k) {
if (belongs_to[k]==j) {
// for cluster j, we move the furthest element from the center to the empty cluster i
if ( distance_(dataset_[indices[k]], centers[j], veclen_) == radiuses[j] ) {
belongs_to[k] = i;
count[j]--;
count[i]++;
break;
}
}
}
converged = false;
}
}
}
}
void computeSubClustering(KMeansNodePtr node, int* indices, int indices_length,
int branching, int level, CentersType** centers,
std::vector<DistanceType>& radiuses, int* belongs_to, int* count)
......@@ -1139,7 +1331,7 @@ private:
/**
* The methods responsible with doing the recursive hierarchical clustering on
* binary vectors.
* As some might have heared that KMeans on binary data doesn't make sense,
* As some might have heard that KMeans on binary data doesn't make sense,
* it's worth a little explanation why it actually fairly works. As
with the Hierarchical Clustering algorithm, we seed several centers for the
* current node by picking some of its points. Then in a first pass each point
......@@ -1222,6 +1414,34 @@ private:
}
// Tag-dispatch overload (DNAmmingLUT distance): refine this node's clusters
// with the DNA-specific k-means loop, then recurse via the shared bitfield
// sub-clustering. 'identifier' only selects the overload.
void refineAndSplitClustering(
KMeansNodePtr node, int* indices, int indices_length, int branching,
int level, CentersType** centers, std::vector<DistanceType>& radiuses,
int* belongs_to, int* count, const cvflann::DNAmmingLUT* identifier)
{
(void)identifier;
refineDnaClustering(
indices, indices_length, branching, centers, radiuses, belongs_to, count);
computeAnyBitfieldSubClustering(node, indices, indices_length, branching,
level, centers, radiuses, belongs_to, count);
}
void refineAndSplitClustering(
    KMeansNodePtr node, int* indices, int indices_length, int branching,
    int level, CentersType** centers, std::vector<DistanceType>& radiuses,
    int* belongs_to, int* count, const cvflann::DNAmming2<unsigned char>* identifier)
{
    // The DNAmming2 pointer is an overload-resolution tag only; it selects
    // the DNA-distance refinement path and is never dereferenced.
    (void)identifier;

    // Same flow as the DNAmmingLUT overload: refine the assignment with the
    // DNA-specific pass, then recurse via the generic bitfield routine.
    refineDnaClustering(indices, indices_length, branching,
                        centers, radiuses, belongs_to, count);
    computeAnyBitfieldSubClustering(node, indices, indices_length, branching,
                                    level, centers, radiuses, belongs_to, count);
}
/**
* The method responsible with actually doing the recursive hierarchical
* clustering
......
......@@ -366,6 +366,7 @@ typedef ::cvflann::Hamming<uchar> HammingDistance;
#else
typedef ::cvflann::HammingLUT HammingDistance;
#endif
typedef ::cvflann::DNAmming2<uchar> DNAmmingDistance;
Index::Index()
{
......@@ -418,6 +419,9 @@ void Index::build(InputArray _data, const IndexParams& params, flann_distance_t
buildIndex< ::cvflann::L1<float> >(index, data, params);
break;
#if MINIFLANN_SUPPORT_EXOTIC_DISTANCE_TYPES
case FLANN_DIST_DNAMMING:
buildIndex< DNAmmingDistance >(index, data, params);
break;
case FLANN_DIST_MAX:
buildIndex< ::cvflann::MaxDistance<float> >(index, data, params);
break;
......@@ -473,6 +477,9 @@ void Index::release()
deleteIndex< ::cvflann::L1<float> >(index);
break;
#if MINIFLANN_SUPPORT_EXOTIC_DISTANCE_TYPES
case FLANN_DIST_DNAMMING:
deleteIndex< DNAmmingDistance >(index);
break;
case FLANN_DIST_MAX:
deleteIndex< ::cvflann::MaxDistance<float> >(index);
break;
......@@ -594,7 +601,8 @@ void Index::knnSearch(InputArray _query, OutputArray _indices,
CV_INSTRUMENT_REGION();
Mat query = _query.getMat(), indices, dists;
int dtype = distType == FLANN_DIST_HAMMING ? CV_32S : CV_32F;
int dtype = (distType == FLANN_DIST_HAMMING)
|| (distType == FLANN_DIST_DNAMMING) ? CV_32S : CV_32F;
createIndicesDists( _indices, _dists, indices, dists, query.rows, knn, knn, dtype );
......@@ -610,6 +618,9 @@ void Index::knnSearch(InputArray _query, OutputArray _indices,
runKnnSearch< ::cvflann::L1<float> >(index, query, indices, dists, knn, params);
break;
#if MINIFLANN_SUPPORT_EXOTIC_DISTANCE_TYPES
case FLANN_DIST_DNAMMING:
runKnnSearch<DNAmmingDistance>(index, query, indices, dists, knn, params);
break;
case FLANN_DIST_MAX:
runKnnSearch< ::cvflann::MaxDistance<float> >(index, query, indices, dists, knn, params);
break;
......@@ -638,7 +649,8 @@ int Index::radiusSearch(InputArray _query, OutputArray _indices,
CV_INSTRUMENT_REGION();
Mat query = _query.getMat(), indices, dists;
int dtype = distType == FLANN_DIST_HAMMING ? CV_32S : CV_32F;
int dtype = (distType == FLANN_DIST_HAMMING)
|| (distType == FLANN_DIST_DNAMMING) ? CV_32S : CV_32F;
CV_Assert( maxResults > 0 );
createIndicesDists( _indices, _dists, indices, dists, query.rows, maxResults, INT_MAX, dtype );
......@@ -655,6 +667,8 @@ int Index::radiusSearch(InputArray _query, OutputArray _indices,
case FLANN_DIST_L1:
return runRadiusSearch< ::cvflann::L1<float> >(index, query, indices, dists, radius, params);
#if MINIFLANN_SUPPORT_EXOTIC_DISTANCE_TYPES
case FLANN_DIST_DNAMMING:
return runRadiusSearch< DNAmmingDistance >(index, query, indices, dists, radius, params);
case FLANN_DIST_MAX:
return runRadiusSearch< ::cvflann::MaxDistance<float> >(index, query, indices, dists, radius, params);
case FLANN_DIST_HIST_INTERSECT:
......@@ -718,6 +732,9 @@ void Index::save(const String& filename) const
saveIndex< ::cvflann::L1<float> >(this, index, fout);
break;
#if MINIFLANN_SUPPORT_EXOTIC_DISTANCE_TYPES
case FLANN_DIST_DNAMMING:
saveIndex< DNAmmingDistance >(this, index, fout);
break;
case FLANN_DIST_MAX:
saveIndex< ::cvflann::MaxDistance<float> >(this, index, fout);
break;
......@@ -799,6 +816,7 @@ bool Index::load(InputArray _data, const String& filename)
distType = (flann_distance_t)idistType;
if( !((distType == FLANN_DIST_HAMMING && featureType == CV_8U) ||
(distType == FLANN_DIST_DNAMMING && featureType == CV_8U) ||
(distType != FLANN_DIST_HAMMING && featureType == CV_32F)) )
{
fprintf(stderr, "Reading FLANN index error: unsupported feature type %d for the index type %d\n", featureType, algo);
......@@ -818,6 +836,9 @@ bool Index::load(InputArray _data, const String& filename)
loadIndex< ::cvflann::L1<float> >(this, index, data, fin);
break;
#if MINIFLANN_SUPPORT_EXOTIC_DISTANCE_TYPES
case FLANN_DIST_DNAMMING:
loadIndex< DNAmmingDistance >(this, index, data, fin);
break;
case FLANN_DIST_MAX:
loadIndex< ::cvflann::MaxDistance<float> >(this, index, data, fin);
break;
......
......@@ -256,6 +256,9 @@ enum InterpolationFlags{
INTER_LANCZOS4 = 4,
/** Bit exact bilinear interpolation */
INTER_LINEAR_EXACT = 5,
/** Bit exact nearest neighbor interpolation. This will produce same results as
the nearest neighbor method in PIL, scikit-image or Matlab. */
INTER_NEAREST_EXACT = 6,
/** mask for interpolation codes */
INTER_MAX = 7,
/** flag, fills all of the destination image pixels. If some of them correspond to outliers in the
......
......@@ -254,4 +254,30 @@ PERF_TEST_P(MatInfo_Size_Scale_NN, ResizeNN,
SANITY_CHECK_NOTHING();
}
// Performance test for the bit-exact nearest-neighbor resize
// (INTER_NEAREST_EXACT), sweeping pixel type (1/3/4-channel 8-bit),
// source size (720p/1080p) and scale factor (down 4x, down 2x, up 2x).
PERF_TEST_P(MatInfo_Size_Scale_NN, ResizeNNExact,
            testing::Combine(
                testing::Values(CV_8UC1, CV_8UC3, CV_8UC4),
                testing::Values(sz720p, sz1080p),
                testing::Values(0.25, 0.5, 2.0)
                )
            )
{
    int matType = get<0>(GetParam());
    Size from = get<1>(GetParam());
    double scale = get<2>(GetParam());

    cv::Mat src(from, matType);

    // Destination size derived from the scale factor (rounded to nearest int).
    Size to(cvRound(from.width * scale), cvRound(from.height * scale));
    cv::Mat dst(to, matType);

    declare.in(src, WARMUP_RNG).out(dst);
    declare.time(100); // raise the default time budget for this test

    TEST_CYCLE() resize(src, dst, dst.size(), 0, 0, INTER_NEAREST_EXACT);

    // Sanity only: random input resized must not come out all-zero;
    // exact values are checked by the accuracy tests, not here.
    EXPECT_GT(countNonZero(dst.reshape(1)), 0);

    SANITY_CHECK_NOTHING();
}
} // namespace
......@@ -14,13 +14,14 @@ namespace {
class fixedpoint64
{
private:
static const int fixedShift = 32;
int64_t val;
fixedpoint64(int64_t _val) : val(_val) {}
static CV_ALWAYS_INLINE uint64_t fixedround(const uint64_t& _val) { return (_val + ((1LL << fixedShift) >> 1)); }
public:
static const int fixedShift = 32;
typedef fixedpoint64 WT;
typedef int64_t raw_t;
CV_ALWAYS_INLINE fixedpoint64() { val = 0; }
CV_ALWAYS_INLINE fixedpoint64(const fixedpoint64& v) { val = v.val; }
CV_ALWAYS_INLINE fixedpoint64(const int8_t& _val) { val = ((int64_t)_val) << fixedShift; }
......@@ -97,13 +98,14 @@ public:
class ufixedpoint64
{
private:
static const int fixedShift = 32;
uint64_t val;
ufixedpoint64(uint64_t _val) : val(_val) {}
static CV_ALWAYS_INLINE uint64_t fixedround(const uint64_t& _val) { return (_val + ((1LL << fixedShift) >> 1)); }
public:
static const int fixedShift = 32;
typedef ufixedpoint64 WT;
typedef uint64_t raw_t;
CV_ALWAYS_INLINE ufixedpoint64() { val = 0; }
CV_ALWAYS_INLINE ufixedpoint64(const ufixedpoint64& v) { val = v.val; }
CV_ALWAYS_INLINE ufixedpoint64(const uint8_t& _val) { val = ((uint64_t)_val) << fixedShift; }
......@@ -157,19 +159,24 @@ public:
CV_ALWAYS_INLINE bool isZero() { return val == 0; }
static CV_ALWAYS_INLINE ufixedpoint64 zero() { return ufixedpoint64(); }
static CV_ALWAYS_INLINE ufixedpoint64 one() { return ufixedpoint64((uint64_t)(1ULL << fixedShift)); }
static CV_ALWAYS_INLINE ufixedpoint64 fromRaw(uint64_t v) { return ufixedpoint64(v); }
CV_ALWAYS_INLINE uint64_t raw() { return val; }
CV_ALWAYS_INLINE uint32_t cvFloor() const { return cv::saturate_cast<uint32_t>(val >> fixedShift); }
friend class ufixedpoint32;
};
class fixedpoint32
{
private:
static const int fixedShift = 16;
int32_t val;
fixedpoint32(int32_t _val) : val(_val) {}
static CV_ALWAYS_INLINE uint32_t fixedround(const uint32_t& _val) { return (_val + ((1 << fixedShift) >> 1)); }
public:
static const int fixedShift = 16;
typedef fixedpoint64 WT;
typedef int32_t raw_t;
CV_ALWAYS_INLINE fixedpoint32() { val = 0; }
CV_ALWAYS_INLINE fixedpoint32(const fixedpoint32& v) { val = v.val; }
CV_ALWAYS_INLINE fixedpoint32(const int8_t& _val) { val = ((int32_t)_val) << fixedShift; }
......@@ -217,13 +224,14 @@ public:
class ufixedpoint32
{
private:
static const int fixedShift = 16;
uint32_t val;
ufixedpoint32(uint32_t _val) : val(_val) {}
static CV_ALWAYS_INLINE uint32_t fixedround(const uint32_t& _val) { return (_val + ((1 << fixedShift) >> 1)); }
public:
static const int fixedShift = 16;
typedef ufixedpoint64 WT;
typedef uint32_t raw_t;
CV_ALWAYS_INLINE ufixedpoint32() { val = 0; }
CV_ALWAYS_INLINE ufixedpoint32(const ufixedpoint32& v) { val = v.val; }
CV_ALWAYS_INLINE ufixedpoint32(const uint8_t& _val) { val = ((uint32_t)_val) << fixedShift; }
......@@ -261,19 +269,23 @@ public:
CV_ALWAYS_INLINE bool isZero() { return val == 0; }
static CV_ALWAYS_INLINE ufixedpoint32 zero() { return ufixedpoint32(); }
static CV_ALWAYS_INLINE ufixedpoint32 one() { return ufixedpoint32((1U << fixedShift)); }
static CV_ALWAYS_INLINE ufixedpoint32 fromRaw(uint32_t v) { return ufixedpoint32(v); }
CV_ALWAYS_INLINE uint32_t raw() { return val; }
friend class ufixedpoint16;
};
class fixedpoint16
{
private:
static const int fixedShift = 8;
int16_t val;
fixedpoint16(int16_t _val) : val(_val) {}
static CV_ALWAYS_INLINE uint16_t fixedround(const uint16_t& _val) { return (_val + ((1 << fixedShift) >> 1)); }
public:
static const int fixedShift = 8;
typedef fixedpoint32 WT;
typedef int16_t raw_t;
CV_ALWAYS_INLINE fixedpoint16() { val = 0; }
CV_ALWAYS_INLINE fixedpoint16(const fixedpoint16& v) { val = v.val; }
CV_ALWAYS_INLINE fixedpoint16(const int8_t& _val) { val = ((int16_t)_val) << fixedShift; }
......@@ -314,13 +326,14 @@ public:
class ufixedpoint16
{
private:
static const int fixedShift = 8;
uint16_t val;
ufixedpoint16(uint16_t _val) : val(_val) {}
static CV_ALWAYS_INLINE uint16_t fixedround(const uint16_t& _val) { return (_val + ((1 << fixedShift) >> 1)); }
public:
static const int fixedShift = 8;
typedef ufixedpoint32 WT;
typedef uint16_t raw_t;
CV_ALWAYS_INLINE ufixedpoint16() { val = 0; }
CV_ALWAYS_INLINE ufixedpoint16(const ufixedpoint16& v) { val = v.val; }
CV_ALWAYS_INLINE ufixedpoint16(const uint8_t& _val) { val = ((uint16_t)_val) << fixedShift; }
......@@ -357,7 +370,7 @@ public:
static CV_ALWAYS_INLINE ufixedpoint16 one() { return ufixedpoint16((uint16_t)(1 << fixedShift)); }
static CV_ALWAYS_INLINE ufixedpoint16 fromRaw(uint16_t v) { return ufixedpoint16(v); }
CV_ALWAYS_INLINE ufixedpoint16 raw() { return val; }
CV_ALWAYS_INLINE uint16_t raw() { return val; }
};
}
......
......@@ -51,6 +51,7 @@
#include "opencl_kernels_imgproc.hpp"
#include "hal_replacement.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/utils/buffer_area.private.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "resize.hpp"
......@@ -1104,6 +1105,121 @@ resizeNN( const Mat& src, Mat& dst, double fx, double fy )
}
}
// Parallel row worker for the bit-exact nearest-neighbor resize.
// The horizontal source offsets are precomputed by the caller
// (x_ofse: one entry per destination column, measured in elements);
// the vertical mapping is computed per row using 16.16 fixed-point
// arithmetic (ify = scale, ify0 = rounding offset).
class resizeNN_bitexactInvoker : public ParallelLoopBody
{
public:
    resizeNN_bitexactInvoker(const Mat& _src, Mat& _dst, int* _x_ofse, int _ify, int _ify0)
        : src(_src), dst(_dst), x_ofse(_x_ofse), ify(_ify), ify0(_ify0) {}

    // Processes destination rows [range.start, range.end).
    virtual void operator() (const Range& range) const CV_OVERRIDE
    {
        Size ssize = src.size(), dsize = dst.size();
        int pix_size = (int)src.elemSize(); // bytes per pixel (all channels)
        for( int y = range.start; y < range.end; y++ )
        {
            uchar* D = dst.ptr(y);
            // Map destination row to source row in 16.16 fixed point,
            // clamped to the last source row.
            int _sy = (ify * y + ify0) >> 16;
            int sy = std::min(_sy, ssize.height-1);
            const uchar* S = src.ptr(sy);

            int x = 0;
            // Dispatch on the pixel size in bytes: power-of-two sizes use a
            // SIMD table-lookup gather (vx_lut); 3/6/12-byte pixels copy the
            // three channels explicitly; anything else falls back to a
            // generic byte-wise copy.
            switch( pix_size )
            {
            case 1:
#if CV_SIMD
                for( ; x <= dsize.width - v_uint8::nlanes; x += v_uint8::nlanes )
                    v_store(D + x, vx_lut(S, x_ofse + x));
#endif
                for( ; x < dsize.width; x++ )
                    D[x] = S[x_ofse[x]];
                break;
            case 2:
#if CV_SIMD
                for( ; x <= dsize.width - v_uint16::nlanes; x += v_uint16::nlanes )
                    v_store((ushort*)D + x, vx_lut((ushort*)S, x_ofse + x));
#endif
                for( ; x < dsize.width; x++ )
                    *((ushort*)D + x) = *((ushort*)S + x_ofse[x]);
                break;
            case 3: // three 1-byte channels per pixel
                for( ; x < dsize.width; x++, D += 3 )
                {
                    const uchar* _tS = S + x_ofse[x] * 3;
                    D[0] = _tS[0]; D[1] = _tS[1]; D[2] = _tS[2];
                }
                break;
            case 4:
#if CV_SIMD
                for( ; x <= dsize.width - v_uint32::nlanes; x += v_uint32::nlanes )
                    v_store((uint32_t*)D + x, vx_lut((uint32_t*)S, x_ofse + x));
#endif
                for( ; x < dsize.width; x++ )
                    *((uint32_t*)D + x) = *((uint32_t*)S + x_ofse[x]);
                break;
            case 6: // three 2-byte channels per pixel
                for( ; x < dsize.width; x++, D += 6 )
                {
                    const ushort* _tS = (const ushort*)(S + x_ofse[x]*6);
                    ushort* _tD = (ushort*)D;
                    _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
                }
                break;
            case 8:
#if CV_SIMD
                for( ; x <= dsize.width - v_uint64::nlanes; x += v_uint64::nlanes )
                    v_store((uint64_t*)D + x, vx_lut((uint64_t*)S, x_ofse + x));
#endif
                for( ; x < dsize.width; x++ )
                    *((uint64_t*)D + x) = *((uint64_t*)S + x_ofse[x]);
                break;
            case 12: // three 4-byte channels per pixel
                for( ; x < dsize.width; x++, D += 12 )
                {
                    const int* _tS = (const int*)(S + x_ofse[x]*12);
                    int* _tD = (int*)D;
                    _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
                }
                break;
            default: // generic fallback: copy one pixel byte by byte
                for( x = 0; x < dsize.width; x++, D += pix_size )
                {
                    const uchar* _tS = S + x_ofse[x] * pix_size;
                    for (int k = 0; k < pix_size; k++)
                        D[k] = _tS[k];
                }
            }
        }
    }
private:
    const Mat& src;
    Mat& dst;
    int* x_ofse;    // non-owning; must stay valid while the invoker runs
    const int ify;  // 16.16 fixed-point vertical scale factor
    const int ify0; // 16.16 fixed-point vertical rounding offset
};
// Bit-exact nearest-neighbor resize (INTER_NEAREST_EXACT).
// The explicit fx/fy scale parameters are ignored; the mapping is derived
// from the integer src/dst sizes only, using 16.16 fixed-point arithmetic.
static void resizeNN_bitexact( const Mat& src, Mat& dst, double /*fx*/, double /*fy*/ )
{
    Size ssize = src.size(), dsize = dst.size();
    // Per-axis scale ratio in 16.16 fixed point, rounded to nearest.
    // NOTE(review): `ssize.width << 16` overflows int for widths >= 32768;
    // the whole scheme assumes dimensions fit the 16.16 representation.
    int ifx = ((ssize.width << 16) + dsize.width / 2) / dsize.width; // 16.16 fixed-point arithmetic
    int ifx0 = ifx / 2 - 1; // This method uses center pixel coordinate as Pillow and scikit-image do.
    int ify = ((ssize.height << 16) + dsize.height / 2) / dsize.height;
    int ify0 = ify / 2 - 1;

    // Precompute the source column for every destination column once;
    // the buffer is SIMD-aligned so the invoker can gather from it.
    cv::utils::BufferArea area;
    int* x_ofse = 0;
    area.allocate(x_ofse, dsize.width, CV_SIMD_WIDTH);
    area.commit();
    for( int x = 0; x < dsize.width; x++ )
    {
        int sx = (ifx * x + ifx0) >> 16;
        x_ofse[x] = std::min(sx, ssize.width-1); // offset in element (not byte)
    }

    // Rows are independent, so split them across worker threads.
    Range range(0, dsize.height);
    resizeNN_bitexactInvoker invoker(src, dst, x_ofse, ify, ify0);
    parallel_for_(range, invoker, dst.total()/(double)(1<<16));
}
struct VResizeNoVec
{
......@@ -3723,6 +3839,12 @@ void resize(int src_type,
return;
}
if( interpolation == INTER_NEAREST_EXACT )
{
resizeNN_bitexact( src, dst, inv_scale_x, inv_scale_y );
return;
}
int k, sx, sy, dx, dy;
......
......@@ -258,23 +258,20 @@ softdouble getGaussianKernelFixedPoint_ED(CV_OUT std::vector<int64_t>& result, c
}
static void getGaussianKernel(int n, double sigma, int ktype, Mat& res) { res = getGaussianKernel(n, sigma, ktype); }
template <typename T> static void getGaussianKernel(int n, double sigma, int, std::vector<T>& res);
//{ res = getFixedpointGaussianKernel<T>(n, sigma); }
template<> void getGaussianKernel<ufixedpoint16>(int n, double sigma, int, std::vector<ufixedpoint16>& res)
template <typename FT> static void getGaussianKernel(int n, double sigma, int, std::vector<FT>& res)
{
std::vector<softdouble> res_sd;
softdouble s0 = getGaussianKernelBitExact(res_sd, n, sigma);
CV_UNUSED(s0);
std::vector<int64_t> fixed_256;
softdouble approx_err = getGaussianKernelFixedPoint_ED(fixed_256, res_sd, 8);
softdouble approx_err = getGaussianKernelFixedPoint_ED(fixed_256, res_sd, FT::fixedShift);
CV_UNUSED(approx_err);
res.resize(n);
for (int i = 0; i < n; i++)
{
res[i] = ufixedpoint16::fromRaw((uint16_t)fixed_256[i]);
res[i] = FT::fromRaw((typename FT::raw_t)fixed_256[i]);
//printf("%03d: %d\n", i, res[i].raw());
}
}
......@@ -688,6 +685,43 @@ void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
return;
}
}
if(sdepth == CV_16U && ((borderType & BORDER_ISOLATED) || !_src.isSubmatrix()))
{
CV_LOG_INFO(NULL, "GaussianBlur: running bit-exact version...");
std::vector<ufixedpoint32> fkx, fky;
createGaussianKernels(fkx, fky, type, ksize, sigma1, sigma2);
static bool param_check_gaussian_blur_bitexact_kernels = utils::getConfigurationParameterBool("OPENCV_GAUSSIANBLUR_CHECK_BITEXACT_KERNELS", false);
if (param_check_gaussian_blur_bitexact_kernels && !validateGaussianBlurKernel(fkx))
{
CV_LOG_INFO(NULL, "GaussianBlur: bit-exact fx kernel can't be applied: ksize=" << ksize << " sigma=" << Size2d(sigma1, sigma2));
}
else if (param_check_gaussian_blur_bitexact_kernels && !validateGaussianBlurKernel(fky))
{
CV_LOG_INFO(NULL, "GaussianBlur: bit-exact fy kernel can't be applied: ksize=" << ksize << " sigma=" << Size2d(sigma1, sigma2));
}
else
{
// TODO: implement ocl_sepFilter2D_BitExact -- how to deal with bdepth?
// CV_OCL_RUN(useOpenCL,
// ocl_sepFilter2D_BitExact(_src, _dst, sdepth,
// ksize,
// (const uint32_t*)&fkx[0], (const uint32_t*)&fky[0],
// Point(-1, -1), 0, borderType,
// 16/*shift_bits*/)
// );
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if (src.data == dst.data)
src = src.clone();
CV_CPU_DISPATCH(GaussianBlurFixedPoint, (src, dst, (const uint32_t*)&fkx[0], (int)fkx.size(), (const uint32_t*)&fky[0], (int)fky.size(), borderType),
CV_CPU_DISPATCH_MODES_ALL);
return;
}
}
#ifdef HAVE_OPENCL
if (useOpenCL)
......
......@@ -54,9 +54,10 @@
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// forward declarations
void GaussianBlurFixedPoint(const Mat& src, /*const*/ Mat& dst,
const uint16_t/*ufixedpoint16*/* fkx, int fkx_size,
const uint16_t/*ufixedpoint16*/* fky, int fky_size,
template <typename RFT>
void GaussianBlurFixedPoint(const Mat& src, Mat& dst,
const RFT* fkx, int fkx_size,
const RFT* fky, int fky_size,
int borderType);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
......@@ -192,8 +193,9 @@ void hlineSmooth3N<uint8_t, ufixedpoint16>(const uint8_t* src, int cn, const ufi
}
}
}
template <typename ET, typename FT>
void hlineSmooth3N121(const ET* src, int cn, const FT*, int, FT* dst, int len, int borderType)
template <typename ET, typename FT, typename VFT>
void hlineSmooth3N121Impl(const ET* src, int cn, const FT*, int, FT* dst, int len, int borderType)
{
if (len == 1)
{
......@@ -217,7 +219,13 @@ void hlineSmooth3N121(const ET* src, int cn, const FT*, int, FT* dst, int len, i
}
src += cn; dst += cn;
for (int i = cn; i < (len - 1)*cn; i++, src++, dst++)
int i = cn, lencn = (len - 1)*cn;
#if CV_SIMD
const int VECSZ = VFT::nlanes;
for (; i <= lencn - VECSZ; i += VECSZ, src += VECSZ, dst += VECSZ)
v_store((typename FT::raw_t*)dst, (vx_load_expand(src - cn) + vx_load_expand(src + cn) + (vx_load_expand(src) << 1)) << (FT::fixedShift-2));
#endif
for (; i < lencn; i++, src++, dst++)
*dst = (FT(src[-cn])>>2) + (FT(src[cn])>>2) + (FT(src[0])>>1);
// Point that fall right from border
......@@ -231,51 +239,19 @@ void hlineSmooth3N121(const ET* src, int cn, const FT*, int, FT* dst, int len, i
}
}
}
template <typename ET, typename FT>
void hlineSmooth3N121(const ET* src, int cn, const FT*, int, FT* dst, int len, int borderType);
template <>
void hlineSmooth3N121<uint8_t, ufixedpoint16>(const uint8_t* src, int cn, const ufixedpoint16*, int, ufixedpoint16* dst, int len, int borderType)
void hlineSmooth3N121<uint8_t, ufixedpoint16>(const uint8_t* src, int cn, const ufixedpoint16* _m, int _n, ufixedpoint16* dst, int len, int borderType)
{
if (len == 1)
{
if (borderType != BORDER_CONSTANT)
for (int k = 0; k < cn; k++)
dst[k] = ufixedpoint16(src[k]);
else
for (int k = 0; k < cn; k++)
dst[k] = ufixedpoint16(src[k]) >> 1;
}
else
{
// Point that fall left from border
for (int k = 0; k < cn; k++)
dst[k] = (ufixedpoint16(src[k])>>1) + (ufixedpoint16(src[cn + k])>>2);
if (borderType != BORDER_CONSTANT)// If BORDER_CONSTANT out of border values are equal to zero and could be skipped
{
int src_idx = borderInterpolate(-1, len, borderType);
for (int k = 0; k < cn; k++)
dst[k] = dst[k] + (ufixedpoint16(src[src_idx*cn + k])>>2);
}
src += cn; dst += cn;
int i = cn, lencn = (len - 1)*cn;
#if CV_SIMD
const int VECSZ = v_uint16::nlanes;
for (; i <= lencn - VECSZ; i += VECSZ, src += VECSZ, dst += VECSZ)
v_store((uint16_t*)dst, (vx_load_expand(src - cn) + vx_load_expand(src + cn) + (vx_load_expand(src) << 1)) << 6);
#endif
for (; i < lencn; i++, src++, dst++)
*((uint16_t*)dst) = (uint16_t(src[-cn]) + uint16_t(src[cn]) + (uint16_t(src[0]) << 1)) << 6;
// Point that fall right from border
for (int k = 0; k < cn; k++)
dst[k] = (ufixedpoint16(src[k - cn])>>2) + (ufixedpoint16(src[k])>>1);
if (borderType != BORDER_CONSTANT)// If BORDER_CONSTANT out of border values are equal to zero and could be skipped
{
int src_idx = (borderInterpolate(len, len, borderType) - (len - 1))*cn;
for (int k = 0; k < cn; k++)
dst[k] = dst[k] + (ufixedpoint16(src[src_idx + k])>>2);
}
}
hlineSmooth3N121Impl<uint8_t, ufixedpoint16, v_uint16>(src, cn, _m, _n, dst, len, borderType);
}
template <>
void hlineSmooth3N121<uint16_t, ufixedpoint32>(const uint16_t* src, int cn, const ufixedpoint32* _m, int _n, ufixedpoint32* dst, int len, int borderType)
{
    // 16-bit pixels accumulated in 16.16 unsigned fixed point: forward to
    // the shared implementation, selecting v_uint32 as the SIMD lane type
    // for the vectorized middle section.
    hlineSmooth3N121Impl<uint16_t, ufixedpoint32, v_uint32>(
        src, cn, _m, _n, dst, len, borderType);
}
template <typename ET, typename FT>
void hlineSmooth3Naba(const ET* src, int cn, const FT* m, int, FT* dst, int len, int borderType)
{
......@@ -1376,6 +1352,28 @@ void vlineSmooth3N121<uint8_t, ufixedpoint16>(const ufixedpoint16* const * src,
for (; i < len; i++)
dst[i] = (((uint32_t)(((uint16_t*)(src[0]))[i]) + (uint32_t)(((uint16_t*)(src[2]))[i]) + ((uint32_t)(((uint16_t*)(src[1]))[i]) << 1)) + (1 << 9)) >> 10;
}
// Vertical 1-2-1 smoothing for 16-bit images: sums three rows of raw
// ufixedpoint32 (16.16) values with weights 1,2,1 and packs the result back
// to uint16.  Total scaling is 2^16 (fixed point) * 4 (kernel sum), hence
// the round-and-shift by 18 bits.
template <>
void vlineSmooth3N121<uint16_t, ufixedpoint32>(const ufixedpoint32* const * src, const ufixedpoint32*, int, uint16_t* dst, int len)
{
    int i = 0;
#if CV_SIMD
    const int VECSZ = v_uint32::nlanes;
    // SIMD path: widen the 32-bit raw values to 64 bits before accumulating
    // (so the weighted sum cannot overflow), then narrow with a rounding
    // shift (v_rshr_pack) and pack two halves back to uint16 lanes.
    for (; i <= len - 2*VECSZ; i += 2*VECSZ)
    {
        v_uint64 v_src00, v_src01, v_src02, v_src03, v_src10, v_src11, v_src12, v_src13, v_src20, v_src21, v_src22, v_src23;
        v_expand(vx_load((uint32_t*)(src[0]) + i), v_src00, v_src01);
        v_expand(vx_load((uint32_t*)(src[0]) + i + VECSZ), v_src02, v_src03);
        v_expand(vx_load((uint32_t*)(src[1]) + i), v_src10, v_src11);
        v_expand(vx_load((uint32_t*)(src[1]) + i + VECSZ), v_src12, v_src13);
        v_expand(vx_load((uint32_t*)(src[2]) + i), v_src20, v_src21);
        v_expand(vx_load((uint32_t*)(src[2]) + i + VECSZ), v_src22, v_src23);
        v_store(dst + i, v_pack(v_rshr_pack<18>(v_src00 + v_src20 + (v_src10 + v_src10), v_src01 + v_src21 + (v_src11 + v_src11)),
                                v_rshr_pack<18>(v_src02 + v_src22 + (v_src12 + v_src12), v_src03 + v_src23 + (v_src13 + v_src13))));
    }
#endif
    // Scalar tail: same weighted sum in 64-bit with an explicit rounding
    // constant (1 << 17) before the 18-bit shift.
    for (; i < len; i++)
        dst[i] = (((uint64_t)((uint32_t*)(src[0]))[i]) + (uint64_t)(((uint32_t*)(src[2]))[i]) + ((uint64_t(((uint32_t*)(src[1]))[i]) << 1)) + (1 << 17)) >> 18;
}
template <typename ET, typename FT>
void vlineSmooth5N(const FT* const * src, const FT* m, int, ET* dst, int len)
{
......@@ -1525,6 +1523,39 @@ void vlineSmooth5N14641<uint8_t, ufixedpoint16>(const ufixedpoint16* const * src
(((uint32_t)(((uint16_t*)(src[1]))[i]) + (uint32_t)(((uint16_t*)(src[3]))[i])) << 2) +
(uint32_t)(((uint16_t*)(src[0]))[i]) + (uint32_t)(((uint16_t*)(src[4]))[i]) + (1 << 11)) >> 12;
}
// Vertical 5-tap (1,4,6,4,1)/16 smoothing for 16-bit images working on rows
// of raw ufixedpoint32 (16.16) values.  Kernel sum 16 (4 bits) plus the 16
// fractional bits gives the final round-and-shift by 20 bits when packing
// the result down to uint16.
template <>
void vlineSmooth5N14641<uint16_t, ufixedpoint32>(const ufixedpoint32* const * src, const ufixedpoint32*, int, uint16_t* dst, int len)
{
    int i = 0;
#if CV_SIMD
    const int VECSZ = v_uint32::nlanes;
    // SIMD path: widen to 64 bits, build the weighted sum with shifts
    // (6*x computed as (x<<2) + (x<<1)), then narrow twice: a rounding
    // 20-bit shift to 32-bit lanes, and a pack to uint16.
    for (; i <= len - 2*VECSZ; i += 2*VECSZ)
    {
        v_uint64 v_src00, v_src10, v_src20, v_src30, v_src40;
        v_uint64 v_src01, v_src11, v_src21, v_src31, v_src41;
        v_uint64 v_src02, v_src12, v_src22, v_src32, v_src42;
        v_uint64 v_src03, v_src13, v_src23, v_src33, v_src43;
        v_expand(vx_load((uint32_t*)(src[0]) + i), v_src00, v_src01);
        v_expand(vx_load((uint32_t*)(src[0]) + i + VECSZ), v_src02, v_src03);
        v_expand(vx_load((uint32_t*)(src[1]) + i), v_src10, v_src11);
        v_expand(vx_load((uint32_t*)(src[1]) + i + VECSZ), v_src12, v_src13);
        v_expand(vx_load((uint32_t*)(src[2]) + i), v_src20, v_src21);
        v_expand(vx_load((uint32_t*)(src[2]) + i + VECSZ), v_src22, v_src23);
        v_expand(vx_load((uint32_t*)(src[3]) + i), v_src30, v_src31);
        v_expand(vx_load((uint32_t*)(src[3]) + i + VECSZ), v_src32, v_src33);
        v_expand(vx_load((uint32_t*)(src[4]) + i), v_src40, v_src41);
        v_expand(vx_load((uint32_t*)(src[4]) + i + VECSZ), v_src42, v_src43);
        v_store(dst + i, v_pack(v_rshr_pack<20>((v_src20 << 2) + (v_src20 << 1) + ((v_src10 + v_src30) << 2) + v_src00 + v_src40,
                                                (v_src21 << 2) + (v_src21 << 1) + ((v_src11 + v_src31) << 2) + v_src01 + v_src41),
                                v_rshr_pack<20>((v_src22 << 2) + (v_src22 << 1) + ((v_src12 + v_src32) << 2) + v_src02 + v_src42,
                                                (v_src23 << 2) + (v_src23 << 1) + ((v_src13 + v_src33) << 2) + v_src03 + v_src43)));
    }
#endif
    // Scalar tail: same 1-4-6-4-1 sum in 64-bit with an explicit rounding
    // constant (1 << 19) before the 20-bit shift.
    for (; i < len; i++)
        dst[i] = ((uint64_t)(((uint32_t*)(src[2]))[i]) * 6 +
                  (((uint64_t)(((uint32_t*)(src[1]))[i]) + (uint64_t)(((uint32_t*)(src[3]))[i])) << 2) +
                  (uint64_t)(((uint32_t*)(src[0]))[i]) + (uint64_t)(((uint32_t*)(src[4]))[i]) + (1 << 19)) >> 20;
}
template <typename ET, typename FT>
void vlineSmooth(const FT* const * src, const FT* m, int n, ET* dst, int len)
{
......@@ -2029,25 +2060,42 @@ private:
} // namespace anon
void GaussianBlurFixedPoint(const Mat& src, /*const*/ Mat& dst,
const uint16_t/*ufixedpoint16*/* fkx, int fkx_size,
const uint16_t/*ufixedpoint16*/* fky, int fky_size,
int borderType)
template <typename RFT, typename ET, typename FT>
void GaussianBlurFixedPointImpl(const Mat& src, /*const*/ Mat& dst,
const RFT* fkx, int fkx_size,
const RFT* fky, int fky_size,
int borderType)
{
CV_INSTRUMENT_REGION();
CV_Assert(src.depth() == CV_8U && ((borderType & BORDER_ISOLATED) || !src.isSubmatrix()));
fixedSmoothInvoker<uint8_t, ufixedpoint16> invoker(
src.ptr<uint8_t>(), src.step1(),
dst.ptr<uint8_t>(), dst.step1(), dst.cols, dst.rows, dst.channels(),
(const ufixedpoint16*)fkx, fkx_size, (const ufixedpoint16*)fky, fky_size,
CV_Assert(src.depth() == DataType<ET>::depth && ((borderType & BORDER_ISOLATED) || !src.isSubmatrix()));
fixedSmoothInvoker<ET, FT> invoker(
src.ptr<ET>(), src.step1(),
dst.ptr<ET>(), dst.step1(), dst.cols, dst.rows, dst.channels(),
(const FT*)fkx, fkx_size, (const FT*)fky, fky_size,
borderType & ~BORDER_ISOLATED);
{
// TODO AVX guard (external call)
parallel_for_(Range(0, dst.rows), invoker, std::max(1, std::min(getNumThreads(), getNumberOfCPUs())));
}
}
template <>
void GaussianBlurFixedPoint<uint16_t>(const Mat& src, /*const*/ Mat& dst,
                                      const uint16_t/*ufixedpoint16*/* fkx, int fkx_size,
                                      const uint16_t/*ufixedpoint16*/* fky, int fky_size,
                                      int borderType)
{
    // Raw uint16_t kernel weights are reinterpreted as ufixedpoint16
    // (8 fractional bits); this instantiation smooths 8-bit pixels.
    GaussianBlurFixedPointImpl<uint16_t, uint8_t, ufixedpoint16>(
        src, dst, fkx, fkx_size, fky, fky_size, borderType);
}
template <>
void GaussianBlurFixedPoint<uint32_t>(const Mat& src, /*const*/ Mat& dst,
                                      const uint32_t/*ufixedpoint32*/* fkx, int fkx_size,
                                      const uint32_t/*ufixedpoint32*/* fky, int fky_size,
                                      int borderType)
{
    // Raw uint32_t kernel weights are reinterpreted as ufixedpoint32
    // (16 fractional bits); this instantiation smooths 16-bit pixels.
    GaussianBlurFixedPointImpl<uint32_t, uint16_t, ufixedpoint32>(
        src, dst, fkx, fkx_size, fky, fky_size, borderType);
}
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace
......@@ -346,14 +346,24 @@ protected:
CV_ResizeExactTest::CV_ResizeExactTest() : CV_ResizeTest()
{
max_interpolation = 1;
max_interpolation = 2;
}
void CV_ResizeExactTest::get_test_array_types_and_sizes(int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types)
{
CV_ResizeTest::get_test_array_types_and_sizes(test_case_idx, sizes, types);
interpolation = INTER_LINEAR_EXACT;
switch (interpolation)
{
case 0:
interpolation = INTER_LINEAR_EXACT;
break;
case 1:
interpolation = INTER_NEAREST_EXACT;
break;
default:
CV_Assert(interpolation < max_interpolation);
}
if (CV_MAT_DEPTH(types[INPUT][0]) == CV_32F ||
CV_MAT_DEPTH(types[INPUT][0]) == CV_64F)
types[INPUT][0] = types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = CV_MAKETYPE(CV_8U, CV_MAT_CN(types[INPUT][0]));
......
......@@ -152,4 +152,89 @@ TEST(Resize_Bitexact, Linear8U)
}
}
// Parameterized fixture comparing INTER_NEAREST_EXACT with the classic
// INTER_NEAREST path; the single int parameter is the matrix depth
// (CV_8U, CV_16U, ...).
PARAM_TEST_CASE(Resize_Bitexact, int)
{
public:
    int depth; // matrix depth under test

    virtual void SetUp()
    {
        depth = GET_PARAM(0);
    }

    // Resizes `src` 2x horizontally (1x vertically) with both NN variants
    // and returns the largest per-pixel absolute difference between them.
    double CountDiff(const Mat& src)
    {
        Mat dstExact; cv::resize(src, dstExact, Size(), 2, 1, INTER_NEAREST_EXACT);
        Mat dstNonExact; cv::resize(src, dstNonExact, Size(), 2, 1, INTER_NEAREST);

        return cv::norm(dstExact, dstNonExact, NORM_INF);
    }
};
// Checks that INTER_NEAREST_EXACT produces the same output as plain
// INTER_NEAREST on a real image, in both color and grayscale, converted to
// the parameterized depth.  (The instantiation below is DISABLED because
// the two modes use different coordinate conventions.)
TEST_P(Resize_Bitexact, Nearest8U_vsNonExact)
{
    Mat mat_color, mat_gray;
    Mat src_color = imread(cvtest::findDataFile("shared/lena.png"));
    Mat src_gray; cv::cvtColor(src_color, src_gray, COLOR_BGR2GRAY);
    src_color.convertTo(mat_color, depth);
    src_gray.convertTo(mat_gray, depth);

    // A zero NORM_INF difference means the two NN variants agree everywhere.
    EXPECT_EQ(CountDiff(mat_color), 0) << "color, type: " << depth;
    EXPECT_EQ(CountDiff(mat_gray), 0) << "gray, type: " << depth;
}
// INTER_NEAREST and INTER_NEAREST_EXACT currently use different coordinate-rounding conventions, so this comparison suite stays disabled.
INSTANTIATE_TEST_CASE_P(DISABLED_Imgproc, Resize_Bitexact,
testing::Values(CV_8U, CV_16U, CV_32F, CV_64F)
);
// Verifies INTER_NEAREST_EXACT against hand-computed expected outputs for a
// set of small decimation and zoom cases (pixel-center sampling convention).
// Improvement: each EXPECT_EQ now reports the failing case index -- previously
// all six cases shared one indistinguishable assertion inside the loop.
TEST(Resize_Bitexact, Nearest8U)
{
    Mat src[6], dst[6];

    // 2x decimation
    src[0] = (Mat_<uint8_t>(1, 6) << 0, 1, 2, 3, 4, 5);
    dst[0] = (Mat_<uint8_t>(1, 3) << 0, 2, 4);
    // decimation odd to 1
    src[1] = (Mat_<uint8_t>(1, 5) << 0, 1, 2, 3, 4);
    dst[1] = (Mat_<uint8_t>(1, 1) << 2);
    // decimation n*2-1 to n
    src[2] = (Mat_<uint8_t>(1, 5) << 0, 1, 2, 3, 4);
    dst[2] = (Mat_<uint8_t>(1, 3) << 0, 2, 4);
    // decimation n*2+1 to n
    src[3] = (Mat_<uint8_t>(1, 5) << 0, 1, 2, 3, 4);
    dst[3] = (Mat_<uint8_t>(1, 2) << 1, 3);
    // 2D zoom with a non-integer ratio (5x3 -> 7x5)
    src[4] = (Mat_<uint8_t>(3, 5) <<
              0,  1,  2,  3,  4,
              5,  6,  7,  8,  9,
             10, 11, 12, 13, 14);
    dst[4] = (Mat_<uint8_t>(5, 7) <<
              0,  1,  1,  2,  3,  3,  4,
              0,  1,  1,  2,  3,  3,  4,
              5,  6,  6,  7,  8,  8,  9,
             10, 11, 11, 12, 13, 13, 14,
             10, 11, 11, 12, 13, 13, 14);
    // exact 2x zoom in both dimensions
    src[5] = (Mat_<uint8_t>(2, 3) <<
              0, 1, 2,
              3, 4, 5);
    dst[5] = (Mat_<uint8_t>(4, 6) <<
              0, 0, 1, 1, 2, 2,
              0, 0, 1, 1, 2, 2,
              3, 3, 4, 4, 5, 5,
              3, 3, 4, 4, 5, 5);

    for (int i = 0; i < 6; i++)
    {
        Mat calc;
        resize(src[i], calc, dst[i].size(), 0, 0, INTER_NEAREST_EXACT);
        // Identify the failing case so a mismatch is easy to localize.
        EXPECT_EQ(cvtest::norm(calc, dst[i], cv::NORM_L1), 0) << "case #" << i;
    }
}
}} // namespace
......@@ -7,13 +7,15 @@
namespace opencv_test { namespace {
static const int fixedShiftU8 = 8;
static const int64_t fixedOne = (1L << fixedShiftU8);
int64_t v[][9] = {
{ fixedOne }, // size 1, sigma 0
{ fixedOne >> 2, fixedOne >> 1, fixedOne >> 2 }, // size 3, sigma 0
{ fixedOne >> 4, fixedOne >> 2, 6 * (fixedOne >> 4), fixedOne >> 2, fixedOne >> 4 }, // size 5, sigma 0
{ fixedOne >> 5, 7 * (fixedOne >> 6), 7 * (fixedOne >> 5), 9 * (fixedOne >> 5), 7 * (fixedOne >> 5), 7 * (fixedOne >> 6), fixedOne >> 5 }, // size 7, sigma 0
static const int64_t fixedOneU8 = (1L << fixedShiftU8);
static const int fixedShiftU16 = 16;
static const int64_t fixedOneU16 = (1L << fixedShiftU16);
int64_t vU8[][9] = {
{ fixedOneU8 }, // size 1, sigma 0
{ fixedOneU8 >> 2, fixedOneU8 >> 1, fixedOneU8 >> 2 }, // size 3, sigma 0
{ fixedOneU8 >> 4, fixedOneU8 >> 2, 6 * (fixedOneU8 >> 4), fixedOneU8 >> 2, fixedOneU8 >> 4 }, // size 5, sigma 0
{ fixedOneU8 >> 5, 7 * (fixedOneU8 >> 6), 7 * (fixedOneU8 >> 5), 9 * (fixedOneU8 >> 5), 7 * (fixedOneU8 >> 5), 7 * (fixedOneU8 >> 6), fixedOneU8 >> 5 }, // size 7, sigma 0
{ 4, 13, 30, 51, 60, 51, 30, 13, 4 }, // size 9, sigma 0
#if 1
#define CV_TEST_INACCURATE_GAUSSIAN_BLUR
......@@ -24,6 +26,14 @@ namespace opencv_test { namespace {
#endif
};
int64_t vU16[][9] = {
{ fixedOneU16 }, // size 1, sigma 0
{ fixedOneU16 >> 2, fixedOneU16 >> 1, fixedOneU16 >> 2 }, // size 3, sigma 0
{ fixedOneU16 >> 4, fixedOneU16 >> 2, 6 * (fixedOneU16 >> 4), fixedOneU16 >> 2, fixedOneU16 >> 4 }, // size 5, sigma 0
{ fixedOneU16 >> 5, 7 * (fixedOneU16 >> 6), 7 * (fixedOneU16 >> 5), 9 * (fixedOneU16 >> 5), 7 * (fixedOneU16 >> 5), 7 * (fixedOneU16 >> 6), fixedOneU16 >> 5 }, // size 7, sigma 0
{ 4<<8, 13<<8, 30<<8, 51<<8, 60<<8, 51<<8, 30<<8, 13<<8, 4<<8 } // size 9, sigma 0
};
template <typename T, int fixedShift>
T eval(Mat src, vector<int64_t> kernelx, vector<int64_t> kernely)
{
......@@ -39,8 +49,6 @@ namespace opencv_test { namespace {
return saturate_cast<T>((val + fixedRound) >> (fixedShift * 2));
}
TEST(GaussianBlur_Bitexact, Linear8U)
{
struct testmode
{
int type;
......@@ -50,34 +58,6 @@ TEST(GaussianBlur_Bitexact, Linear8U)
double sigma_y;
vector<int64_t> kernel_x;
vector<int64_t> kernel_y;
} modes[] = {
{ CV_8UC1, Size( 1, 1), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC1, Size( 2, 2), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC1, Size( 3, 1), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC1, Size( 1, 3), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC1, Size( 3, 3), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC1, Size( 3, 3), Size(5, 5), 0, 0, vector<int64_t>(v[2], v[2]+5), vector<int64_t>(v[2], v[2]+5) },
{ CV_8UC1, Size( 3, 3), Size(7, 7), 0, 0, vector<int64_t>(v[3], v[3]+7), vector<int64_t>(v[3], v[3]+7) },
{ CV_8UC1, Size( 5, 5), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC1, Size( 5, 5), Size(5, 5), 0, 0, vector<int64_t>(v[2], v[2]+5), vector<int64_t>(v[2], v[2]+5) },
{ CV_8UC1, Size( 3, 5), Size(5, 5), 0, 0, vector<int64_t>(v[2], v[2]+5), vector<int64_t>(v[2], v[2]+5) },
{ CV_8UC1, Size( 5, 5), Size(5, 5), 0, 0, vector<int64_t>(v[2], v[2]+5), vector<int64_t>(v[2], v[2]+5) },
{ CV_8UC1, Size( 5, 5), Size(7, 7), 0, 0, vector<int64_t>(v[3], v[3]+7), vector<int64_t>(v[3], v[3]+7) },
{ CV_8UC1, Size( 7, 7), Size(7, 7), 0, 0, vector<int64_t>(v[3], v[3]+7), vector<int64_t>(v[3], v[3]+7) },
{ CV_8UC1, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC2, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC3, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC4, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(v[1], v[1]+3), vector<int64_t>(v[1], v[1]+3) },
{ CV_8UC1, Size( 256, 128), Size(5, 5), 0, 0, vector<int64_t>(v[2], v[2]+5), vector<int64_t>(v[2], v[2]+5) },
{ CV_8UC1, Size( 256, 128), Size(7, 7), 0, 0, vector<int64_t>(v[3], v[3]+7), vector<int64_t>(v[3], v[3]+7) },
{ CV_8UC1, Size( 256, 128), Size(9, 9), 0, 0, vector<int64_t>(v[4], v[4]+9), vector<int64_t>(v[4], v[4]+9) },
#ifdef CV_TEST_INACCURATE_GAUSSIAN_BLUR
{ CV_8UC1, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC2, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC3, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC4, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC1, Size( 256, 128), Size(5, 5), 0.375, 0.75, vector<int64_t>(v[7], v[7]+5), vector<int64_t>(v[8], v[8]+5) }
#endif
};
int bordermodes[] = {
......@@ -93,11 +73,12 @@ TEST(GaussianBlur_Bitexact, Linear8U)
// BORDER_REFLECT_101
};
for (int modeind = 0, _modecnt = sizeof(modes) / sizeof(modes[0]); modeind < _modecnt; ++modeind)
template <int fixedShift>
void checkMode(const testmode& mode)
{
int type = modes[modeind].type, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
int dcols = modes[modeind].sz.width, drows = modes[modeind].sz.height;
Size kernel = modes[modeind].kernel;
int type = mode.type, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
int dcols = mode.sz.width, drows = mode.sz.height;
Size kernel = mode.kernel;
int rows = drows + 20, cols = dcols + 20;
Mat src(rows, cols, type), refdst(drows, dcols, type), dst;
......@@ -142,25 +123,93 @@ TEST(GaussianBlur_Bitexact, Linear8U)
for (int i = 0; i < dcols; i++)
{
if (depth == CV_8U)
dst_chan.at<uint8_t>(j, i) = eval<uint8_t, fixedShiftU8>(src_chan(Rect(i,j,kernel.width,kernel.height)), modes[modeind].kernel_x, modes[modeind].kernel_y);
dst_chan.at<uint8_t>(j, i) = eval<uint8_t, fixedShift>(src_chan(Rect(i,j,kernel.width,kernel.height)), mode.kernel_x, mode.kernel_y);
else if (depth == CV_16U)
dst_chan.at<uint16_t>(j, i) = eval<uint16_t, fixedShiftU8>(src_chan(Rect(i, j, kernel.width, kernel.height)), modes[modeind].kernel_x, modes[modeind].kernel_y);
dst_chan.at<uint16_t>(j, i) = eval<uint16_t, fixedShift>(src_chan(Rect(i, j, kernel.width, kernel.height)), mode.kernel_x, mode.kernel_y);
else if (depth == CV_16S)
dst_chan.at<int16_t>(j, i) = eval<int16_t, fixedShiftU8>(src_chan(Rect(i, j, kernel.width, kernel.height)), modes[modeind].kernel_x, modes[modeind].kernel_y);
dst_chan.at<int16_t>(j, i) = eval<int16_t, fixedShift>(src_chan(Rect(i, j, kernel.width, kernel.height)), mode.kernel_x, mode.kernel_y);
else if (depth == CV_32S)
dst_chan.at<int32_t>(j, i) = eval<int32_t, fixedShiftU8>(src_chan(Rect(i, j, kernel.width, kernel.height)), modes[modeind].kernel_x, modes[modeind].kernel_y);
dst_chan.at<int32_t>(j, i) = eval<int32_t, fixedShift>(src_chan(Rect(i, j, kernel.width, kernel.height)), mode.kernel_x, mode.kernel_y);
else
CV_Assert(0);
}
mixChannels(dst_chan, refdst, toFrom, 1);
}
cv::GaussianBlur(src_roi, dst, kernel, modes[modeind].sigma_x, modes[modeind].sigma_y, bordermodes[borderind]);
cv::GaussianBlur(src_roi, dst, kernel, mode.sigma_x, mode.sigma_y, bordermodes[borderind]);
EXPECT_GE(0, cvtest::norm(refdst, dst, cv::NORM_L1))
<< "GaussianBlur " << cn << "-chan mat " << drows << "x" << dcols << " by kernel " << kernel << " sigma(" << modes[modeind].sigma_x << ";" << modes[modeind].sigma_y << ") failed with max diff " << cvtest::norm(refdst, dst, cv::NORM_INF);
<< "GaussianBlur " << cn << "-chan mat " << drows << "x" << dcols << " by kernel " << kernel << " sigma(" << mode.sigma_x << ";" << mode.sigma_y << ") failed with max diff " << cvtest::norm(refdst, dst, cv::NORM_INF);
}
}
TEST(GaussianBlur_Bitexact, Linear8U)
{
    // Each entry pairs an input type/size with a Gaussian kernel configuration
    // and the 8-bit fixed-point reference kernels (rows of vU8) that the
    // bit-exact reference evaluator uses.
    testmode modes[] = {
        { CV_8UC1, Size(  1,   1), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC1, Size(  2,   2), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC1, Size(  3,   1), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC1, Size(  1,   3), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC1, Size(  3,   3), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC1, Size(  3,   3), Size(5, 5), 0,     0,     vector<int64_t>(vU8[2], vU8[2] + 5), vector<int64_t>(vU8[2], vU8[2] + 5) },
        { CV_8UC1, Size(  3,   3), Size(7, 7), 0,     0,     vector<int64_t>(vU8[3], vU8[3] + 7), vector<int64_t>(vU8[3], vU8[3] + 7) },
        { CV_8UC1, Size(  5,   5), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC1, Size(  5,   5), Size(5, 5), 0,     0,     vector<int64_t>(vU8[2], vU8[2] + 5), vector<int64_t>(vU8[2], vU8[2] + 5) },
        { CV_8UC1, Size(  3,   5), Size(5, 5), 0,     0,     vector<int64_t>(vU8[2], vU8[2] + 5), vector<int64_t>(vU8[2], vU8[2] + 5) },
        { CV_8UC1, Size(  5,   5), Size(5, 5), 0,     0,     vector<int64_t>(vU8[2], vU8[2] + 5), vector<int64_t>(vU8[2], vU8[2] + 5) },
        { CV_8UC1, Size(  5,   5), Size(7, 7), 0,     0,     vector<int64_t>(vU8[3], vU8[3] + 7), vector<int64_t>(vU8[3], vU8[3] + 7) },
        { CV_8UC1, Size(  7,   7), Size(7, 7), 0,     0,     vector<int64_t>(vU8[3], vU8[3] + 7), vector<int64_t>(vU8[3], vU8[3] + 7) },
        { CV_8UC1, Size(256, 128), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC2, Size(256, 128), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC3, Size(256, 128), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC4, Size(256, 128), Size(3, 3), 0,     0,     vector<int64_t>(vU8[1], vU8[1] + 3), vector<int64_t>(vU8[1], vU8[1] + 3) },
        { CV_8UC1, Size(256, 128), Size(5, 5), 0,     0,     vector<int64_t>(vU8[2], vU8[2] + 5), vector<int64_t>(vU8[2], vU8[2] + 5) },
        { CV_8UC1, Size(256, 128), Size(7, 7), 0,     0,     vector<int64_t>(vU8[3], vU8[3] + 7), vector<int64_t>(vU8[3], vU8[3] + 7) },
        { CV_8UC1, Size(256, 128), Size(9, 9), 0,     0,     vector<int64_t>(vU8[4], vU8[4] + 9), vector<int64_t>(vU8[4], vU8[4] + 9) },
#ifdef CV_TEST_INACCURATE_GAUSSIAN_BLUR
        // Non-zero sigmas: kernels that the implementation only approximates.
        { CV_8UC1, Size(256, 128), Size(3, 3), 1.75,  0.875, vector<int64_t>(vU8[5], vU8[5] + 3), vector<int64_t>(vU8[6], vU8[6] + 3) },
        { CV_8UC2, Size(256, 128), Size(3, 3), 1.75,  0.875, vector<int64_t>(vU8[5], vU8[5] + 3), vector<int64_t>(vU8[6], vU8[6] + 3) },
        { CV_8UC3, Size(256, 128), Size(3, 3), 1.75,  0.875, vector<int64_t>(vU8[5], vU8[5] + 3), vector<int64_t>(vU8[6], vU8[6] + 3) },
        { CV_8UC4, Size(256, 128), Size(3, 3), 1.75,  0.875, vector<int64_t>(vU8[5], vU8[5] + 3), vector<int64_t>(vU8[6], vU8[6] + 3) },
        { CV_8UC1, Size(256, 128), Size(5, 5), 0.375, 0.75,  vector<int64_t>(vU8[7], vU8[7] + 5), vector<int64_t>(vU8[8], vU8[8] + 5) }
#endif
    };
    // Run the bit-exact comparison for every configuration using the
    // 8-bit fixed-point shift.
    for (const testmode& mode : modes)
    {
        checkMode<fixedShiftU8>(mode);
    }
}
TEST(GaussianBlur_Bitexact, Linear16U)
{
    // 16-bit counterpart of Linear8U: the same kernel-size configurations,
    // but the reference coefficients come from vU16 (16-bit fixed point).
    testmode modes[] = {
        { CV_16UC1, Size( 1, 1), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC1, Size( 2, 2), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC1, Size( 3, 1), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC1, Size( 1, 3), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC1, Size( 3, 3), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC1, Size( 3, 3), Size(5, 5), 0, 0, vector<int64_t>(vU16[2], vU16[2]+5), vector<int64_t>(vU16[2], vU16[2]+5) },
        { CV_16UC1, Size( 3, 3), Size(7, 7), 0, 0, vector<int64_t>(vU16[3], vU16[3]+7), vector<int64_t>(vU16[3], vU16[3]+7) },
        { CV_16UC1, Size( 5, 5), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC1, Size( 5, 5), Size(5, 5), 0, 0, vector<int64_t>(vU16[2], vU16[2]+5), vector<int64_t>(vU16[2], vU16[2]+5) },
        { CV_16UC1, Size( 3, 5), Size(5, 5), 0, 0, vector<int64_t>(vU16[2], vU16[2]+5), vector<int64_t>(vU16[2], vU16[2]+5) },
        { CV_16UC1, Size( 5, 5), Size(5, 5), 0, 0, vector<int64_t>(vU16[2], vU16[2]+5), vector<int64_t>(vU16[2], vU16[2]+5) },
        { CV_16UC1, Size( 5, 5), Size(7, 7), 0, 0, vector<int64_t>(vU16[3], vU16[3]+7), vector<int64_t>(vU16[3], vU16[3]+7) },
        { CV_16UC1, Size( 7, 7), Size(7, 7), 0, 0, vector<int64_t>(vU16[3], vU16[3]+7), vector<int64_t>(vU16[3], vU16[3]+7) },
        { CV_16UC1, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC2, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC3, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC4, Size( 256, 128), Size(3, 3), 0, 0, vector<int64_t>(vU16[1], vU16[1]+3), vector<int64_t>(vU16[1], vU16[1]+3) },
        { CV_16UC1, Size( 256, 128), Size(5, 5), 0, 0, vector<int64_t>(vU16[2], vU16[2]+5), vector<int64_t>(vU16[2], vU16[2]+5) },
        { CV_16UC1, Size( 256, 128), Size(7, 7), 0, 0, vector<int64_t>(vU16[3], vU16[3]+7), vector<int64_t>(vU16[3], vU16[3]+7) },
        { CV_16UC1, Size( 256, 128), Size(9, 9), 0, 0, vector<int64_t>(vU16[4], vU16[4]+9), vector<int64_t>(vU16[4], vU16[4]+9) },
    };
    // Use the named shift constant instead of the magic number 16, consistent
    // with Linear8U's use of fixedShiftU8 (fixedShiftU16 == 16).
    for (const testmode& mode : modes)
    {
        checkMode<fixedShiftU16>(mode);
    }
}
TEST(GaussianBlur_Bitexact, regression_15015)
......
......@@ -81,8 +81,8 @@ void calcGST(const Mat& inputImg, Mat& imgCoherencyOut, Mat& imgOrientationOut,
// GST components calculation (stop)
// eigenvalue calculation (start)
// lambda1 = J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2)
// lambda2 = J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2)
// lambda1 = 0.5*(J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2))
// lambda2 = 0.5*(J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2))
Mat tmp1, tmp2, tmp3, tmp4;
tmp1 = J11 + J22;
tmp2 = J11 - J22;
......@@ -91,8 +91,10 @@ void calcGST(const Mat& inputImg, Mat& imgCoherencyOut, Mat& imgOrientationOut,
sqrt(tmp2 + 4.0 * tmp3, tmp4);
Mat lambda1, lambda2;
lambda1 = tmp1 + tmp4; // biggest eigenvalue
lambda2 = tmp1 - tmp4; // smallest eigenvalue
lambda1 = tmp1 + tmp4;
lambda1 = 0.5*lambda1; // biggest eigenvalue
lambda2 = tmp1 - tmp4;
lambda2 = 0.5*lambda2; // smallest eigenvalue
// eigenvalue calculation (stop)
// Coherency calculation (start)
......
......@@ -15,8 +15,9 @@ opencv_fd:
rgb: false
sample: "object_detection"
# YOLO4 object detection family from Darknet (https://github.com/AlexeyAB/darknet)
# YOLO object detection family from Darknet (https://pjreddie.com/darknet/yolo/)
# Might be used for all YOLOv2, TinyYolov2 and YOLOv3
# Might be used for all YOLOv2, TinyYolov2, YOLOv3, YOLOv4 and TinyYolov4
yolo:
model: "yolov3.weights"
config: "yolov3.cfg"
......
......@@ -31,16 +31,16 @@ def calcGST(inputIMG, w):
# GST components calculations (stop)
# eigenvalue calculation (start)
# lambda1 = J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2)
# lambda2 = J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2)
# lambda1 = 0.5*(J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2))
# lambda2 = 0.5*(J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2))
tmp1 = J11 + J22
tmp2 = J11 - J22
tmp2 = cv.multiply(tmp2, tmp2)
tmp3 = cv.multiply(J12, J12)
tmp4 = np.sqrt(tmp2 + 4.0 * tmp3)
lambda1 = tmp1 + tmp4 # biggest eigenvalue
lambda2 = tmp1 - tmp4 # smallest eigenvalue
lambda1 = 0.5*(tmp1 + tmp4) # biggest eigenvalue
lambda2 = 0.5*(tmp1 - tmp4) # smallest eigenvalue
# eigenvalue calculation (stop)
# Coherency calculation (start)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册