Commit 35062105 authored by marina.kolpakova

connected components labeling

Parent 2c8d1107
......@@ -917,6 +917,9 @@ CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTra
GpuMat& labels,
GpuMat& buf, Stream& stream = Stream::Null());
//! performs connected components labeling.
CV_EXPORTS void labelComponents(const GpuMat& image, GpuMat& mask, GpuMat& components, const cv::Scalar& lo, const cv::Scalar& hi);
////////////////////////////////// Histograms //////////////////////////////////
//! Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type.
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//M*/
#include "opencv2/gpu/device/common.hpp"
#include <iostream>
#include <stdio.h>
namespace cv { namespace gpu { namespace device
{
namespace ccl
{
enum
{
WARP_SIZE = 32,
WARP_LOG = 5,
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
STA_SIZE_MARGE_Y = 4,
STA_SIZE_MARGE_X = 32,
TPB_X = 1,
TPB_Y = 4,
TILE_COLS = CTA_SIZE_X * TPB_X,
TILE_ROWS = CTA_SIZE_Y * TPB_Y
};
typedef unsigned char component;
enum Edges { UP = 1, DOWN = 2, LEFT = 4, RIGHT = 8, EMPTY = 0xF0 };
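// Connectivity predicate: pixels a and b are considered connected when their
// difference a - b lies in the interval [-lo, hi].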
template<typename T>
struct InInterval
{
__host__ __device__ __forceinline__ InInterval(const T& _lo, const T& _hi) : lo(-_lo), hi(_hi) {};
T lo, hi;
__device__ __forceinline__ bool operator() (const T& a, const T& b) const
{
T d = a - b;
return lo <= d && d <= hi;
}
};
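// For every pixel, build a 4-connectivity edge mask: set the UP/DOWN/LEFT/RIGHT bits
// for each neighbour that satisfies the connectivity predicate.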
template<typename F>
__global__ void computeComponents(const DevMem2D image, DevMem2D components, F connected)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= image.cols || y >= image.rows) return;
int intensity = image(y, x);
component c = 0;
if ( x > 0 && connected(intensity, image(y, x - 1)))
c |= LEFT;
if ( y > 0 && connected(intensity, image(y - 1, x)))
c |= UP;
if ( x + 1 < image.cols && connected(intensity, image(y, x + 1)))
c |= RIGHT;
if ( y + 1 < image.rows && connected(intensity, image(y + 1, x)))
c |= DOWN;
components(y, x) = c;
}
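// Host wrapper: launches computeComponents with an intensity-interval predicate built from [lo, hi].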
void computeEdges(const DevMem2D& image, DevMem2D components, const int lo, const int hi)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(image.cols, block.x), divUp(image.rows, block.y));
InInterval<int> inInt(lo, hi);
computeComponents<InInterval<int> ><<<grid, block>>>(image, components, inInt);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
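// Labels connected components inside each TILE_COLS x TILE_ROWS tile independently.
// Labels are propagated iteratively through shared memory (local equivalence solving)
// and then converted to global row-major pixel indices before being written to comps.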
__global__ void lableTiles(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * TILE_COLS;
int y = threadIdx.y + blockIdx.y * TILE_ROWS;
if (x >= edges.cols || y >= edges.rows) return;
// only the y direction needs a bounds flag, since TPB_X is currently 1
int bounds = ((y + TPB_Y) < edges.rows);
__shared__ int labelsTile[TILE_ROWS][TILE_COLS];
__shared__ int edgesTile[TILE_ROWS][TILE_COLS];
int new_labels[TPB_Y][TPB_X];
int old_labels[TPB_Y][TPB_X];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edges(bounds * (y + CTA_SIZE_Y * i), x + CTA_SIZE_X * j);
if (!xloc) c &= ~LEFT;
if (!yloc) c &= ~UP;
if (xloc == TILE_COLS -1) c &= ~RIGHT;
if (yloc == TILE_ROWS -1) c &= ~DOWN;
new_labels[i][j] = yloc * TILE_COLS + xloc;
edgesTile[yloc][xloc] = c;
}
for (;;)
{
//1. backup
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
old_labels[i][j] = new_labels[i][j];
labelsTile[yloc][xloc] = new_labels[i][j];
}
__syncthreads();
//2. compare local arrays
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edgesTile[yloc][xloc];
int label = new_labels[i][j];
if (c & UP)
label = min(label, labelsTile[yloc - 1][xloc]);
if (c & DOWN)
label = min(label, labelsTile[yloc + 1][xloc]);
if (c & LEFT)
label = min(label, labelsTile[yloc][xloc - 1]);
if (c & RIGHT)
label = min(label, labelsTile[yloc][xloc + 1]);
new_labels[i][j] = label;
}
__syncthreads();
//3. determine: Is any value changed?
int changed = 0;
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
if (new_labels[i][j] < old_labels[i][j])
{
changed = 1;
atomicMin(&labelsTile[0][0] + old_labels[i][j], new_labels[i][j]);
}
}
changed = __syncthreads_or(changed);
if (!changed)
break;
//4. Compact paths
const int *labels = &labelsTile[0][0];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
while( labels[label] < label ) label = labels[label];
new_labels[i][j] = label;
}
__syncthreads();
}
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
int yloc = label / TILE_COLS;
int xloc = label - yloc * TILE_COLS;
xloc += blockIdx.x * TILE_COLS;
yloc += blockIdx.y * TILE_ROWS;
label = yloc * edges.cols + xloc;
// TODO: the same bounds check is needed for x once TPB_X > 1.
if (y + CTA_SIZE_Y * i < comps.rows) comps(y + CTA_SIZE_Y * i, x + CTA_SIZE_X * j) = label;
}
}
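// Follows parent links in comps (labels are row-major pixel indices) until a root,
// i.e. a label that is its own parent, is found.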
__device__ __forceinline__ int root(const DevMem2Di& comps, int label)
{
while(1)
{
int y = label / comps.cols;
int x = label - y * comps.cols;
int parent = comps(y, x);
if (label == parent) break;
label = parent;
}
return label;
}
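// Merges the components of labels l1 and l2: the larger of the two roots is re-parented
// to the smaller one.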
__device__ __forceinline__ void isConnected(DevMem2Di& comps, int l1, int l2, bool& changed)
{
int r1 = root(comps, l1);
int r2 = root(comps, l2);
if (r1 == r2) return;
int mi = min(r1, r2);
int ma = max(r1, r2);
int y = ma / comps.cols;
int x = ma - y * comps.cols;
atomicMin(&comps.ptr(y)[x], mi);
changed = true;
}
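// Merges components across tile borders. Each block handles a tilesNumY x tilesNumX group
// of tiles and repeatedly stitches labels along the horizontal and vertical seams until
// no label changes.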
__global__ void crossMerge(const int tilesNumY, const int tilesNumX, int tileSizeY, int tileSizeX,
const DevMem2D edges, DevMem2Di comps, const int yIncomplete, int xIncomplete)
{
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int stride = blockDim.y * blockDim.x;
int ybegin = blockIdx.y * (tilesNumY * tileSizeY);
int yend = ybegin + tilesNumY * tileSizeY;
if (blockIdx.y == gridDim.y - 1)
{
yend -= yIncomplete * tileSizeY;
yend -= tileSizeY;
tileSizeY = (edges.rows % tileSizeY);
yend += tileSizeY;
}
int xbegin = blockIdx.x * tilesNumX * tileSizeX;
int xend = xbegin + tilesNumX * tileSizeX;
if (blockIdx.x == gridDim.x - 1)
{
if (xIncomplete) yend = ybegin;
xend -= xIncomplete * tileSizeX;
xend -= tileSizeX;
tileSizeX = (edges.cols % tileSizeX);
xend += tileSizeX;
}
if (blockIdx.y == (gridDim.y - 1) && yIncomplete)
{
xend = xbegin;
}
int tasksV = (tilesNumX - 1) * (yend - ybegin);
int tasksH = (tilesNumY - 1) * (xend - xbegin);
int total = tasksH + tasksV;
bool changed;
do
{
changed = false;
for (int taskIdx = tid; taskIdx < total; taskIdx += stride)
{
if (taskIdx < tasksH)
{
int indexH = taskIdx;
int row = indexH / (xend - xbegin);
int col = indexH - row * (xend - xbegin);
int y = ybegin + (row + 1) * tileSizeY;
int x = xbegin + col;
component e = edges(y, x);
if (e & UP)
{
int lc = comps(y,x);
int lu = comps(y - 1, x);
isConnected(comps, lc, lu, changed);
}
}
else
{
int indexV = taskIdx - tasksH;
int col = indexV / (yend - ybegin);
int row = indexV - col * (yend - ybegin);
int x = xbegin + (col + 1) * tileSizeX;
int y = ybegin + row;
component e = edges(y, x);
if (e & LEFT)
{
int lc = comps(y, x);
int ll = comps(y, x - 1);
isConnected(comps, lc, ll, changed);
}
}
}
} while (__syncthreads_or(changed));
}
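// Final pass: replaces every label with the root of its tree so the label map becomes flat.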
__global__ void flatten(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x < comps.cols && y < comps.rows)
comps(y, x) = root(comps, comps(y, x));
}
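// Host pipeline: label each tile, then merge tiles pairwise (doubling the tile size each pass)
// until a single tile covers the whole image, and finally flatten the label map.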
void labelComponents(const DevMem2D& edges, DevMem2Di comps)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS));
lableTiles<<<grid, block>>>(edges, comps);
cudaSafeCall( cudaGetLastError() );
int tileSizeX = TILE_COLS, tileSizeY = TILE_ROWS;
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
while (grid.x > 1 || grid.y > 1)
{
dim3 mergeGrid(ceilf(grid.x / 2.0), ceilf(grid.y / 2.0));
dim3 mergeBlock(STA_SIZE_MARGE_X, STA_SIZE_MARGE_Y);
std::cout << "merging: " << grid.y << " x " << grid.x << " ---> " << mergeGrid.y << " x " << mergeGrid.x << " for tiles: " << tileSizeY << " x " << tileSizeX << std::endl;
crossMerge<<<mergeGrid, mergeBlock>>>(2, 2, tileSizeY, tileSizeX, edges, comps, ceilf(grid.y / 2.0) - grid.y / 2, ceilf(grid.x / 2.0) - grid.x / 2);
tileSizeX <<= 1;
tileSizeY <<= 1;
grid = mergeGrid;
cudaSafeCall( cudaGetLastError() );
}
grid.x = divUp(edges.cols, block.x);
grid.y = divUp(edges.rows, block.y);
flatten<<<grid, block>>>(edges, comps);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
} } }
\ No newline at end of file
......@@ -47,8 +47,26 @@
void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::labelComponents(const GpuMat&, GpuMat&, GpuMat&, const cv::Scalar&, const cv::Scalar&) { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
namespace cv { namespace gpu { namespace device
{
namespace ccl
{
void labelComponents(const DevMem2D& edges, DevMem2Di comps);
void computeEdges(const DevMem2D& image, DevMem2D edges, const int lo, const int hi);
}
}}}
void cv::gpu::labelComponents(const GpuMat& image, GpuMat& mask, GpuMat& components, const cv::Scalar& lo, const cv::Scalar& hi)
{
device::ccl::computeEdges(image, mask, lo[0], hi[0]);
device::ccl::labelComponents(mask, components);
}
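// Minimal usage sketch, assuming an 8-bit single-channel input and caller-allocated
// outputs (mirroring the test added in this commit):
//   cv::gpu::GpuMat img(grayMat);                      // grayMat: a CV_8UC1 cv::Mat
//   cv::gpu::GpuMat mask(img.size(), CV_8UC1);
//   cv::gpu::GpuMat comps(img.size(), CV_32SC1);
//   cv::gpu::labelComponents(img, mask, comps, cv::Scalar::all(0), cv::Scalar::all(2));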
namespace
{
typedef NppStatus (*init_func_t)(NppiSize oSize, NppiGraphcutState** ppState, Npp8u* pDeviceMem);
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//M*/
#include "precomp.hpp"
#include <string>
#include <iostream>
struct Labeling : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
cv::Mat load_image()
{
return cv::imread(std::string( cvtest::TS::ptr()->get_data_path() ) + "labeling/label.png");
}
};
TEST_P(Labeling, ConnectedComponents)
{
cv::Mat image;
cvtColor(load_image(), image, CV_BGR2GRAY);
cv::Mat image_cpu = image.clone();
// cv::floodFill(image, cv::Point(1,1),cv::Scalar::all(64), 0, cv::Scalar::all(0), cv::Scalar::all(256));
cv::gpu::GpuMat mask;
mask.create(image.rows, image.cols, CV_8UC1);
cv::gpu::GpuMat components;
components.create(image.rows, image.cols, CV_32SC1);
std::cout << "summary: " << image.cols << " " << image.rows << " "
<< cv::gpu::GpuMat(image).cols << " " << cv::gpu::GpuMat(image).rows<< " "
<< mask.cols << " " << mask.rows<< " "
<< components.cols << " " << components.rows<< std::endl;
cv::gpu::labelComponents(cv::gpu::GpuMat(image), mask, components, cv::Scalar::all(0), cv::Scalar::all(2));
// // for(int i = 0; i + 32 < image.rows; i += 32)
// // for(int j = 0; j + 32 < image.cols; j += 32)
// // {
// // std::cout << cv::Mat(cv::Mat(mask), cv::Rect(j, i, 32, 32 ))<< std::endl;
// // std::cout << cv::Mat(cv::Mat(components), cv::Rect(j, i, 32, 32 )) << std::endl;
// // }
// std::cout << cv::Mat(components) << std::endl;
// cv::imshow("test", image);
// cv::waitKey(0);
// for(int i = 0; i + 32 < image.rows; i += 32)
// for(int j = 0; j + 32 < image.cols; j += 32)
// cv::rectangle(image, cv::Rect(j, i, 32, 32) , CV_RGB(255, 255, 255));
cv::imshow("test", image);
cv::waitKey(0);
cv::imshow("test", cv::Mat(mask) * 10);
cv::waitKey(0);
cv::imshow("test", cv::Mat(components) * 2);
cv::waitKey(0);
std::cout << "test! " << image.cols << std::endl;
}
INSTANTIATE_TEST_CASE_P(ConnectedComponents, Labeling, ALL_DEVICES);
\ No newline at end of file