提交 3a04d08b 编写于 作者: M Maria Dimashova

added Kinect support (initial version without settings configuration yet) and sample on usage

上级 b63b3df5
......@@ -323,6 +323,7 @@ if(WIN32)
set(WITH_VIDEOINPUT ON CACHE BOOL "Enable VideoInput support")
endif()
set(WITH_OPENNI OFF CACHE BOOL "Include OpenNI support")
# ===================================================
# Macros that checks if module have been installed.
......@@ -753,6 +754,28 @@ if (WIN32 AND WITH_VIDEOINPUT)
endif()
endif()
############################### OpenNI ################################
# Detect the OpenNI library and the PrimeSensor Modules it needs to talk to
# actual hardware. Sets HAVE_OPENNI / HAVE_PRIME_SENSOR_FOR_OPENNI for the
# status report and for the highgui module build.
set(HAVE_OPENNI FALSE)
set(HAVE_PRIME_SENSOR_FOR_OPENNI FALSE)
if(WITH_OPENNI)
# find OpenNI library
# Clear cached search results so re-configuration re-runs the lookup.
unset(OPENNI_LIBRARY CACHE)
unset(PRIME_SENSOR_MODULES_FOR_OPENNI CACHE)
# NOTE(review): install locations are hardcoded; consider honoring a user hint
# (e.g. an OPENNI_ROOT cache variable) in addition to these default paths.
find_library(OPENNI_LIBRARY "OpenNI" PATHS "/usr/lib" "c:/Program Files/OpenNI/Lib" DOC "OpenNI library")
if(OPENNI_LIBRARY)
set(HAVE_OPENNI TRUE)
# the check: are PrimeSensor Modules for OpenNI installed?
if(WIN32)
find_file(PRIME_SENSOR_MODULES_FOR_OPENNI "XnCore.dll" PATHS "c:/Program Files/Prime Sense/Sensor/Bin" DOC "Core library of PrimeSensor Modules for OpenNI")
elseif(UNIX OR APPLE)
find_library(PRIME_SENSOR_MODULES_FOR_OPENNI "XnCore" PATHS "/usr/lib" DOC "Core library of PrimeSensor Modules for OpenNI")
endif()
if(PRIME_SENSOR_MODULES_FOR_OPENNI)
set(HAVE_PRIME_SENSOR_FOR_OPENNI TRUE)
endif()
endif()
endif() #if(WITH_OPENNI)
############################## Eigen2 ##############################
......@@ -1330,6 +1353,12 @@ else()
message(STATUS " OpenEXR: NO")
endif()
# Report OpenNI status; warn when the library was found but the PrimeSensor
# Modules (required to access the physical sensor) are missing.
if(NOT HAVE_OPENNI OR HAVE_PRIME_SENSOR_FOR_OPENNI)
message(STATUS " OpenNI: ${HAVE_OPENNI}")
else()
message(STATUS " OpenNI: ${HAVE_OPENNI} (WARNING: PrimeSensor Modules for OpenNI are not installed.)")
endif() #if(NOT HAVE_OPENNI OR HAVE_PRIME_SENSOR_FOR_OPENNI)
if(UNIX AND NOT APPLE)
message(STATUS "")
message(STATUS " Video I/O: ")
......@@ -1350,7 +1379,7 @@ else()
message(STATUS " V4L/V4L2: ${HAVE_CAMV4L}/${HAVE_CAMV4L2}")
endif()
message(STATUS " Xine: ${HAVE_XINE}")
endif()
endif() #if(UNIX AND NOT APPLE)
if(APPLE)
message(STATUS "")
......
......@@ -112,6 +112,9 @@
/* Xine video library */
#cmakedefine HAVE_XINE
/* OpenNI library */
#cmakedefine HAVE_OPENNI
/* LZ77 compression/decompression library (used for PNG) */
#cmakedefine HAVE_ZLIB
......
......@@ -174,6 +174,18 @@ if(UNIX)
endforeach()
endif()
#OpenNI
# Add the OpenNI capture backend to highgui when OpenNI was detected at the
# top level (WITH_OPENNI user option AND HAVE_OPENNI detection result).
if(WITH_OPENNI AND HAVE_OPENNI)
set(highgui_srcs ${highgui_srcs} src/cap_openni.cpp)
# NOTE(review): include paths are hardcoded; they should ideally be discovered
# alongside OPENNI_LIBRARY in the top-level detection block.
if(WIN32)
set(OPENNI_INCLUDE_DIR "c:/Program Files/OpenNI/Include")
elseif(UNIX OR APPLE)
set(OPENNI_INCLUDE_DIR "/usr/include/ni")
endif()
include_directories(${OPENNI_INCLUDE_DIR})
endif()
#YV
if(APPLE)
add_definitions(-DHAVE_QUICKTIME=1)
......@@ -286,6 +298,10 @@ if(WITH_TIFF AND NOT TIFF_FOUND)
endif()
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} opencv_core opencv_imgproc zlib ${JPEG_LIBRARIES} ${PNG_LIBRARIES} ${TIFF_LIBRARIES} ${JASPER_LIBRARIES} ${HIGHGUI_LIBRARIES} ${OPENEXR_LIBRARIES})
# Link the OpenNI runtime into highgui when the library was found by the
# top-level find_library() call.
if( OPENNI_LIBRARY )
target_link_libraries(${the_target} ${OPENNI_LIBRARY})
endif()
if(APPLE)
target_link_libraries(${the_target} "-lbz2 -framework Cocoa -framework QuartzCore")
......
......@@ -288,7 +288,9 @@ enum
CV_CAP_DSHOW =700, // DirectShow (via videoInput)
CV_CAP_PVAPI =800 // PvAPI, Prosilica GigE SDK
CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
CV_CAP_OPENNI =900 // OpenNI (for Kinect)
};
/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
......@@ -367,6 +369,23 @@ CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
/* close video file writer */
CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
/* Output map types for retrieving data from an OpenNI (Kinect) capture;
   pass one of these as the channel argument of cvRetrieveFrame/retrieve(). */
enum
{
// Data given from depth generator.
OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
// Data given from RGB image generator.
OPENNI_BGR_IMAGE = 5,
OPENNI_GRAY_IMAGE = 6
};
/* Sentinel values stored in pixels that have no valid depth/disparity measurement. */
const int OPENNI_BAD_DEPTH_VAL = 0;
const int OPENNI_BAD_DISP_VAL = 0;
/****************************************************************************************\
* Obsolete functions/synonyms *
\****************************************************************************************/
......
......@@ -123,6 +123,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
CV_CAP_MIL,
CV_CAP_QT,
CV_CAP_UNICAP,
CV_CAP_OPENNI,
-1
};
......@@ -142,7 +143,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
defined(HAVE_CAMV4L) || defined (HAVE_CAMV4L2) || defined(HAVE_GSTREAMER) || \
defined(HAVE_DC1394_2) || defined(HAVE_DC1394) || defined(HAVE_CMU1394) || \
defined(HAVE_GSTREAMER) || defined(HAVE_MIL) || defined(HAVE_QUICKTIME) || \
defined(HAVE_UNICAP) || defined(HAVE_PVAPI)
defined(HAVE_UNICAP) || defined(HAVE_PVAPI) || defined(HAVE_OPENNI)
// local variable to memorize the captured device
CvCapture *capture;
#endif
......@@ -241,6 +242,14 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
return capture;
break;
#endif
#ifdef HAVE_OPENNI
case CV_CAP_OPENNI:
capture = cvCreateCameraCapture_OpenNI (index);
if (capture)
return capture;
break;
#endif
}
}
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#ifdef HAVE_OPENNI
#define HACK_WITH_XML
#ifdef HACK_WITH_XML
#include <iostream>
#include <fstream>
#endif
#include "XnCppWrapper.h"
// Default OpenNI session configuration applied in the CvCapture_OpenNI
// constructor: PrimeSense license key, logging disabled, and 640x480@30fps
// mirrored output for both the Image and Depth production nodes.
const std::string XMLConfig =
"<OpenNI>"
"<Licenses>"
"<License vendor=\"PrimeSense\" key=\"0KOIk2JeIBYClPWVnMoRKn5cdY4=\"/>"
"</Licenses>"
"<Log writeToConsole=\"false\" writeToFile=\"false\">"
"<LogLevel value=\"3\"/>"
"<Masks>"
"<Mask name=\"ALL\" on=\"true\"/>"
"</Masks>"
"<Dumps>"
"</Dumps>"
"</Log>"
"<ProductionNodes>"
"<Node type=\"Image\" name=\"Image1\">"
"<Configuration>"
"<MapOutputMode xRes=\"640\" yRes=\"480\" FPS=\"30\"/>"
"<Mirror on=\"true\"/>"
"</Configuration>"
"</Node> "
"<Node type=\"Depth\" name=\"Depth1\">"
"<Configuration>"
"<MapOutputMode xRes=\"640\" yRes=\"480\" FPS=\"30\"/>"
"<Mirror on=\"true\"/>"
"</Configuration>"
"</Node>"
"</ProductionNodes>"
"</OpenNI>";
// CvCapture implementation on top of OpenNI: exposes a Kinect-style sensor's
// depth and RGB streams through the highgui capture interface.
class CvCapture_OpenNI : public CvCapture
{
public:
CvCapture_OpenNI();
virtual ~CvCapture_OpenNI();
// Property access is not implemented in this initial version (see TODOs).
virtual double getProperty(int);
virtual bool setProperty(int, double);
// Waits for and latches the next depth + image frame pair.
virtual bool grabFrame();
// dataType is one of the OPENNI_* output map constants from the public header.
virtual IplImage* retrieveFrame(int);
// True when the OpenNI context was initialized successfully in the constructor.
bool isOpened() const;
protected:
// Pairs a cv::Mat buffer with an IplImage header that aliases it, so
// retrieveFrame() can return an IplImage* without copying pixel data.
struct OutputMap
{
public:
cv::Mat mat;
IplImage* getIplImagePtr();
private:
IplImage iplHeader;
};
// Number of OPENNI_* output map types (matches the enum in the public header).
static const int outputTypesCount = 7;
// Sentinel values written into pixels that have no valid measurement.
static const unsigned short badDepth = 0;
static const unsigned int badDisparity = 0;
IplImage* retrieveDepthMap();
IplImage* retrievePointCloudMap();
IplImage* retrieveDisparityMap();
IplImage* retrieveDisparityMap_32F();
IplImage* retrieveValidDepthMask();
IplImage* retrieveBGRImage();
IplImage* retrieveGrayImage();
// Reads baseline, focal length and sentinel values from the depth generator.
void readCamerasParams();
// OpenNI context
xn::Context context;
bool m_isOpened;
// Data generators with their metadata
xn::DepthGenerator depthGenerator;
xn::DepthMetaData depthMetaData;
xn::ImageGenerator imageGenerator;
xn::ImageMetaData imageMetaData;
// Cameras settings:
// Distance between IR projector and IR camera; stored in mm after the
// cm -> mm conversion in readCamerasParams().
XnDouble baseline;
// Focal length for the IR camera in VGA resolution (in pixels)
XnUInt64 depthFocalLength_VGA;
// The value for shadow (occluded pixels)
XnUInt64 shadowValue;
// The value for pixels without a valid disparity measurement
XnUInt64 noSampleValue;
// One reusable output buffer per OPENNI_* map type.
std::vector<OutputMap> outputMaps;
};
IplImage* CvCapture_OpenNI::OutputMap::getIplImagePtr()
{
    // Expose the cv::Mat as an IplImage header aliasing the same pixel data;
    // returns 0 when no data has been produced yet.
    if( !mat.empty() )
    {
        iplHeader = IplImage(mat);
        return &iplHeader;
    }
    return 0;
}
// Whether the OpenNI context initialized successfully in the constructor.
bool CvCapture_OpenNI::isOpened() const
{
return m_isOpened;
}
// Initializes the OpenNI context, applies the XML configuration, creates the
// depth and image generators, and starts streaming. On any failure after
// context.Init() a CV_Error is raised; if Init() itself fails, the object is
// simply left in the "not opened" state (isOpened() returns false).
CvCapture_OpenNI::CvCapture_OpenNI()
{
    XnStatus status = XN_STATUS_OK;

    // Initialize the context with default configuration.
    status = context.Init();
    m_isOpened = (status == XN_STATUS_OK);

    if( m_isOpened )
    {
        // Configure the context.
#ifdef HACK_WITH_XML
        // Write configuration to a temporary file.
        // This is a hack, because there is a bug in RunXmlScript().
        // TODO: remove hack when bug in RunXmlScript() will be fixed.
        // NOTE(review): tmpnam() is race-prone; a safer temp-file API would be
        // preferable once portability requirements allow it.
        char xmlFilename[100];
        tmpnam( xmlFilename );
        std::ofstream outfile( xmlFilename );
        outfile.write( XMLConfig.c_str(), XMLConfig.length() );
        outfile.close();

        status = context.RunXmlScriptFromFile( xmlFilename );

        // Remove temporary configuration file.
        remove( xmlFilename );
#else
        status = context.RunXmlScript( XMLConfig.c_str() );
#endif
        if( status != XN_STATUS_OK )
            CV_Error(CV_StsError, ("Failed to apply XML configuration: " + std::string(xnGetStatusString(status))).c_str() );

        // Initialize generators.
        status = depthGenerator.Create( context );
        if( status != XN_STATUS_OK )
            CV_Error(CV_StsError, ("Failed to create depth generator: " + std::string(xnGetStatusString(status))).c_str() );
        // Bug fix: the original discarded this call's return value, so the
        // check below re-tested the depth generator's (stale) status instead
        // of the image generator's.
        status = imageGenerator.Create( context );
        if( status != XN_STATUS_OK )
            CV_Error(CV_StsError, ("Failed to create image generator: " + std::string(xnGetStatusString(status))).c_str() );

        // Start generating data.
        status = context.StartGeneratingAll();
        if( status != XN_STATUS_OK )
            CV_Error(CV_StsError, ("Failed to start generating OpenNI data: " + std::string(xnGetStatusString(status))).c_str() );

        readCamerasParams();

        outputMaps.resize( outputTypesCount );
    }
}
// Stops all data generation and releases OpenNI resources.
// NOTE(review): runs unconditionally even when Init() failed in the
// constructor - presumably safe on an uninitialized context; confirm.
CvCapture_OpenNI::~CvCapture_OpenNI()
{
context.StopGeneratingAll();
context.Shutdown();
}
// Reads camera intrinsics and sentinel values from the depth generator's
// device properties. Raises CV_Error if any property cannot be read.
// Results are stored in the baseline (mm), depthFocalLength_VGA (pixels),
// shadowValue and noSampleValue members.
void CvCapture_OpenNI::readCamerasParams()
{
// ZPPS = zero-plane pixel size (at SXGA resolution), in mm.
XnDouble pixelSize = 0;
if( depthGenerator.GetRealProperty( "ZPPS", pixelSize ) != XN_STATUS_OK )
CV_Error( CV_StsError, "Could not read pixel size!" );
// pixel size @ VGA = pixel size @ SXGA x 2
pixelSize *= 2.0; // in mm
// focal length of IR camera in pixels for VGA resolution
XnUInt64 zpd; // in mm
if( depthGenerator.GetIntProperty( "ZPD", zpd ) != XN_STATUS_OK )
CV_Error( CV_StsError, "Could not read virtual plane distance!" );
// LDDIS = distance between the IR projector and IR camera, in cm.
if( depthGenerator.GetRealProperty( "LDDIS", baseline ) != XN_STATUS_OK )
CV_Error( CV_StsError, "Could not read base line!" );
// baseline from cm -> mm
baseline *= 10;
// focal length from mm -> pixels (valid for 640x480)
depthFocalLength_VGA = (XnUInt64)((double)zpd / (double)pixelSize);
// Sentinel value the device reports for shadowed (occluded) pixels.
if( depthGenerator.GetIntProperty( "ShadowValue", shadowValue ) != XN_STATUS_OK )
CV_Error( CV_StsError, "Could not read shadow value!" );
// Sentinel value the device reports for pixels with no depth sample.
if( depthGenerator.GetIntProperty("NoSampleValue", noSampleValue ) != XN_STATUS_OK )
CV_Error( CV_StsError, "Could not read no sample value!" );
}
// Property access is not implemented in this initial version; any call
// aborts in debug builds via assert(0) and returns 0 otherwise.
double CvCapture_OpenNI::getProperty(int)
{
assert(0);
// TODO
return 0;
}
// Property setting is not implemented in this initial version; any call
// aborts in debug builds via assert(0) and reports success otherwise.
bool CvCapture_OpenNI::setProperty(int, double)
{
assert(0);
// TODO
return true;
}
bool CvCapture_OpenNI::grabFrame()
{
if( !isOpened() )
return false;
XnStatus status = context.WaitAnyUpdateAll();
if( status != XN_STATUS_OK )
return false;
depthGenerator.GetMetaData( depthMetaData );
imageGenerator.GetMetaData( imageMetaData );
return true;
}
// Copies the 16-bit depth buffer out of the OpenNI metadata into a CV_16UC1
// matrix, then overwrites every invalid pixel (no-sample, shadow, or zero
// depth) with the badDepth sentinel.
inline void getDepthMapFromMetaData( const xn::DepthMetaData& depthMetaData, cv::Mat& depthMap, XnUInt64 noSampleValue, XnUInt64 shadowValue, unsigned short badDepth )
{
    const int w = depthMetaData.XRes();
    const int h = depthMetaData.YRes();

    depthMap.create( h, w, CV_16UC1 );

    // Raw copy of the device buffer (XnDepthPixel is a 16-bit depth sample).
    // CV_Assert( sizeof(unsigned short) == sizeof(XnDepthPixel) );
    memcpy( depthMap.data, depthMetaData.Data(), w*h*sizeof(XnDepthPixel) );

    // Mark pixels carrying a device sentinel or zero depth as invalid.
    cv::Mat invalidMask = (depthMap == noSampleValue) | (depthMap == shadowValue) | (depthMap == 0);
    depthMap.setTo( cv::Scalar::all( badDepth ), invalidMask );
}
// Returns the latest depth map (CV_16UC1, mm) or 0 when no frame is available.
IplImage* CvCapture_OpenNI::retrieveDepthMap()
{
    const bool haveFrame = depthMetaData.XRes() > 0 && depthMetaData.YRes() > 0;
    if( !haveFrame )
        return 0;

    OutputMap& out = outputMaps[OPENNI_DEPTH_MAP];
    getDepthMapFromMetaData( depthMetaData, out.mat, noSampleValue, shadowValue, badDepth );
    return out.getIplImagePtr();
}
// Back-projects the depth map to a 3D point cloud (CV_32FC3, meters) using a
// pinhole model centered at the image middle. Invalid depth pixels are left
// as all-zero points. Returns 0 when no frame is available.
IplImage* CvCapture_OpenNI::retrievePointCloudMap()
{
int cols = depthMetaData.XRes(), rows = depthMetaData.YRes();
if( cols <= 0 || rows <= 0 )
return 0;
// X = (x - centerX) * depth / F[in pixels]
// Y = (y - centerY) * depth / F[in pixels]
// Z = depth
// Multiply by 0.001 to convert from mm in meters.
float mult = 0.001f / depthFocalLength_VGA;
// NOTE(review): the principal point is assumed to be the image center -
// confirm against the device calibration.
int centerX = cols >> 1;
int centerY = rows >> 1;
cv::Mat depth;
getDepthMapFromMetaData( depthMetaData, depth, noSampleValue, shadowValue, badDepth );
// Pixels without a valid measurement keep this all-zero point.
const float badPoint = 0;
cv::Mat XYZ( rows, cols, CV_32FC3, cv::Scalar::all(badPoint) );
for( int y = 0; y < rows; y++ )
{
for( int x = 0; x < cols; x++ )
{
unsigned short d = depth.at<unsigned short>(y, x);
// Check for invalid measurements
if( d == badDepth ) // not valid
continue;
// Fill in XYZ
cv::Point3f point3D;
point3D.x = (x - centerX) * d * mult;
point3D.y = (y - centerY) * d * mult;
point3D.z = d * 0.001f;
XYZ.at<cv::Point3f>(y,x) = point3D;
}
}
outputMaps[OPENNI_POINT_CLOUD_MAP].mat = XYZ;
return outputMaps[OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
}
// Computes a CV_32FC1 disparity map from the depth metadata using
// disparity = baseline * F / z (baseline in mm, F in pixels, z in mm, so the
// result is in pixels). Pixels with invalid depth keep the badDisparity value.
// Consistency fix: badDepth was declared `short` here but `unsigned short`
// everywhere else in this file (getDepthMapFromMetaData, the class constant,
// and the comparison against the unsigned short depth sample below); the
// parameter type is now unsigned short. All callers pass the value 0, so the
// change is value-compatible.
void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
                           XnUInt64 noSampleValue, XnUInt64 shadowValue,
                           unsigned short badDepth, unsigned int badDisparity )
{
    cv::Mat depth;
    getDepthMapFromMetaData( depthMetaData, depth, noSampleValue, shadowValue, badDepth );
    CV_Assert( depth.type() == CV_16UC1 );

    // disparity = baseline * F / z;
    float mult = baseline /*mm*/ * F /*pixels*/;

    disp.create( depth.size(), CV_32FC1);
    disp = cv::Scalar::all(badDisparity);
    for( int y = 0; y < disp.rows; y++ )
    {
        for( int x = 0; x < disp.cols; x++ )
        {
            unsigned short curDepth = depth.at<unsigned short>(y,x);
            if( curDepth != badDepth )
                disp.at<float>(y,x) = mult / curDepth;
        }
    }
}
// Returns the latest disparity map squeezed to 8 bits (CV_8UC1, pixels),
// or 0 when no frame is available.
IplImage* CvCapture_OpenNI::retrieveDisparityMap()
{
    if( !(depthMetaData.XRes() > 0 && depthMetaData.YRes() > 0) )
        return 0;

    // Compute in float first, then convert for display-friendly output.
    cv::Mat floatDisp;
    computeDisparity_32F( depthMetaData, floatDisp, baseline, depthFocalLength_VGA,
                          noSampleValue, shadowValue, badDepth, badDisparity );

    OutputMap& out = outputMaps[OPENNI_DISPARITY_MAP];
    floatDisp.convertTo( out.mat, CV_8UC1 );
    return out.getIplImagePtr();
}
// Returns the latest disparity map at full precision (CV_32FC1, pixels),
// or 0 when no frame is available.
IplImage* CvCapture_OpenNI::retrieveDisparityMap_32F()
{
    if( !(depthMetaData.XRes() > 0 && depthMetaData.YRes() > 0) )
        return 0;

    OutputMap& out = outputMaps[OPENNI_DISPARITY_MAP_32F];
    computeDisparity_32F( depthMetaData, out.mat, baseline, depthFocalLength_VGA,
                          noSampleValue, shadowValue, badDepth, badDisparity );
    return out.getIplImagePtr();
}
// Returns a CV_8UC1 mask: 255 where a valid depth measurement exists,
// 0 elsewhere. Returns 0 when no frame is available.
IplImage* CvCapture_OpenNI::retrieveValidDepthMask()
{
    if( depthMetaData.XRes() <= 0 || depthMetaData.YRes() <= 0 )
        return 0;

    cv::Mat depthMap;
    getDepthMapFromMetaData( depthMetaData, depthMap, noSampleValue, shadowValue, badDepth );

    OutputMap& out = outputMaps[OPENNI_VALID_DEPTH_MASK];
    out.mat = depthMap != badDepth;
    return out.getIplImagePtr();
}
// Copies the packed RGB24 buffer out of the OpenNI image metadata and swaps
// channels to produce a BGR image (OpenCV's native channel order).
inline void getBGRImageFromMetaData( const xn::ImageMetaData& imageMetaData, cv::Mat& bgrImage )
{
    const int w = imageMetaData.XRes();
    const int h = imageMetaData.YRes();

    // CV_Assert( 3*sizeof(uchar) == sizeof(XnRGB24Pixel) );
    cv::Mat rgb( h, w, CV_8UC3 );
    memcpy( rgb.data, imageMetaData.RGB24Data(), w*h*sizeof(XnRGB24Pixel) );

    cv::cvtColor( rgb, bgrImage, CV_RGB2BGR );
}
// Returns the latest color image (CV_8UC3, BGR) or 0 when no frame is available.
IplImage* CvCapture_OpenNI::retrieveBGRImage()
{
    if( !(imageMetaData.XRes() > 0 && imageMetaData.YRes() > 0) )
        return 0;

    OutputMap& out = outputMaps[OPENNI_BGR_IMAGE];
    getBGRImageFromMetaData( imageMetaData, out.mat );
    return out.getIplImagePtr();
}
// Returns the latest image converted to grayscale (CV_8UC1), or 0 when no
// frame is available.
IplImage* CvCapture_OpenNI::retrieveGrayImage()
{
    if( imageMetaData.XRes() <= 0 || imageMetaData.YRes() <= 0 )
        return 0;

    CV_Assert( imageMetaData.BytesPerPixel() == 3 ); // RGB

    // Go through the BGR conversion, then collapse to a single channel.
    cv::Mat bgr;
    getBGRImageFromMetaData( imageMetaData, bgr );

    OutputMap& out = outputMaps[OPENNI_GRAY_IMAGE];
    cv::cvtColor( bgr, out.mat, CV_BGR2GRAY );
    return out.getIplImagePtr();
}
// Dispatches to the retriever matching the requested OPENNI_* output map
// type. Returns 0 when the retriever has no data for the current frame.
IplImage* CvCapture_OpenNI::retrieveFrame( int dataType )
{
    CV_Assert( dataType < outputTypesCount && dataType >= 0);

    switch( dataType )
    {
    case OPENNI_DEPTH_MAP:
        return retrieveDepthMap();
    case OPENNI_POINT_CLOUD_MAP:
        return retrievePointCloudMap();
    case OPENNI_DISPARITY_MAP:
        return retrieveDisparityMap();
    case OPENNI_DISPARITY_MAP_32F:
        return retrieveDisparityMap_32F();
    case OPENNI_VALID_DEPTH_MASK:
        return retrieveValidDepthMask();
    case OPENNI_BGR_IMAGE:
        return retrieveBGRImage();
    case OPENNI_GRAY_IMAGE:
        return retrieveGrayImage();
    }

    return 0;
}
// Factory for the OpenNI capture backend. Returns 0 when the device/context
// could not be opened.
CvCapture* cvCreateCameraCapture_OpenNI( int /*index*/ )
{
    // TODO devices enumeration (if several Kinects)
    CvCapture_OpenNI* capture = new CvCapture_OpenNI();

    if( !capture->isOpened() )
    {
        delete capture;
        capture = 0;
    }
    return capture;
}
#endif
......@@ -121,6 +121,7 @@ CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc,
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_DShow( int index );
CvCapture* cvCreateCameraCapture_OpenNI( int index );
CVAPI(int) cvHaveImageReader(const char* filename);
CVAPI(int) cvHaveImageWriter(const char* filename);
......
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void colorizeDisparity( const Mat& gray, Mat& rgb, float S=1.f, float V=1.f )
{
CV_Assert( !gray.empty() );
CV_Assert( gray.type() == CV_8UC1 );
// TODO do maxDisp constant (when camera properties will be accessible)
double maxDisp = 0;
minMaxLoc( gray, 0, &maxDisp );
rgb.create( gray.size(), CV_8UC3 );
for( int y = 0; y < gray.rows; y++ )
{
for( int x = 0; x < gray.cols; x++ )
{
uchar d = gray.at<uchar>(y,x);
unsigned int H = ((uchar)maxDisp - d) * 240 / (uchar)maxDisp;
unsigned int hi = (H/60) % 6;
float f = H/60.f - H/60;
float p = V * (1 - S);
float q = V * (1 - f * S);
float t = V * (1 - (1 - f) * S);
Point3f res;
if( hi == 0 ) //R = V, G = t, B = p
res = Point3f( p, t, V );
if( hi == 1 ) // R = q, G = V, B = p
res = Point3f( p, V, q );
if( hi == 2 ) // R = p, G = V, B = t
res = Point3f( t, V, p );
if( hi == 3 ) // R = p, G = q, B = V
res = Point3f( V, q, p );
if( hi == 4 ) // R = t, G = p, B = V
res = Point3f( V, p, t );
if( hi == 5 ) // R = V, G = p, B = q
res = Point3f( q, p, V );
uchar b = (uchar)(std::max(0.f, std::min (res.x, 1.f)) * 255.f);
uchar g = (uchar)(std::max(0.f, std::min (res.y, 1.f)) * 255.f);
uchar r = (uchar)(std::max(0.f, std::min (res.z, 1.f)) * 255.f);
rgb.at<Point3_<uchar> >(y,x) = Point3_<uchar>(b, g, r);
}
}
}
// Prints a short description of the sample and the supported output map
// types to stdout. Fix: corrected the typo "ocluded" -> "occluded" in the
// OPENNI_VALID_DEPTH_MASK description.
void help()
{
    cout << "\nThis program demonstrates usage of Kinect sensor.\n"
            "The user gets some of the supported output images.\n"
            "\nAll supported output map types:\n"
            "1.) Data given from depth generator\n"
            " OPENNI_DEPTH_MAP - depth values in mm (CV_16UC1)\n"
            " OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)\n"
            " OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)\n"
            " OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)\n"
            " OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not occluded, not shaded etc.) (CV_8UC1)\n"
            "2.) Data given from RGB image generator\n"
            " OPENNI_BGR_IMAGE - color image (CV_8UC3)\n"
            " OPENNI_GRAY_IMAGE - gray image (CV_8UC1)\n"
         << endl;
}
/*
 * To work with Kinect the user must install the OpenNI library and the
 * PrimeSensor Module for OpenNI, and configure OpenCV with the WITH_OPENNI
 * flag ON (using CMake).
 */
int main()
{
help();
cout << "Kinect opening ..." << endl;
// Index 0 lets highgui probe the camera domains; CV_CAP_OPENNI would select
// the OpenNI backend explicitly.
VideoCapture capture(0); // or CV_CAP_OPENNI
cout << "done." << endl;
if( !capture.isOpened() )
{
cout << "Can not open a capture object." << endl;
return -1;
}
// Grab/retrieve loop: one grab() latches a frame pair, then each retrieve()
// pulls one of the OPENNI_* output maps from it.
for(;;)
{
Mat depthMap;
Mat validDepthMap;
Mat disparityMap;
Mat bgrImage;
Mat grayImage;
if( !capture.grab() )
{
cout << "Can not grab images." << endl;
return -1;
}
else
{
if( capture.retrieve( depthMap, OPENNI_DEPTH_MAP ) )
{
// Scale 16-bit mm depth down to 8 bits for display.
const float scaleFactor = 0.05f;
Mat show; depthMap.convertTo( show, CV_8UC1, scaleFactor );
imshow( "depth map", show );
}
if( capture.retrieve( disparityMap, OPENNI_DISPARITY_MAP ) )
{
#if 0 // original disparity
imshow( "original disparity map", disparityMap );
#else // colorized disparity for more visibility
Mat colorDisparityMap;
colorizeDisparity( disparityMap, colorDisparityMap );
// Black out pixels carrying the bad-disparity sentinel.
Mat validColorDisparityMap;
colorDisparityMap.copyTo( validColorDisparityMap, disparityMap != OPENNI_BAD_DISP_VAL );
imshow( "colorized disparity map", validColorDisparityMap );
#endif
}
if( capture.retrieve( validDepthMap, OPENNI_VALID_DEPTH_MASK ) )
imshow( "valid depth map", validDepthMap );
if( capture.retrieve( bgrImage, OPENNI_BGR_IMAGE ) )
imshow( "rgb image", bgrImage );
if( capture.retrieve( grayImage, OPENNI_GRAY_IMAGE ) )
imshow( "gray image", grayImage );
}
// Exit on any key press.
if( waitKey( 30 ) >= 0 )
break;
}
return 0;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册