--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#ifdef HAVE_OPENNI
+
+#define HACK_WITH_XML
+
+#ifdef HACK_WITH_XML
+#include <iostream>
+#include <fstream>
+#endif
+
+#include "XnCppWrapper.h"
+
+// Default OpenNI production-graph configuration applied at capture start-up:
+// requests mirrored 640x480 @ 30 FPS image and depth streams and disables
+// console/file logging. The PrimeSense license key below is the public one
+// shipped with OpenNI. NOTE: this string is written to disk / passed to
+// RunXmlScript verbatim — do not reformat it.
+const std::string XMLConfig =
+"<OpenNI>"
+ "<Licenses>"
+ "<License vendor=\"PrimeSense\" key=\"0KOIk2JeIBYClPWVnMoRKn5cdY4=\"/>"
+ "</Licenses>"
+ "<Log writeToConsole=\"false\" writeToFile=\"false\">"
+ "<LogLevel value=\"3\"/>"
+ "<Masks>"
+ "<Mask name=\"ALL\" on=\"true\"/>"
+ "</Masks>"
+ "<Dumps>"
+ "</Dumps>"
+ "</Log>"
+ "<ProductionNodes>"
+ "<Node type=\"Image\" name=\"Image1\">"
+ "<Configuration>"
+ "<MapOutputMode xRes=\"640\" yRes=\"480\" FPS=\"30\"/>"
+ "<Mirror on=\"true\"/>"
+ "</Configuration>"
+ "</Node> "
+ "<Node type=\"Depth\" name=\"Depth1\">"
+ "<Configuration>"
+ "<MapOutputMode xRes=\"640\" yRes=\"480\" FPS=\"30\"/>"
+ "<Mirror on=\"true\"/>"
+ "</Configuration>"
+ "</Node>"
+ "</ProductionNodes>"
+"</OpenNI>";
+
+// CvCapture implementation backed by an OpenNI device (e.g. a Kinect).
+// grabFrame() snapshots depth and image metadata; retrieveFrame(dataType)
+// converts them into one of the supported output maps on demand.
+class CvCapture_OpenNI : public CvCapture
+{
+public:
+ CvCapture_OpenNI();
+ virtual ~CvCapture_OpenNI();
+
+ virtual double getProperty(int);
+ virtual bool setProperty(int, double);
+ virtual bool grabFrame();
+ virtual IplImage* retrieveFrame(int);
+
+ // True if the OpenNI context (and its generators) were set up successfully.
+ bool isOpened() const;
+
+protected:
+ // Pairs a cv::Mat with a reusable IplImage header so the retrieve* methods
+ // can hand out an IplImage* without copying pixel data.
+ struct OutputMap
+ {
+ public:
+ cv::Mat mat;
+ IplImage* getIplImagePtr();
+ private:
+ IplImage iplHeader;
+ };
+
+ // Number of supported output map types (indices into outputMaps).
+ static const int outputTypesCount = 7;
+
+ // Sentinel values written into depth/disparity maps for invalid pixels.
+ static const unsigned short badDepth = 0;
+ static const unsigned int badDisparity = 0;
+
+ IplImage* retrieveDepthMap();
+ IplImage* retrievePointCloudMap();
+ IplImage* retrieveDisparityMap();
+ IplImage* retrieveDisparityMap_32F();
+ IplImage* retrieveValidDepthMask();
+ IplImage* retrieveBGRImage();
+ IplImage* retrieveGrayImage();
+
+ // Queries baseline, focal length and the invalid-depth sentinels from the
+ // depth generator (see readCamerasParams for the exact properties).
+ void readCamerasParams();
+
+ // OpenNI context
+ xn::Context context;
+ bool m_isOpened;
+
+ // Data generators with its metadata
+ xn::DepthGenerator depthGenerator;
+ xn::DepthMetaData depthMetaData;
+
+ xn::ImageGenerator imageGenerator;
+ xn::ImageMetaData imageMetaData;
+
+ // Cameras settings:
+ // Distance between IR projector and IR camera (in meters)
+ XnDouble baseline;
+ // Focal length for the IR camera in VGA resolution (in pixels)
+ XnUInt64 depthFocalLength_VGA;
+ // The value for shadow (occluded pixels)
+ XnUInt64 shadowValue;
+ // The value for pixels without a valid disparity measurement
+ XnUInt64 noSampleValue;
+
+ std::vector<OutputMap> outputMaps;
+};
+
+IplImage* CvCapture_OpenNI::OutputMap::getIplImagePtr()
+{
+    // Expose the cv::Mat as an IplImage without copying pixel data; the
+    // returned header stays valid only as long as "mat" is unchanged.
+    if( !mat.empty() )
+    {
+        iplHeader = IplImage(mat);
+        return &iplHeader;
+    }
+
+    return 0; // nothing retrieved yet
+}
+
+// Reports whether the OpenNI context was initialized successfully (set once
+// in the constructor; never changes afterwards).
+bool CvCapture_OpenNI::isOpened() const
+{
+ return m_isOpened;
+}
+
+// Initializes the OpenNI context, applies the XML configuration, creates the
+// depth and image generators and starts streaming. On any failure after a
+// successful context init, CV_Error is raised with the OpenNI status text.
+CvCapture_OpenNI::CvCapture_OpenNI()
+{
+    XnStatus status = XN_STATUS_OK;
+
+    // Initialize the context with default configuration.
+    status = context.Init();
+    m_isOpened = (status == XN_STATUS_OK);
+
+    if( m_isOpened )
+    {
+        // Configure the context.
+#ifdef HACK_WITH_XML
+        // Write configuration to a temporary file and load it from there.
+        // This is a hack, because there is a bug in RunXmlScript().
+        // TODO: remove hack when bug in RunXmlScript() will be fixed.
+        // NOTE(review): tmpnam() is race-prone; switch to mkstemp()/
+        // GetTempFileName() if this hack has to stay.
+        char xmlFilename[100];
+        tmpnam( xmlFilename );
+        std::ofstream outfile( xmlFilename );
+        outfile.write( XMLConfig.c_str(), XMLConfig.length() );
+        outfile.close();
+
+        status = context.RunXmlScriptFromFile( xmlFilename );
+
+        // Remove temporary configuration file.
+        remove( xmlFilename );
+#else
+        status = context.RunXmlScript( XMLConfig.c_str() );
+#endif
+        if( status != XN_STATUS_OK )
+            CV_Error(CV_StsError, ("Failed to apply XML configuration: " + std::string(xnGetStatusString(status))).c_str() );
+
+        // Initialize generators.
+        status = depthGenerator.Create( context );
+        if( status != XN_STATUS_OK )
+            CV_Error(CV_StsError, ("Failed to create depth generator: " + std::string(xnGetStatusString(status))).c_str() );
+        // Bug fix: the status returned by imageGenerator.Create() was
+        // previously discarded, so the check below re-tested the depth
+        // generator's status and image-generator failures went unnoticed.
+        status = imageGenerator.Create( context );
+        if( status != XN_STATUS_OK )
+            CV_Error(CV_StsError, ("Failed to create image generator: " + std::string(xnGetStatusString(status))).c_str() );
+
+        // Start generating data.
+        status = context.StartGeneratingAll();
+        if( status != XN_STATUS_OK )
+            CV_Error(CV_StsError, ("Failed to start generating OpenNI data: " + std::string(xnGetStatusString(status))).c_str() );
+
+        readCamerasParams();
+
+        outputMaps.resize( outputTypesCount );
+    }
+}
+
+// Stops all generators before shutting the context down; Shutdown() releases
+// the OpenNI context and everything created from it.
+CvCapture_OpenNI::~CvCapture_OpenNI()
+{
+ context.StopGeneratingAll();
+ context.Shutdown();
+}
+
+// Reads the intrinsic parameters needed for disparity and point-cloud
+// computation from the depth generator:
+//   ZPPS   - pixel size at SXGA (doubled below to get the VGA pixel size, mm)
+//   ZPD    - virtual plane distance (mm); divided by pixel size it yields the
+//            IR-camera focal length in pixels for 640x480
+//   LDDIS  - IR projector <-> IR camera baseline (cm, converted to mm)
+//   ShadowValue / NoSampleValue - sentinel depth values for occluded pixels
+//            and pixels with no valid measurement
+// Raises CV_Error if any property cannot be read.
+void CvCapture_OpenNI::readCamerasParams()
+{
+ XnDouble pixelSize = 0;
+ if( depthGenerator.GetRealProperty( "ZPPS", pixelSize ) != XN_STATUS_OK )
+ CV_Error( CV_StsError, "Could not read pixel size!" );
+
+ // pixel size @ VGA = pixel size @ SXGA x 2
+ pixelSize *= 2.0; // in mm
+
+ // focal length of IR camera in pixels for VGA resolution
+ XnUInt64 zpd; // in mm
+ if( depthGenerator.GetIntProperty( "ZPD", zpd ) != XN_STATUS_OK )
+ CV_Error( CV_StsError, "Could not read virtual plane distance!" );
+
+ if( depthGenerator.GetRealProperty( "LDDIS", baseline ) != XN_STATUS_OK )
+ CV_Error( CV_StsError, "Could not read base line!" );
+
+ // baseline from cm -> mm
+ baseline *= 10;
+
+ // focal length from mm -> pixels (valid for 640x480)
+ depthFocalLength_VGA = (XnUInt64)((double)zpd / (double)pixelSize);
+
+ if( depthGenerator.GetIntProperty( "ShadowValue", shadowValue ) != XN_STATUS_OK )
+ CV_Error( CV_StsError, "Could not read shadow value!" );
+
+ if( depthGenerator.GetIntProperty("NoSampleValue", noSampleValue ) != XN_STATUS_OK )
+ CV_Error( CV_StsError, "Could not read no sample value!" );
+}
+
+// Property access is not implemented yet; the assert keeps misuse loud in
+// debug builds while release builds fall through and return 0.
+double CvCapture_OpenNI::getProperty(int)
+{
+ assert(0);
+ // TODO
+ return 0;
+}
+
+// Property setting is not implemented yet; the assert keeps misuse loud in
+// debug builds while release builds fall through and report success.
+bool CvCapture_OpenNI::setProperty(int, double)
+{
+ assert(0);
+ // TODO
+ return true;
+}
+
+bool CvCapture_OpenNI::grabFrame()
+{
+    // Nothing to grab if the device was never opened.
+    if( !isOpened() )
+        return false;
+
+    // Block until the generators have been updated, then snapshot the
+    // metadata of both streams for the subsequent retrieve* calls.
+    if( context.WaitAnyUpdateAll() != XN_STATUS_OK )
+        return false;
+
+    depthGenerator.GetMetaData( depthMetaData );
+    imageGenerator.GetMetaData( imageMetaData );
+
+    return true;
+}
+
+// Copies the raw 16-bit depth buffer out of the metadata and collapses all
+// invalid measurements (no-sample, shadow or raw zero) to the single
+// sentinel value badDepth.
+inline void getDepthMapFromMetaData( const xn::DepthMetaData& depthMetaData, cv::Mat& depthMap, XnUInt64 noSampleValue, XnUInt64 shadowValue, unsigned short badDepth )
+{
+    const int w = depthMetaData.XRes();
+    const int h = depthMetaData.YRes();
+
+    depthMap.create( h, w, CV_16UC1 );
+
+    // XnDepthPixel is a 16-bit value, matching CV_16UC1 element size.
+    memcpy( depthMap.data, depthMetaData.Data(), w * h * sizeof(XnDepthPixel) );
+
+    cv::Mat invalidMask = (depthMap == noSampleValue) | (depthMap == shadowValue) | (depthMap == 0);
+    depthMap.setTo( cv::Scalar::all( badDepth ), invalidMask );
+}
+
+IplImage* CvCapture_OpenNI::retrieveDepthMap()
+{
+    // No depth frame has been grabbed yet.
+    if( depthMetaData.XRes() <= 0 || depthMetaData.YRes() <= 0 )
+        return 0;
+
+    OutputMap& out = outputMaps[OPENNI_DEPTH_MAP];
+    getDepthMapFromMetaData( depthMetaData, out.mat, noSampleValue, shadowValue, badDepth );
+
+    return out.getIplImagePtr();
+}
+
+IplImage* CvCapture_OpenNI::retrievePointCloudMap()
+{
+    const int cols = depthMetaData.XRes(), rows = depthMetaData.YRes();
+    if( cols <= 0 || rows <= 0 )
+        return 0;
+
+    cv::Mat depth;
+    getDepthMapFromMetaData( depthMetaData, depth, noSampleValue, shadowValue, badDepth );
+
+    // Back-project each pixel through the pinhole model:
+    //   X = (x - centerX) * depth / F[in pixels]
+    //   Y = (y - centerY) * depth / F[in pixels]
+    //   Z = depth
+    // The 0.001 factor converts depth from mm to meters.
+    const float mult = 0.001f / depthFocalLength_VGA;
+    const int centerX = cols >> 1;
+    const int centerY = rows >> 1;
+
+    const float badPoint = 0;
+    cv::Mat XYZ( rows, cols, CV_32FC3, cv::Scalar::all(badPoint) );
+
+    for( int y = 0; y < rows; y++ )
+    {
+        const unsigned short* depthRow = depth.ptr<unsigned short>(y);
+        cv::Point3f* xyzRow = XYZ.ptr<cv::Point3f>(y);
+
+        for( int x = 0; x < cols; x++ )
+        {
+            const unsigned short d = depthRow[x];
+            if( d == badDepth ) // invalid measurement, keep the bad point
+                continue;
+
+            xyzRow[x] = cv::Point3f( (x - centerX) * d * mult,
+                                     (y - centerY) * d * mult,
+                                     d * 0.001f );
+        }
+    }
+
+    outputMaps[OPENNI_POINT_CLOUD_MAP].mat = XYZ;
+
+    return outputMaps[OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
+}
+
+// Computes a dense CV_32FC1 disparity map from the depth stream using
+//   disparity = baseline[mm] * F[pixels] / depth[mm]
+// badDepth is the sentinel written by getDepthMapFromMetaData for invalid
+// pixels; those keep badDisparity in the output.
+// Fix: the badDepth parameter was declared "short" while the callers (and
+// getDepthMapFromMetaData) use "unsigned short" — a silent sign conversion
+// that would misbehave for sentinel values above SHRT_MAX.
+void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
+                           XnUInt64 noSampleValue, XnUInt64 shadowValue,
+                           unsigned short badDepth, unsigned int badDisparity )
+{
+    cv::Mat depth;
+    getDepthMapFromMetaData( depthMetaData, depth, noSampleValue, shadowValue, badDepth );
+    CV_Assert( depth.type() == CV_16UC1 );
+
+    // disparity = baseline * F / z;
+    float mult = baseline /*mm*/ * F /*pixels*/;
+
+    disp.create( depth.size(), CV_32FC1);
+    disp = cv::Scalar::all(badDisparity);
+    for( int y = 0; y < disp.rows; y++ )
+    {
+        for( int x = 0; x < disp.cols; x++ )
+        {
+            unsigned short curDepth = depth.at<unsigned short>(y,x);
+            // getDepthMapFromMetaData maps raw zeros to badDepth, so any
+            // pixel passing this test is non-zero: the division is safe.
+            if( curDepth != badDepth )
+                disp.at<float>(y,x) = mult / curDepth;
+        }
+    }
+}
+
+IplImage* CvCapture_OpenNI::retrieveDisparityMap()
+{
+    if( depthMetaData.XRes() <= 0 || depthMetaData.YRes() <= 0 )
+        return 0;
+
+    // Compute disparity in float precision first, then quantize to 8 bits
+    // for the CV_8UC1 output slot.
+    cv::Mat floatDisp;
+    computeDisparity_32F( depthMetaData, floatDisp, baseline, depthFocalLength_VGA,
+                          noSampleValue, shadowValue, badDepth, badDisparity );
+
+    OutputMap& out = outputMaps[OPENNI_DISPARITY_MAP];
+    floatDisp.convertTo( out.mat, CV_8UC1 );
+
+    return out.getIplImagePtr();
+}
+
+IplImage* CvCapture_OpenNI::retrieveDisparityMap_32F()
+{
+    if( depthMetaData.XRes() <= 0 || depthMetaData.YRes() <= 0 )
+        return 0;
+
+    // Write the full-precision disparity straight into the output slot.
+    OutputMap& out = outputMaps[OPENNI_DISPARITY_MAP_32F];
+    computeDisparity_32F( depthMetaData, out.mat, baseline, depthFocalLength_VGA,
+                          noSampleValue, shadowValue, badDepth, badDisparity );
+
+    return out.getIplImagePtr();
+}
+
+IplImage* CvCapture_OpenNI::retrieveValidDepthMask()
+{
+    if( depthMetaData.XRes() <= 0 || depthMetaData.YRes() <= 0 )
+        return 0;
+
+    // A pixel is valid iff its depth survived the bad-value masking done by
+    // getDepthMapFromMetaData (i.e. it differs from the badDepth sentinel).
+    cv::Mat depth;
+    getDepthMapFromMetaData( depthMetaData, depth, noSampleValue, shadowValue, badDepth );
+
+    OutputMap& out = outputMaps[OPENNI_VALID_DEPTH_MASK];
+    out.mat = depth != badDepth;
+
+    return out.getIplImagePtr();
+}
+
+// Copies the generator's packed RGB24 buffer and swaps channels to BGR,
+// OpenCV's native channel ordering.
+inline void getBGRImageFromMetaData( const xn::ImageMetaData& imageMetaData, cv::Mat& bgrImage )
+{
+    const int w = imageMetaData.XRes();
+    const int h = imageMetaData.YRes();
+
+    // XnRGB24Pixel is 3 tightly packed bytes, matching CV_8UC3 element size.
+    cv::Mat rgbImage( h, w, CV_8UC3 );
+    memcpy( rgbImage.data, imageMetaData.RGB24Data(), w * h * sizeof(XnRGB24Pixel) );
+
+    cv::cvtColor( rgbImage, bgrImage, CV_RGB2BGR );
+}
+
+IplImage* CvCapture_OpenNI::retrieveBGRImage()
+{
+    // No image frame has been grabbed yet.
+    if( imageMetaData.XRes() <= 0 || imageMetaData.YRes() <= 0 )
+        return 0;
+
+    OutputMap& out = outputMaps[OPENNI_BGR_IMAGE];
+    getBGRImageFromMetaData( imageMetaData, out.mat );
+
+    return out.getIplImagePtr();
+}
+
+IplImage* CvCapture_OpenNI::retrieveGrayImage()
+{
+    if( imageMetaData.XRes() <= 0 || imageMetaData.YRes() <= 0 )
+        return 0;
+
+    // The helper below expects 24-bit data from the image generator.
+    CV_Assert( imageMetaData.BytesPerPixel() == 3 ); // RGB
+
+    // Go through an intermediate BGR image (the helper already swaps
+    // RGB -> BGR), then collapse it to a single gray channel.
+    cv::Mat bgrImage;
+    getBGRImageFromMetaData( imageMetaData, bgrImage );
+    cv::cvtColor( bgrImage, outputMaps[OPENNI_GRAY_IMAGE].mat, CV_BGR2GRAY );
+
+    return outputMaps[OPENNI_GRAY_IMAGE].getIplImagePtr();
+}
+
+IplImage* CvCapture_OpenNI::retrieveFrame( int dataType )
+{
+    CV_Assert( dataType < outputTypesCount && dataType >= 0);
+
+    // Dispatch to the retriever matching the requested output map type;
+    // each retriever returns 0 when no frame has been grabbed yet.
+    switch( dataType )
+    {
+    case OPENNI_DEPTH_MAP:
+        return retrieveDepthMap();
+    case OPENNI_POINT_CLOUD_MAP:
+        return retrievePointCloudMap();
+    case OPENNI_DISPARITY_MAP:
+        return retrieveDisparityMap();
+    case OPENNI_DISPARITY_MAP_32F:
+        return retrieveDisparityMap_32F();
+    case OPENNI_VALID_DEPTH_MASK:
+        return retrieveValidDepthMask();
+    case OPENNI_BGR_IMAGE:
+        return retrieveBGRImage();
+    case OPENNI_GRAY_IMAGE:
+        return retrieveGrayImage();
+    default:
+        return 0;
+    }
+}
+
+
+// Factory used by highgui: returns an opened OpenNI capture, or 0 when the
+// device could not be initialized.
+CvCapture* cvCreateCameraCapture_OpenNI( int /*index*/ )
+{
+    // TODO devices enumeration (if several Kinects)
+    CvCapture_OpenNI* capture = new CvCapture_OpenNI();
+
+    if( !capture->isOpened() )
+    {
+        delete capture;
+        capture = 0;
+    }
+
+    return capture;
+}
+
+#endif
--- /dev/null
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+
+#include <iostream>
+
+using namespace cv;
+using namespace std;
+
+// Visualizes an 8-bit disparity map as a BGR image by mapping disparity to
+// hue (largest disparity -> red, zero -> blue) and converting HSV -> BGR by
+// hand. S and V are the fixed saturation and value of the palette.
+// Fix: the original divided by (uchar)maxDisp without checking it, causing
+// an integer division by zero for an all-zero input image.
+void colorizeDisparity( const Mat& gray, Mat& rgb, float S=1.f, float V=1.f )
+{
+    CV_Assert( !gray.empty() );
+    CV_Assert( gray.type() == CV_8UC1 );
+
+    // TODO do maxDisp constant (when camera properties will be accessible)
+    double maxDisp = 0;
+    minMaxLoc( gray, 0, &maxDisp );
+
+    rgb.create( gray.size(), CV_8UC3 );
+
+    const uchar maxD = (uchar)maxDisp;
+    if( maxD == 0 )
+    {
+        // No valid disparity anywhere; emit a black image instead of
+        // dividing by zero below.
+        rgb = Scalar::all(0);
+        return;
+    }
+
+    for( int y = 0; y < gray.rows; y++ )
+    {
+        for( int x = 0; x < gray.cols; x++ )
+        {
+            uchar d = gray.at<uchar>(y,x);
+            // Hue in [0, 240] degrees: 0 for maximum disparity, 240 for zero.
+            unsigned int H = (maxD - d) * 240 / maxD;
+
+            // Standard HSV -> RGB sector decomposition.
+            unsigned int hi = (H/60) % 6;
+            float f = H/60.f - H/60;
+            float p = V * (1 - S);
+            float q = V * (1 - f * S);
+            float t = V * (1 - (1 - f) * S);
+
+            Point3f res;
+
+            if( hi == 0 ) //R = V, G = t, B = p
+                res = Point3f( p, t, V );
+            if( hi == 1 ) // R = q, G = V, B = p
+                res = Point3f( p, V, q );
+            if( hi == 2 ) // R = p, G = V, B = t
+                res = Point3f( t, V, p );
+            if( hi == 3 ) // R = p, G = q, B = V
+                res = Point3f( V, q, p );
+            if( hi == 4 ) // R = t, G = p, B = V
+                res = Point3f( V, p, t );
+            if( hi == 5 ) // R = V, G = p, B = q
+                res = Point3f( q, p, V );
+
+            // Clamp to [0,1] and scale to 8-bit BGR.
+            uchar b = (uchar)(std::max(0.f, std::min (res.x, 1.f)) * 255.f);
+            uchar g = (uchar)(std::max(0.f, std::min (res.y, 1.f)) * 255.f);
+            uchar r = (uchar)(std::max(0.f, std::min (res.z, 1.f)) * 255.f);
+
+            rgb.at<Point3_<uchar> >(y,x) = Point3_<uchar>(b, g, r);
+        }
+    }
+}
+
+// Prints a short usage banner listing the output map types this sample can
+// display. (Typo fix in the banner text: "ocluded" -> "occluded".)
+void help()
+{
+    cout << "\nThis program demonstrates usage of Kinect sensor.\n"
+            "The user gets some of the supported output images.\n"
+            "\nAll supported output map types:\n"
+            "1.) Data given from depth generator\n"
+            " OPENNI_DEPTH_MAP - depth values in mm (CV_16UC1)\n"
+            " OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)\n"
+            " OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)\n"
+            " OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)\n"
+            " OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not occluded, not shaded etc.) (CV_8UC1)\n"
+            "2.) Data given from RGB image generator\n"
+            " OPENNI_BGR_IMAGE - color image (CV_8UC3)\n"
+            " OPENNI_GRAY_IMAGE - gray image (CV_8UC1)\n"
+         << endl;
+}
+
+/*
+ * To work with Kinect the user must install OpenNI library and PrimeSensorModule for OpenNI and
+ * configure OpenCV with WITH_OPENNI flag is ON (using CMake).
+ */
+// Opens the capture device, then loops: grab a frame and display whichever
+// of the supported output maps were retrieved. Exits on any key press or on
+// a grab/open failure.
+int main()
+{
+ help();
+
+ cout << "Kinect opening ..." << endl;
+ // NOTE(review): index 0 opens the default capture backend; for the Kinect
+ // the CV_CAP_OPENNI index is presumably intended (see the comment) —
+ // confirm that retrieve() with OPENNI_* channels works with plain 0.
+ VideoCapture capture(0); // or CV_CAP_OPENNI
+ cout << "done." << endl;
+
+ if( !capture.isOpened() )
+ {
+ cout << "Can not open a capture object." << endl;
+ return -1;
+ }
+
+ for(;;)
+ {
+ Mat depthMap;
+ Mat validDepthMap;
+ Mat disparityMap;
+ Mat bgrImage;
+ Mat grayImage;
+
+ if( !capture.grab() )
+ {
+ cout << "Can not grab images." << endl;
+ return -1;
+ }
+ else
+ {
+ if( capture.retrieve( depthMap, OPENNI_DEPTH_MAP ) )
+ {
+ // Scale 16-bit depth (mm) down to 8 bits for display.
+ const float scaleFactor = 0.05f;
+ Mat show; depthMap.convertTo( show, CV_8UC1, scaleFactor );
+ imshow( "depth map", show );
+ }
+
+ if( capture.retrieve( disparityMap, OPENNI_DISPARITY_MAP ) )
+ {
+#if 0 // original disparity
+ imshow( "original disparity map", disparityMap );
+#else // colorized disparity for more visibility
+ // Color-map the disparity, then blank out pixels that carry the
+ // bad-disparity sentinel so they stay black in the display.
+ Mat colorDisparityMap;
+ colorizeDisparity( disparityMap, colorDisparityMap );
+ Mat validColorDisparityMap;
+ colorDisparityMap.copyTo( validColorDisparityMap, disparityMap != OPENNI_BAD_DISP_VAL );
+ imshow( "colorized disparity map", validColorDisparityMap );
+#endif
+ }
+
+ if( capture.retrieve( validDepthMap, OPENNI_VALID_DEPTH_MASK ) )
+ imshow( "valid depth map", validDepthMap );
+
+ if( capture.retrieve( bgrImage, OPENNI_BGR_IMAGE ) )
+ imshow( "rgb image", bgrImage );
+
+ if( capture.retrieve( grayImage, OPENNI_GRAY_IMAGE ) )
+ imshow( "gray image", grayImage );
+ }
+
+ // ~30 ms pause keeps the UI responsive; any key ends the loop.
+ if( waitKey( 30 ) >= 0 )
+ break;
+ }
+
+ return 0;
+}