kinect+openni2+nite2+opencv2.4手势跟踪
#include "stdafx.h"
#include <afxwin.h>
#include <array>
#include <iostream>
#include <map>
#include <vector>// OpenCV 头文件
#include "opencv2/opencv.hpp"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>// NiTE 头文件
#include <OpenNI.h>
#include <NiTE.h>using namespace std;
using namespace openni;
using namespace nite;
// Hand-segmentation tuning constants. Only handDepth is written by main();
// the others appear intended for a contour / grasp-detection stage that is
// not present in this file — kept for compatibility. TODO confirm callers.
const unsigned int roi_offset = 70;         // half-size of the hand region of interest, in pixels
const unsigned int BIN_THRESH_OFFSET = 5;   // depth band around the palm depth for binary thresholding
const unsigned int MEDIAN_BLUR_K = 5;       // median-blur kernel size for denoising
const double GRASPING_THRESH = 0.9;         // open/closed-hand decision ratio

// Drawing colours (OpenCV scalars are in B,G,R order).
const cv::Scalar COLOR_BLUE        = cv::Scalar( 240,  40,   0 );
const cv::Scalar COLOR_DARK_GREEN  = cv::Scalar(   0, 128,   0 );
const cv::Scalar COLOR_LIGHT_GREEN = cv::Scalar(   0, 255,   0 );
const cv::Scalar COLOR_YELLOW      = cv::Scalar(   0, 128, 200 );
const cv::Scalar COLOR_RED         = cv::Scalar(   0,   0, 255 );

// Palm depth of the tracked hand, scaled to the 0-255 display range in
// main(). Initialised so it can never be read as garbage before the first
// tracked frame.
float handDepth = 0.0f;
float handflag = 5;     // unused in this file — presumably a state flag; TODO confirm
float opencvframe = 3;  // unused in this file — TODO confirm intended use
// One convexity defect of a hand contour: the two hull endpoints of the
// defect, the deepest contour point between them, and its distance.
// (Layout presumably mirrors OpenCV's CvConvexityDefect — unused in this
// file; likely intended for a finger-detection stage.)
struct ConvexityDefect
{
    cv::Point start;        // defect start point on the convex hull
    cv::Point end;          // defect end point on the convex hull
    cv::Point depth_point;  // farthest contour point from the hull segment
    float depth;            // distance from depth_point to the hull segment
};
int main( int argc, char **argv )
{// 初始化OpenNIOpenNI::initialize();// 打开Kinect设备Device mDevice;mDevice.open( ANY_DEVICE );// 创建深度数据流VideoStream mDepthStream;mDepthStream.create( mDevice, SENSOR_DEPTH );// 设置VideoMode模式VideoMode mDepthMode;mDepthMode.setResolution( 640, 480 );mDepthMode.setFps( 30 );mDepthMode.setPixelFormat( PIXEL_FORMAT_DEPTH_1_MM );mDepthStream.setVideoMode(mDepthMode);// 同样的设置彩色数据流VideoStream mColorStream;mColorStream.create( mDevice, SENSOR_COLOR );// 设置VideoMode模式VideoMode mColorMode;mColorMode.setResolution( 640, 480 );mColorMode.setFps( 30 );mColorMode.setPixelFormat( PIXEL_FORMAT_RGB888 );mColorStream.setVideoMode( mColorMode);// 设置深度图像映射到彩色图像mDevice.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );// 初始化 NiTENiTE::initialize();// 创建HandTracker跟踪器HandTracker mHandTracker;if( mHandTracker.create() != nite::STATUS_OK ){cerr << "Can't create user tracker" << endl;return -1;}// 设定手势探测(GESTURE_WAVE、GESTURE_CLICK和GESTURE_HAND_RAISE)mHandTracker.startGestureDetection( GESTURE_WAVE );mHandTracker.startGestureDetection( GESTURE_CLICK );//mHandTracker.startGestureDetection( GESTURE_HAND_RAISE );//mHandTracker.setSmoothingFactor(0.1f);// 创建深度图像显示cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);// 创建彩色图像显示cv::namedWindow( "Color Image", CV_WINDOW_AUTOSIZE );// 保存点坐标map< HandId,vector<cv::Point2f> > mapHandData;map< HandId,float > mapHanddepth;vector<cv::Point2f> vWaveList;vector<cv::Point2f> vClickList;cv::Point2f ptSize( 3, 3 );array<cv::Scalar,8> aHandColor;aHandColor[0] = cv::Scalar( 255, 0, 0 );aHandColor[1] = cv::Scalar( 0, 255, 0 );aHandColor[2] = cv::Scalar( 0, 0, 255 );aHandColor[3] = cv::Scalar( 255, 255, 0 );aHandColor[4] = cv::Scalar( 255, 0, 255 );aHandColor[5] = cv::Scalar( 0, 255, 255 );aHandColor[6] = cv::Scalar( 255, 255, 255 );aHandColor[7] = cv::Scalar( 0, 0, 0 );// 环境初始化后,开始获取深度数据流和彩色数据流mDepthStream.start();mColorStream.start();// 获得最大深度值int iMaxDepth = mDepthStream.getMaxPixelValue();// startwhile( true ){/* POINT pt;GetCursorPos(&pt);cerr<<pt.x<<" 
"<<pt.y<<endl;*/// 创建OpenCV::Mat,用于显示彩色数据图像cv::Mat cImageBGR;// 读取彩色数据帧信息流VideoFrameRef mColorFrame;mColorStream.readFrame( &mColorFrame );// 将彩色数据流转换为OpenCV格式,记得格式是:CV_8UC3(含R\G\B)const cv::Mat mImageRGB( mColorFrame.getHeight(), mColorFrame.getWidth(),CV_8UC3, (void*)mColorFrame.getData() );// RGB ==> BGRcv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );// 获取手FrameHandTrackerFrameRef mHandFrame;if( mHandTracker.readFrame( &mHandFrame ) == nite::STATUS_OK ){openni::VideoFrameRef mDepthFrame = mHandFrame.getDepthFrame();// 将深度数据转换成OpenCV格式const cv::Mat mImageDepth( mDepthFrame.getHeight(), mDepthFrame.getWidth(), CV_16UC1, (void*)mDepthFrame.getData() );// 为了让深度图像显示的更加明显一些,将CV_16UC1 ==> CV_8U格式cv::Mat mScaledDepth, mImageBGR;mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );// 将灰度图转换成BGR格式,为了画出点的颜色坐标和轨迹cv::cvtColor( mScaledDepth, mImageBGR, CV_GRAY2BGR );// 检测手势const nite::Array<GestureData>& aGestures = mHandFrame.getGestures();for( int i = 0; i < aGestures.getSize(); ++ i ){const GestureData& rGesture = aGestures[i];const Point3f& rPos = rGesture.getCurrentPosition();cv::Point2f rPos2D;mHandTracker.convertHandCoordinatesToDepth( rPos.x, rPos.y, rPos.z, &rPos2D.x, &rPos2D.y );HandId mHandID;if( mHandTracker.startHandTracking( rPos, &mHandID ) != nite::STATUS_OK )cerr << "Can't track hand" << endl;}// 得到手心坐标const nite::Array<HandData>& aHands = mHandFrame.getHands();for( int i = 0; i < aHands.getSize(); ++ i ){const HandData& rHand = aHands[i];HandId uID = rHand.getId();if( rHand.isNew() ){mapHandData.insert( make_pair( uID, vector<cv::Point2f>() ) );mapHanddepth.insert(make_pair(uID,float()));}if( rHand.isTracking() ){// 将手心坐标映射到彩色图像和深度图像中const Point3f& rPos = rHand.getPosition();cv::Point2f rPos2D;mHandTracker.convertHandCoordinatesToDepth( rPos.x, rPos.y, rPos.z, &rPos2D.x, &rPos2D.y );handDepth = rPos.z * 255/iMaxDepth;cv::Point2f aPoint=rPos2D;cv::circle( cImageBGR, aPoint, 3, cv::Scalar( 0, 0, 255 ), 4 );cv::circle( mScaledDepth, aPoint, 3, 
cv::Scalar(0, 0, 255), 4);// 在彩色图像中画出手的轮廓边cv::Point2f ctlPoint, ctrPoint, cdlPoint, cdrPoint;ctlPoint.x = aPoint.x - 100;ctlPoint.y = aPoint.y - 100;ctrPoint.x = aPoint.x - 100;ctrPoint.y = aPoint.y + 100;cdlPoint.x = aPoint.x + 100;cdlPoint.y = aPoint.y - 100;cdrPoint.x = aPoint.x + 100;cdrPoint.y = aPoint.y + 100;cv::line( cImageBGR, ctlPoint, ctrPoint, cv::Scalar( 255, 0, 0 ), 3 );cv::line( cImageBGR, ctlPoint, cdlPoint, cv::Scalar( 255, 0, 0 ), 3 );cv::line( cImageBGR, cdlPoint, cdrPoint, cv::Scalar( 255, 0, 0 ), 3 );cv::line( cImageBGR, ctrPoint, cdrPoint, cv::Scalar( 255, 0, 0 ), 3 );// 在深度图像中画出手的轮廓边cv::Point2f mtlPoint, mtrPoint, mdlPoint, mdrPoint;mtlPoint.x = aPoint.x - 100;mtlPoint.y = aPoint.y - 100;mtrPoint.x = aPoint.x - 100;mtrPoint.y = aPoint.y + 100;mdlPoint.x = aPoint.x + 100;mdlPoint.y = aPoint.y - 100;mdrPoint.x = aPoint.x + 100;mdrPoint.y = aPoint.y + 100;cv::line( mScaledDepth, mtlPoint, mtrPoint, cv::Scalar( 255, 0, 0 ), 3 );cv::line( mScaledDepth, mtlPoint, mdlPoint, cv::Scalar( 255, 0, 0 ), 3 );cv::line( mScaledDepth, mdlPoint, mdrPoint, cv::Scalar( 255, 0, 0 ), 3 );cv::line( mScaledDepth, mtrPoint, mdrPoint, cv::Scalar( 255, 0, 0 ), 3 );mapHandData[uID].push_back( rPos2D );mapHanddepth[uID]=handDepth;}if( rHand.isLost() ){mapHandData.erase(uID );mapHanddepth.erase(uID);}}for( auto itHand = mapHandData.begin(); itHand != mapHandData.end(); ++ itHand ){const cv::Scalar& rColor = aHandColor[ itHand->first % aHandColor.size() ];const vector<cv::Point2f>& rPoints = itHand->second;for( int i = 1; i < rPoints.size(); ++ i ){cv::line( mImageBGR, rPoints[i-1], rPoints[i], rColor, 2 );cv::line( cImageBGR, rPoints[i-1], rPoints[i], rColor, 2 );}}cv::imshow( "Depth Image", mImageBGR );cv::imshow("Color Image", cImageBGR);mHandFrame.release();}else{cerr << "Can't get new frame" << endl;}// 按键“q”退出循环if( cv::waitKey( 1 ) == 'q' )break;}mHandTracker.destroy();mColorStream.destroy();NiTE::shutdown();OpenNI::shutdown();return 0;
}
在手的部位画出一个矩形框,并模拟画线,实现效果如下图所示。
kinect+openni2+nite2+opencv2.4手势跟踪相关推荐
- 基于meanshift的手势跟踪与电脑鼠标控制(手势交互系统)
基于meanshift的手势跟踪与电脑鼠标控制(手势交互系统) zouxy09@qq.com http://blog.csdn.net/zouxy09 一年多前开始接触计算机视觉这个领域的时候,年幼无 ...
- OpenNI2 + NiTE2开发教程
发现了一个非常不错的关于自然交互OpeNI2+NiTE2的资源,非常感谢Heresy,这里分享链接: OpenNI 2.x 教学文章(转载自:Heresy博客,地址:https://kheresy.w ...
- 【Unity/Kinect】获取预制的手势信息KinectInterop.HandState
Kinect使用了枚举KinectInterop.HandState来描述手势. 该手势指的是手掌的状态(张开/握拳),而不是说整个手臂的肢体动作(Gesture). 同样是需要嵌套在Kinect获取 ...
- Azure Kinect(K4A)人体识别跟踪进阶
近期有一个项目用到了Azure Kinect,之前Kinect 1与Kinect 2均使用过的老用户,自然不能放过这个机会.为此专门对Azure Kinect进行了学习,以下是这次自己调研摸索的一些成 ...
- Kinect1代+KinectSDK1.8+OpenNI2.2+NITE2.0+Opencv2.4.10环境配置(2)
昨天安装好了NITE2.0和OpenNI2.2并成功运行示例程序之后以为已经彻底完工,今天准备开始着手深度图像与彩色图像的对齐,然后一脸懵逼的发现..嗯,图像貌似没法显示( ̄口 ̄),苦酒入喉心作痛,然 ...
- [坠露木兰]Kinect Face Tracking SDK[Kinect人脸跟踪]2013-4-10更新
Kinect人脸跟踪Kinect Face Tracking SDK 本文持续维护地址:http://guoming.me/kinect-face-tracking 箫鸣琴奏_CPP程序侠 相关资料免 ...
- Kinect for Windows V2和V1对比开发___彩色数据获取并用OpenCV2.4.10显示
V1彩色分辨率:640x480 V2彩色分辨率:1920x1080 1,打开彩色图像帧的方式 对于V1: 使用NuiImageStreamOpen方法打开 hr = m_PNuiSensor-> ...
- 使用kinect和visual gesture builder建立手势库实现手势识别
目录 写在最前 第一部分 手势分割 第二部分 使用VGB实现手势识别[重点] 2.1 Unity3D手势识别工程建立 2.2 手势库建立及使用 第三部分 进阶手势识别 结语 写在最前 不知不觉距离上一 ...
- Kinect2.0骨骼跟踪与数据平滑
Kinect v1和Kinect v2传感器的配置比较: Kinect v1 Kinect v2 颜色(Color) 分辨率(Resolution) 640×480 1920× ...
最新文章
- gnome3.2 安装体验
- 写底部样式一定要加的属性
- python dict根据value找对应的key_一个不得不了解的Python库——collections
- Centos下GCC引用mysql头文件和库文件
- 如何用计算机打出love,游戏中名字的LOVE怎么用符号打出来?
- 深入理解Linux守护进程
- python数组遍历输出所有组合_python遍历列表和数组实例讲解
- 一个数据包的旅程_数据科学语言的个人旅程
- 【mysql基础知识】查询当前时间之前5分钟内的数据
- 高通计划通过多层级骁龙5G移动平台 加速5G商业化
- Visual Studio 2010 实用功能总结 II
- 如何使用IEDA连接数据库
- 【NIPS 2018】循环World模型促进策略演变
- Retinex图像增强算法
- C#测绘兰勃特墨卡托投影
- C# 获取PDF文档的字体信息及指定文字的坐标,宽度和高度
- TerraSolid工具试用系列2----TerraScan点云滤波(从点云中提取地面点)备注
- matlab程序设计题题库及答案,matlab程序设计例题及答案
- Android 应用在后台弹出提示相关的笔记1
- 带通滤波器幅频特性曲线图_滤波器知识,你所要的,都在这里