Last year I spent some time experimenting with the iPhone and the LEGO EV3 robot. At that point I had already built a Wi-Fi version of an EV3 SDK for iOS, which let iOS talk to the EV3, but that was still a long way from what I had originally imagined. See my earlier post:
[iOS + EV3 Hybrid Robot Programming, Part 1] What should iOS do? The EV3 can be much cooler!
http://blog.csdn.net/songrotek/article/details/37652771
The iPhone should be not just the EV3's brain, but also its eyes, its face, and its sense of orientation (the gyroscope). So I then moved on to computer vision, with the goal of turning the iPhone into the EV3's eyes.
The previous two posts on OpenCV for iOS only covered the basics of setting up the platform. This post shares a more concrete project: recognizing hand gestures with the phone's camera and using them to drive an EV3 robot around:
Video link:
http://v.youku.com/v_show/id_XODU1NTcxMDIw.html?from=s1.8-1-1.2
I can't release the whole program just yet, but I'll share the approach and the key pieces of code here.
Getting each camera frame delivered to processImage: was covered in the earlier OpenCV for iOS 2 post:
http://blog.csdn.net/songrotek/article/details/40781105
- (void)processImage:(cv::Mat &)image
{
    [self handDetectionWithImage:image];
}
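For completeness, here is a minimal sketch of the camera setup that drives this callback (the details are in the OpenCV for iOS 2 post). The imageView property and the preset values are just illustrative assumptions, not necessarily what the demo uses:
// In the view controller (which conforms to CvVideoCameraDelegate).
// Assumes a UIImageView property named imageView for the preview.
self.videoCamera = [[CvVideoCamera alloc] initWithParentView:self.imageView];
self.videoCamera.delegate = self;
self.videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
self.videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset640x480;
self.videoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
self.videoCamera.defaultFPS = 30;
[self.videoCamera start];   // from now on processImage: is called for every frame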
I put the whole gesture-detection pipeline into a single method, and below I walk through it step by step.
First, I use a color range in HLS space to extract skin-colored regions; if a hand is in view, the hand is of course part of that region.
Mat HLSimage;
Mat blurImage;
// Step 1.1: smooth the frame (optional)
//medianBlur(image, blurImage, 5);
// Step 1.2: convert to HLS color space
cvtColor(image, HLSimage, CV_BGR2HLS);
// Step 1.3: keep only pixels whose H, L, S values fall in the skin range
int imageRow = HLSimage.rows;
int imageCol = HLSimage.cols;
for (int row = 0; row < imageRow; row++) {
    for (int col = 0; col < imageCol; col++) {
        uchar H = HLSimage.at<cv::Vec3b>(row,col)[0];
        uchar L = HLSimage.at<cv::Vec3b>(row,col)[1];
        uchar S = HLSimage.at<cv::Vec3b>(row,col)[2];
        double LS_ratio = ((double) L) / ((double) S);
        bool skin_pixel = (S >= 50) && (LS_ratio > 0.5) && (LS_ratio < 3.0) && ((H <= 14) || (H >= 165));
        if (!skin_pixel) {
            // not skin: black out the pixel
            HLSimage.at<cv::Vec3b>(row,col)[0] = 0;
            HLSimage.at<cv::Vec3b>(row,col)[1] = 0;
            HLSimage.at<cv::Vec3b>(row,col)[2] = 0;
        }
    }
}
// Step 1.4: convert back to RGB
Mat skinImage;
cvtColor(HLSimage, skinImage, CV_HLS2RGB);
// Step 1.5: binarize and smooth the skin region
Mat gray;
cvtColor(skinImage, gray, CV_RGB2GRAY);
Mat binary;
threshold(gray, binary, 50, 255, THRESH_BINARY);
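As an aside, the same per-pixel rule can also be written with OpenCV's whole-matrix operations, which avoids the relatively slow .at<> access. This is just a sketch of an equivalent formulation (it assumes the same using-declarations for cv and std as the rest of the file), not the code used in the demo:
// Hypothetical vectorized version of the skin test above.
vector<Mat> hls;
split(HLSimage, hls);                       // hls[0] = H, hls[1] = L, hls[2] = S
Mat Lf, Sf, ratio;
hls[1].convertTo(Lf, CV_32F);
hls[2].convertTo(Sf, CV_32F);
divide(Lf, Sf, ratio);                      // L/S; cv::divide defines x/0 as 0
Mat skinMask = (hls[2] >= 50) & (ratio > 0.5f) & (ratio < 3.0f)
             & ((hls[0] <= 14) | (hls[0] >= 165));
Mat notSkin = ~skinMask;
HLSimage.setTo(Scalar(0, 0, 0), notSkin);   // black out everything that is not skin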
Skin color alone can't tell apart regions with the same tone, for example the hand and the face, so I separate them along their edges; the edges show up quite clearly in the U channel:
// Step 2.1: convert to YUV
Mat yuvImage;
cvtColor(image, yuvImage, CV_BGR2YUV);
// Step 2.2: take the U channel
vector<Mat> yuvImages;
split(yuvImage, yuvImages);
Mat& uImage = yuvImages[1];
// Step 2.3: morphological gradient to pull out the edges, then threshold them away
Mat structure_element(5, 5, CV_8U, Scalar(1));
morphologyEx(uImage, uImage, MORPH_GRADIENT, structure_element);
threshold(uImage, uImage, 10, 255, THRESH_BINARY_INV|THRESH_OTSU);
// Step 2.4: smooth the skin mask from Step 1 and cut it along the U-channel edges
medianBlur(binary, binary, 5);
//morphologyEx( binary, binary, MORPH_CLOSE,Mat());
//morphologyEx( binary, binary, MORPH_OPEN,Mat());
// per-pixel AND of the two masks (bitwise_and(uImage, binary, binary) would do the same)
for (int row = 0; row < imageRow; row++) {
    for (int col = 0; col < imageCol; col++) {
        binary.at<uchar>(row,col) = uImage.at<uchar>(row,col) & binary.at<uchar>(row,col);
    }
}
// Step 3.1: find the contours in the combined mask
vector<vector<cv::Point>> contours;
vector<Vec4i> hierarchy;
findContours( binary, contours, hierarchy,
              CV_RETR_TREE, CV_CHAIN_APPROX_NONE );
// Step 3.2: pick the largest contour (largest by number of points)
int indexOfBiggestContour = -1;
int sizeOfBiggestContour = 0;
for (int i = 0; i < contours.size(); i++){
    if(contours[i].size() > sizeOfBiggestContour){
        sizeOfBiggestContour = int(contours[i].size());
        indexOfBiggestContour = i;
    }
}
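A note on this choice: "largest" here means the contour with the most points. A common alternative (not what the demo uses) is to pick the contour with the largest enclosed area via contourArea; a sketch:
// Hypothetical variant: select the largest contour by area instead of point count.
double biggestArea = 0;
int indexOfBiggestContourByArea = -1;
for (int i = 0; i < (int)contours.size(); i++) {
    double area = contourArea(contours[i]);   // area enclosed by the contour
    if (area > biggestArea) {
        biggestArea = area;
        indexOfBiggestContourByArea = i;
    }
}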
// Step 3.3: inspect the contour and extract the hand parameters
if(indexOfBiggestContour > -1 && sizeOfBiggestContour > 400)
{
    // approximate the contour with a polygon
    approxPolyDP(Mat(contours[indexOfBiggestContour]), contours[indexOfBiggestContour], 1.5, true);
    // bounding rectangle and fitted ellipse of the contour
    cv::Rect rect = boundingRect(Mat(contours[indexOfBiggestContour]));
    cv::RotatedRect rotatedRect = fitEllipse(Mat(contours[indexOfBiggestContour]));
    angle = rotatedRect.angle;                                   // tilt of the hand
    power = rotatedRect.size.height/rotatedRect.size.width;      // elongation of the hand
    //NSLog(@"power:%f angle:%f\n",power,angle);
    //ellipse(image, rotatedRect, Scalar(0,0,200));
    Point2f rect_points[4];
    rotatedRect.points( rect_points );
    for( int j = 0; j < 4; j++ )
        line( image, rect_points[j], rect_points[(j+1)%4], Scalar(0,0,200), 1, 8 );
    // expand the bounding box into a square crop around the hand
    Mat temp = binary;
    cv::Rect saveRect;
    if (rect.width > rect.height) {
        saveRect = cv::Rect(rect.x, rect.y - (rect.width/2 - rect.height/2), rect.width, rect.width);
    } else {
        saveRect = cv::Rect(rect.x - (rect.height/2 - rect.width/2), rect.y, rect.height, rect.height);
    }
    //tempRect = CGRectMake(saveRect.x, saveRect.y, saveRect.width, saveRect.height);
    if (saveRect.x >= 0 && saveRect.y >= 0 && saveRect.x+saveRect.width <= temp.cols && saveRect.y+saveRect.height <= temp.rows) {
        Mat ROIImage;
        ROIImage = temp(saveRect);
        cv::Size size(96,96);
        resize(ROIImage, ROIImage, size);
        tempImage = [self UIImageFromCVMat:ROIImage];
        rectangle(image, saveRect.tl(), saveRect.br(), Scalar(0,0,200));
    }
    // draw the hand contour onto the frame
    drawContours(image, contours, indexOfBiggestContour, Scalar(255,100,100));
Once the hand contour is available, I detect U-shaped curves along it to extract the fingertips:
    // detect fingertips along the contour
    vector<cv::Point> uPoints;
    uPoints = detectUcurveWithContour(contours[indexOfBiggestContour]);
    for (int i = 0; i < uPoints.size(); i++) {
        circle(image, uPoints[i], 3, Scalar(100,255,255), 2);
    }
    fingerTipsNum = (int)uPoints.size();
} // end of the biggest-contour check
vector<cv::Point> detectUcurveWithContour(vector<cv::Point> contour)
{
    cv::Rect rect = boundingRect(contour);
    float toleranceMin = rect.height/5;
    //float toleranceMax = rect.height*0.8;
    // Step 0: lightly smooth the contour with a 3-point moving average
    for (int i = 1; i < contour.size() - 1; i++) {
        contour[i].x = (contour[i-1].x + contour[i].x + contour[i+1].x)/3;
        contour[i].y = (contour[i-1].y + contour[i].y + contour[i+1].y)/3;
    }
    vector<cv::Point> uPoints;
    // Step 1: signed angle at each point, formed with the neighbors `step` points away
    vector<float> angles;
    int size = int(contour.size());
    int step = 5;
    for (int i = 0; i < size; i++) {
        int index1 = i - step;
        int index2 = i;
        int index3 = i + step;
        index1 = index1 < 0 ? index1 + size : index1;
        index3 = index3 >= size ? index3 - size : index3;
        angles.push_back(getAngleWithDirection(contour[index1], contour[index2], contour[index3]));
    }
    // Step 2: keep points where the angle reaches a local minimum (the tip of a U-shaped turn)
    float thresholdAngleMax = 50;
    //float thresholdAngleMin = 0;
    for (int i = 0; i < size; i++) {
        int index1 = i - 1;
        int index2 = i;
        int index3 = i + 1;
        int index4 = i + step;
        int index5 = i - step;
        index1 = index1 < 0 ? index1 + size : index1;
        index3 = index3 >= size ? index3 - size : index3;
        index5 = index5 < 0 ? index5 + size : index5;
        index4 = index4 >= size ? index4 - size : index4;
        if (angles[index2] < angles[index1] && angles[index2] < angles[index3] && angles[i] > 0 && angles[i] < thresholdAngleMax) {
            // the two sides of the U must be long enough relative to the hand size
            float dis1 = distanceP2P(contour[i], contour[index4]);
            float dis2 = distanceP2P(contour[index5], contour[i]);
            //NSLog(@"dis:%f,tor:%f",dis,toleranceMin);
            if (dis1 > toleranceMin || dis2 > toleranceMin) {
                uPoints.push_back(contour[i]);
                //NSLog(@"angel:%f",angles[i]);
            }
        }
    }
    return uPoints;
}
The helper functions are written in plain C++:
// Euclidean distance between two points
float distanceP2P(cv::Point a, cv::Point b){
    float d = sqrt(fabs( pow(a.x-b.x,2) + pow(a.y-b.y,2) ));
    return d;
}
// Angle at f between the rays f->s and f->e, signed by the turning direction
float getAngleWithDirection(cv::Point s, cv::Point f, cv::Point e){
    float l1 = distanceP2P(f,s);
    float l2 = distanceP2P(f,e);
    float dot = (s.x-f.x)*(e.x-f.x) + (s.y-f.y)*(e.y-f.y);
    float angle = acos(dot/(l1*l2));
    angle = angle*180/M_PI;
    // the sign of the 2D cross product tells which way the turn from s through f to e goes
    cv::Point f2s = cv::Point(s.x - f.x, s.y - f.y);
    cv::Point f2e = cv::Point(e.x - f.x, e.y - f.y);
    float direction = f2s.x*f2e.y - f2e.x*f2s.y;
    if (direction > 0 ) {
        return angle;
    } else {
        return -angle;
    }
}
// Unsigned version of the same angle
float getAngle(cv::Point s, cv::Point f, cv::Point e){
    float l1 = distanceP2P(f,s);
    float l2 = distanceP2P(f,e);
    float dot = (s.x-f.x)*(e.x-f.x) + (s.y-f.y)*(e.y-f.y);
    float angle = acos(dot/(l1*l2));
    angle = angle*180/M_PI;
    return angle;
}
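To spell out what getAngleWithDirection returns: it is the angle at the middle point f between the rays f→s and f→e, theta = arccos( ((s-f)·(e-f)) / (|s-f|·|e-f|) ), with its sign taken from the 2D cross product (s-f)×(e-f), i.e. from the direction the contour turns at f. A fingertip then shows up in Step 2 of detectUcurveWithContour as a point where this signed angle is positive, smaller than 50 degrees, and a local minimum along the contour.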
The detection result (contour, boxes, and fingertip markers drawn on the frame) is shown in the screenshot in the original post.
With good lighting, the detection works quite well.
The next step is to use the gesture result to control the EV3.
The basic code:
if (fingerTipsNum >= 3) {
    // open hand: drive forward; elongation sets the throttle, tilt steers
    int leftPower = int(power * 40 + (angle - 90)*0.4);
    int rightPower = int(power * 40 - (angle - 90)*0.4);
    leftPower = leftPower > 100 ? 100 : leftPower;
    rightPower = rightPower > 100 ? 100 : rightPower;
    leftPower = leftPower < -100 ? -100 : leftPower;
    rightPower = rightPower < -100 ? -100 : rightPower;
    NSData *data = [EV3DirectCommander turnMotorsAtPort:EV3OutputPortB power:leftPower port:EV3OutputPortD power:rightPower];
    [[EADSessionController sharedController] writeData:data];
} else if (fingerTipsNum == 2) {
    // two fingers: same powers but negated, so the robot backs up
    int leftPower = int(power * 40 + (angle - 90)*0.4);
    int rightPower = int(power * 40 - (angle - 90)*0.4);
    leftPower = leftPower > 100 ? 100 : leftPower;
    rightPower = rightPower > 100 ? 100 : rightPower;
    leftPower = leftPower < -100 ? -100 : leftPower;
    rightPower = rightPower < -100 ? -100 : rightPower;
    NSData *data = [EV3DirectCommander turnMotorsAtPort:EV3OutputPortB power:-leftPower port:EV3OutputPortD power:-rightPower];
    [[EADSessionController sharedController] writeData:data];
} else if (fingerTipsNum <= 1) {
    // fist or a single finger: stop both motors
    NSData *data = [EV3DirectCommander turnMotorAtPort:EV3OutputPortBD power:0];
    [[EADSessionController sharedController] writeData:data];
}
So holding up a different number of fingers issues a different command: three or more fingers drive the robot forward, with the hand's elongation acting as the throttle and its tilt as the steering; two fingers reverse; one or none stops the motors.
That's the overall idea. Looking forward to hearing your thoughts!
Copyright notice: this is an original post by the author; please do not repost without permission.
Original post: OpenCV for iOS 3 & Fun with EV3: Hand Gesture Detection on the iPhone to Control an EV3 Robot
http://blog.csdn.net/songrotek/article/details/47659417