As shown in the figure above, this is surveillance footage of a traffic intersection. Each pixel in the video belongs to the static background most of the time and to the moving foreground only a small fraction of the time. So if we stack all of the frames in the video and take the per-pixel average, the averaged image will be fairly close to the actual background.
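Put differently (using symbols not in the original post), if I_t(x, y) is the pixel value at position (x, y) in frame t, taken per color channel, and the clip has N frames, the estimated background is just the per-pixel mean B(x, y) = (1/N) * Σ_{t=1}^{N} I_t(x, y). A pixel that is occluded by a passing vehicle for only a few frames is pulled back toward its dominant background value by the many frames in which it shows the background.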
Code:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <Eigen/Dense>
#include <string>

// Average the frames in [start, end) of the capture and return an 8-bit BGR image.
cv::Mat getAverageBG(cv::VideoCapture& capture, int start, int end) {
    long frameCount = end - start;
    int index = 0;
    int width  = capture.get(CV_CAP_PROP_FRAME_WIDTH);
    int height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);

    // Per-channel accumulators in double precision.
    // Note: OpenCV stores pixels in B, G, R order, so "averageBgR" actually holds
    // channel 0 (blue); the result is still correct because the channels are
    // written back in the same order below.
    Eigen::MatrixXd averageBgR = Eigen::MatrixXd::Zero(height, width);
    Eigen::MatrixXd averageBgG = Eigen::MatrixXd::Zero(height, width);
    Eigen::MatrixXd averageBgB = Eigen::MatrixXd::Zero(height, width);

    for (int i = start; i < end; i++) {
        index++;
        // Move the capture's internal pointer to the i-th frame.
        capture.set(CV_CAP_PROP_POS_FRAMES, i);
        cv::Mat curMat;
        if (!capture.read(curMat)) {
            break;  // stop if the frame cannot be decoded
        }
        for (int row = 0; row < height; row++) {
            cv::Vec3b* p = curMat.ptr<cv::Vec3b>(row);
            for (int col = 0; col < width; col++) {
                averageBgR(row, col) += p[col][0];
                averageBgG(row, col) += p[col][1];
                averageBgB(row, col) += p[col][2];
            }
        }
        std::cout << index << "/" << frameCount << std::endl;
    } // for every frame

    // Divide the per-pixel sums by the number of frames to get the average background.
    cv::Mat result(height, width, CV_8UC3);
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            cv::Vec3b p;
            p[0] = static_cast<uchar>(averageBgR(i, j) / (float)frameCount);
            p[1] = static_cast<uchar>(averageBgG(i, j) / (float)frameCount);
            p[2] = static_cast<uchar>(averageBgB(i, j) / (float)frameCount);
            result.at<cv::Vec3b>(i, j) = p;
        }
    }
    return result;
}

int main(int argc, char* argv[]) {
    cv::VideoCapture capture;
    if (argc < 3) {
        std::cout << "usage: " << argv[0] << " <input video> <output image>" << std::endl;
        return 1;
    }
    std::string fileName(argv[1]);
    std::string outName(argv[2]);
    if (!capture.open(fileName)) {
        std::cout << "cannot open video" << std::endl;
        return 1;
    }
    long frameCount = static_cast<long>(capture.get(CV_CAP_PROP_FRAME_COUNT));
    cv::Mat bg = getAverageBG(capture, 0, frameCount);
    cv::imwrite(outName, bg);
    cv::imshow("background", bg);
    cv::waitKey();
    return 0;
}
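For comparison, here is a minimal sketch (not from the original post) of the same averaging written against the OpenCV 3/4 API, where the CV_CAP_PROP_* constants have moved to cv::CAP_PROP_* and cv::accumulate can keep the per-pixel running sum in a floating-point image, so no Eigen matrices are needed. The command-line arguments are assumed to be the same as in the code above.

#include <opencv2/opencv.hpp>
#include <iostream>

int main(int argc, char* argv[]) {
    if (argc < 3) {
        std::cout << "usage: " << argv[0] << " <input video> <output image>" << std::endl;
        return 1;
    }
    cv::VideoCapture capture(argv[1]);
    if (!capture.isOpened()) {
        std::cout << "cannot open video" << std::endl;
        return 1;
    }
    cv::Mat frame, sum;
    long count = 0;
    while (capture.read(frame)) {              // sequential read, no explicit seek
        if (sum.empty())
            sum = cv::Mat::zeros(frame.size(), CV_64FC3);
        cv::accumulate(frame, sum);            // per-pixel running sum in double precision
        ++count;
    }
    if (count == 0) return 1;
    cv::Mat bg;
    sum.convertTo(bg, CV_8UC3, 1.0 / count);   // divide by frame count -> average background
    cv::imwrite(argv[2], bg);
    return 0;
}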
Result: (the averaged background image is shown here in the original post)
Original article: http://www.cnblogs.com/bluebean/p/5668624.html