今天继续。下面开始生成正负例来训练分类器,首先:
// TRAIN DETECTOR ========================================================== // Initialize structures tld.imgsize = size(tld.source.im0.input); //为fern准备的训练集 tld.X = cell(1,length(tld.source.idx)); //training data for fern tld.Y = cell(1,length(tld.source.idx)); %为nearest neighbor准备的训练集 tld.pEx = cell(1,length(tld.source.idx)); // training data for NN tld.nEx = cell(1,length(tld.source.idx)); //输入: //tld.source.bb:用户目标标定框 //tld.grid: 生成的gridbox信息矩阵 //输出: // overlap一维行向量,记录GRID中的各个gridbox与用户目标标定框的重叠率 overlap = bb_overlap(tld.source.bb,tld.grid);进入bb_overlap来看一下:
// Input double *bb1 = mxGetPr(prhs[0]); int M1 = mxGetM(prhs[0]); int N1 = mxGetN(prhs[0]);//4X1 double *bb2 = mxGetPr(prhs[1]); int M2 = mxGetM(prhs[1]); int N2 = mxGetN(prhs[1]);//6Xn(n表示gridbox总数) // Output if (N1 == 0 || N2 == 0) { N1 = 0; N2 = 0; } plhs[0] = mxCreateDoubleMatrix(N1, N2, mxREAL);//创建输出矩阵,1Xgridbox的数量 double *out = mxGetPr(plhs[0]); for (int j = 0; j < N2; j++) {//gridbox的数量 for (int i = 0; i < N1; i++) {//1 *out++ = bb_overlap(bb1 + M1*i, bb2 + M2*j);//计算重叠度 } }
// Overlap (intersection-over-union) of two axis-aligned bounding boxes.
// bb1, bb2: pointers to [col_min, row_min, col_max, row_max] (inclusive coordinates,
//           hence the +1 when measuring widths/heights below).
// Returns: intersection area / union area, in [0, 1].
double bb_overlap(double *bb1, double *bb2) {
    // Quick rejection: if the two rectangles do not intersect, the overlap is 0.
    if (bb1[0] > bb2[2]) { return 0.0; }
    if (bb1[1] > bb2[3]) { return 0.0; }
    if (bb1[2] < bb2[0]) { return 0.0; }
    if (bb1[3] < bb2[1]) { return 0.0; }
    // Width and height of the intersection rectangle.
    double colInt = min(bb1[2], bb2[2]) - max(bb1[0], bb2[0]) + 1;
    double rowInt = min(bb1[3], bb2[3]) - max(bb1[1], bb2[1]) + 1;
    double intersection = colInt * rowInt;// intersection area
    // Areas of the two input rectangles.
    double area1 = (bb1[2]-bb1[0]+1)*(bb1[3]-bb1[1]+1);
    double area2 = (bb2[2]-bb2[0]+1)*(bb2[3]-bb2[1]+1);
    return intersection / (area1 + area2 - intersection);// overlap ratio = intersection / union
}再接着
//输入: //tld.img{1}.input:输入图像,第一帧 //tld.bb(:,1):用户目标标定框 //输出: //tld.target:目标标定框中特定的图像 tld.target = img_patch(tld.img{1}.input,tld.bb(:,1));进入img_patch,这个函数比较庞大,先看其中用到的一部分:
//如果4个坐标值都是整数 if sum(abs(round(bb)-bb))==0 L = max([1 bb(1)]); T = max([1 bb(2)]); R = min([size(img,2) bb(3)]); B = min([size(img,1) bb(4)]); patch = img(T:B,L:R);//在不超过画面尺寸和小于1x1的情况下,取出BB框出的画面 % Sub-pixel accuracy else cp = 0.5 * [bb(1)+bb(3); bb(2)+bb(4)]-1;//bbox的中心坐标 center point %[1 0 -cp(1)] %[0 1 -cp(2)] %[0 0 1 ] H = [1 0 -cp(1); 0 1 -cp(2); 0 0 1]; bbW = bb(3,:)-bb(1,:);//宽 bbH = bb(4,:)-bb(2,:);//高 if bbW <= 0 || bbH <= 0 patch = []; return; end box = [-bbW/2 bbW/2 -bbH/2 bbH/2]; if size(img,3) == 3//如果图像有三个通道,即判断图片是否为真彩色 for i = 1:3 P = warp(img(:,:,i),inv(H),box); patch(:,:,i) = uint8(P); end else patch = warp(img,inv(H),box);//inv(H)=[1 0 cp(1); 0 1 cp(2); 0 0 1];平移变换 patch = uint8(patch); end end上面的函数功能就是对BB区域的图像提取,但是有针对坐标为整数和小数的处理,这里应该只用到整数部分,但至于小数坐标的处理跟踪了一下代码,发现是对图像作了平移的仿射变换,但是至于为什么要这么做,我也不理解,感觉直接舍去小数部分问题应该也不大吧(个人理解,没有看懂)。
好了下面开始产生正训练样本了:
//输入: //overlap:一维行向量,记录GRID中的各个gridbox与用户目标标定框的重叠率 //tld.p_par_init:opt.p_par_init = struct('num_closest',10,'num_warps',20,'noise',5,'angle',20,'shift',0.02,'scale',0.02); //输出: //pX:10 X length(idxP)*20 的矩阵(length(idxP)<=10,20 为 'num_warps'),列向量表示一个gridbox在10棵树上的13位有效code //pEx:225X1的列向量,各元素值为原像素值减去像素均值 //bbP:最靠近BBOX的gridbox,列向量表示该box的4个顶点 [pX,pEx,bbP] = tldGeneratePositiveData(tld,overlap,tld.img{1},tld.p_par_init); pY = ones(1,size(pX,2));%1 X length(idxP)*20 这个函数也是比较大的,但是还要耐心地往下看啊
pX = []; pEx = []; // Get closest bbox [~,idxP] = max(overlap);//表示行不管,只取列,整个表达式表示最大overlap所对应的列,一维 bbP0 = tld.grid(1:4,idxP);//1~4表示矩阵的4个顶点分布在四行,此取最靠近BBOX的gridbox // Get overlapping bboxes idxP = find(overlap > 0.6);//返回overlap > 0.6所对应的列索引 if length(idxP) > p_par.num_closest//如果overlap > 0.6的gridbox数大于10 [~,sIdx] = sort(overlap(idxP),'descend'); //降序排序 idxP = idxP(sIdx(1:p_par.num_closest));//取前p_par.num_closest个最大重叠度的bboxes所在的列 end bbP = tld.grid(:,idxP);//取出10个最大重叠度的gridboxes if isempty(bbP), return; end % Get hull bbH = bb_hull(bbP);%得到能包围所有bbP中boxes的最小矩形 cols = bbH(1):bbH(3); rows = bbH(2):bbH(4); im1 = im0; //返回一个225x1(pEx)的列向量,各元素值为原像素值减去像素均值 pEx = tldGetPattern(im1,bbP0,tld.model.patchsize);// if tld.model.fliplr pEx = [pEx tldGetPattern(im1,bbP0,tld.model.patchsize,1)]; end //返回20个正例 for i = 1:p_par.num_warps//p_par.num_warps=20 if i > 1 randomize = rand; // Sets the internal randomizer to the same state //patch_input = img_patch(im0.input,bbH,randomize,p_par); //返回将画面进行仿射变换后的patch patch_blur = img_patch(im0.blur,bbH,randomize,p_par);//bbH包围所有bbP中bboxes的最小矩形 //这个很重要,保证在C调用里的偏移的起始地址可以是一样的 im1.blur(rows,cols) = patch_blur;//把仿射变换后的图像放到原图像对应的位置(能包围所有bbP中boxes的最小矩形) //im1.input(rows,cols) = patch_input; end // Measures on blured image //单次返回10Xlength(idxP)的矩阵,列向量表示一个gridbox的10棵树上的13位code, //最后返回10Xlength(idxP)*20的矩阵 pX = [pX fern(5,im1,idxP,0)];//idxP :overlap > 0.6所对应的列索引 // Measures on input image //pEx(:,i) = tldGetPattern(im1,bbP0,tld.model.patchsize); //pEx = [pEx tldGetPattern(im1,tld.grid(1:4,idxP),tld.model.patchsize)]; end当然这个函数是不能这么草草了事的,还有三大函数需要进一步细看:
1.tldGetPattern()
nBB = size(bb,2);//得到bbP0(最靠近BBOX的gridbox)的列,值为1 pattern = zeros(prod(patchsize),nBB);//15*15 X 1 矩阵,返回矩阵 if ~exist('flip','var') flip= 0; end // for every bounding box for i = 1:nBB//1 // sample patch patch = img_patch(img.input,bb(:,i));//取出对应框中的图像 // flip if needed if flip patch = fliplr(patch); end // normalize size to 'patchsize' and normalize intensities to ZMUV //返回一个225x1的列向量,各元素值为原像素值减去像素均值 pattern(:,i) = tldPatch2Pattern(patch,patchsize);//patch压缩变换到patchsize大小,然后将各个元素减去元素均值 end切入到tldPatch2Pattern看一眼:
patch = imresize(patch,patchsize); // 'bilinear' is faster pattern = double(patch(:));//此时变成225X1的矩阵 pattern = pattern - mean(pattern);//mean(pattern)求列向量各元素的均值。 2.img_patch()(4个传参)
rand('state',randomize); randn('state',randomize); //'noise',5,'angle',20,'shift',0.02,'scale',0.02; NOISE = p_par.noise; ANGLE = p_par.angle; SCALE = p_par.scale; SHIFT = p_par.shift; cp = bb_center(bb)-1;//HULL矩形的中心 Sh1 = [1 0 -cp(1); 0 1 -cp(2); 0 0 1]; sca = 1-SCALE*(rand-0.5);%0.99~1.01 //[0.99~1.01 ] //[ 0.99~1.01 ] //[ 1 ] Sca = diag([sca sca 1]); ang = 2*pi/360*ANGLE*(rand-0.5);//-10 ~ 10度 实际为弧度 ca = cos(ang); sa = sin(ang); Ang = [ca, -sa; sa, ca]; Ang(end+1,end+1) = 1; shR = SHIFT*bb_height(bb)*(rand-0.5);//-0.01~0.01 倍的bb_height(bb) shC = SHIFT*bb_width(bb)*(rand-0.5);//-0.01~0.01 倍的bb_width(bb) Sh2 = [1 0 shC; 0 1 shR; 0 0 1]; bbW = bb_width(bb)-1; bbH = bb_height(bb)-1; box = [-bbW/2 bbW/2 -bbH/2 bbH/2]; H = Sh2*Ang*Sca*Sh1; bbsize = bb_size(bb); patch = uint8(warp(img,inv(H),box) + NOISE*randn(bbsize(1),bbsize(2)));//给图像叠加幅度为5的高斯噪声以上的代码注释就少了,因为全都是关于仿射变换的,具体可以参看仿射变换,大体就是作者在论文中提到的(shift±1%,scale±1%,in-plane rotation±10度)用来提高训练样本的多样性。
3.fern()(第一个传参为5,获得模式)
unsigned char *input = (unsigned char*) mxGetPr(mxGetField(prhs[1],0,"input")); unsigned char *blur = (unsigned char*) mxGetPr(mxGetField(prhs[1],0,"blur"));//获得仿射变换后的patch //if (mxGetM(prhs[1])!=iHEIGHT) { mexPrintf("fern: wrong input image.\n"); return; } // bbox indexes double *idx = mxGetPr(prhs[2]);//bbp所对应的列索引 int numIdx = mxGetM(prhs[2]) * mxGetN(prhs[2]);//1 X (<=10) // minimal variance double minVar = *mxGetPr(prhs[3]);//minVar=0 if (minVar > 0) { iimg(input,IIMG,iHEIGHT,iWIDTH);//返回IIMG,是图像进行矩形积分后的结果(运行不到这) iimg2(input,IIMG2,iHEIGHT,iWIDTH);//返回IIMG,是图像进行矩形平方积分后的结果(运行不到这) } // output patterns //创建输出矩阵:10X(<=10) plhs[0] = mxCreateDoubleMatrix(nTREES,numIdx,mxREAL); double *patt = mxGetPr(plhs[0]); //创建输出矩阵:1 X(<=10) plhs[1] = mxCreateDoubleMatrix(1,numIdx,mxREAL); double *status = mxGetPr(plhs[1]); for (int j = 0; j < numIdx; j++) {//(<=10) if (minVar > 0) { double bboxvar = bbox_var_offset(IIMG,IIMG2,BBOX+j*BBOX_STEP);//BBOX保存网格数据索引等数据(运行不到这) //E(p^2)-E^2(p) if (bboxvar < minVar) { continue; }(运行不到这) } status[j] = 1; double *tPatt = patt + j*nTREES; for (int i = 0; i < nTREES; i++) {//10 //返回对应gridbox及对应树的13位有效的像素比较码 tPatt[i] = (double) measure_tree_offset(blur, idx[j]-1, i);//idx:bbp } } return;进入measure_tree_offset
int index = 0; int *bbox = BBOX + idx_bbox*BBOX_STEP;//BBOX存储gridbox的索引等信息BBOX_STEP=7(因为grid的行为6) //OFF + bbox[5],该表达式表示该gridbox的特征点信息在OFF的偏移,bbox[5]表示图像横向上多少个网格点
//OFF = create_offsets(s,x);//记录各个特征点在各种尺度下box中的具体位置 int *off = OFF + bbox[5] + idx_tree*2*nFEAT;//OFF存储特征点在各个尺度框下的分布位置等 for (int i=0; i<nFEAT; i++) {//13 index<<=1; //off[0]为特征点的x坐标,off[1]为特征点的y坐标,bbox[0]为该gridbox在图画中的位置 int fp0 = img[off[0]+bbox[0]]; int fp1 = img[off[1]+bbox[0]]; if (fp0>fp1) { index |= 1;}//两个像素点比较并置位相应CODE off += 2;//移到下一个点对 } return index;看完上面,真的有点累啊,算了,把负例也看下好了,简单看了下,代码不算太多:
// Correct initial bbox tld.bb(:,1) = bbP(1:4,:);//最靠近BBOX的gridbox // Variance threshold tld.var = var(pEx(:,1)) / 2;//var计算方差;由于pEx已减去均值,方差即各元素平方和除以(n-1) // disp(['Variance : ' num2str(tld.var)]); // Generate Negative Examples //输入: //overlap:一维行向量,记录GRID中的各个gridbox与用户目标标定框的重叠率 //输出: //nX:通过patch variance挑出合适的patches,并提取fern特征赋给nX //nEx:一个225x100的矩阵,列向量各元素值为原像素值减去像素均值,100为num_patches [nX,nEx] = tldGenerateNegativeData(tld,overlap,tld.img{1});再进
// Measure patterns on all bboxes that are far from initial bbox //opt.n_par = struct('overlap',0.2,'num_patches',100); idxN = find(overlap<tld.n_par.overlap);//overlap < 0.2 [nX,status] = fern(5,img,idxN,tld.var/2);//此函数通过patch variance剔除一批,剩下的进入fern特征码提取 idxN = idxN(status==1); // bboxes far and with big variance,注意C++代码中的status[j] = 1;一句 nX = nX(:,status==1);//选出进入第二级分类器的负样本 // Randomly select 'num_patches' bboxes and measure patches idx = randvalues(1:length(idxN),tld.n_par.num_patches);//'num_patches'为100,即随机取出100个gridbox bb = tld.grid(:,idxN(idx)); nEx = tldGetPattern(img,bb,tld.model.patchsize);//不再注解。再进入fern(5,...),因为有tld.var/2,执行稍有不同,请参见上面即可。
好了,至此已经为分类器的训练产生了可用的正例和负例了。
TLD matlab源代码阅读(2),布布扣,bubuko.com
原文地址:http://blog.csdn.net/xuchenglu/article/details/26161675