The previous article showed how to build a simple object-tracking model with the OpenCV API; this one customizes the model at a deeper level.
First, the official sample source code:
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <cstring>
#include "samples_utility.hpp"

using namespace std;
using namespace cv;

// prototype of the function for the custom feature extractor
void sobelExtractor(const Mat img, const Rect roi, Mat& feat);

int main( int argc, char** argv ){
  // show help
  if(argc<2){
    cout<<
      " Usage: tracker <video_name>\n"
      " examples:\n"
      " example_tracking_kcf Bolt/img/%04d.jpg\n"
      " example_tracking_kcf faceocc2.webm\n"
      << endl;
    return 0;
  }

  // declares all required variables
  Rect2d roi;
  Mat frame;

  //! [param]
  TrackerKCF::Params param;
  param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
  param.desc_npca = 0;
  param.compress_feature = true;
  param.compressed_size = 2;
  //! [param]

  // create a tracker object
  //! [create]
  Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
  //! [create]

  //! [setextractor]
  tracker->setFeatureExtractor(sobelExtractor);
  //! [setextractor]

  // set input video
  std::string video = argv[1];
  VideoCapture cap(video);

  // get bounding box
  cap >> frame;
  roi=selectROI("tracker",frame);

  // quit if ROI was not selected
  if(roi.width==0 || roi.height==0)
    return 0;

  // initialize the tracker
  tracker->init(frame,roi);

  // perform the tracking process
  printf("Start the tracking process, press ESC to quit.\n");
  for ( ;; ){
    // get frame from the video
    cap >> frame;

    // stop the program if no more images
    if(frame.rows==0 || frame.cols==0)
      break;

    // update the tracking result
    tracker->update(frame,roi);

    // draw the tracked object
    rectangle( frame, roi, Scalar( 255, 0, 0 ), 2, 1 );

    // show image with the tracked object
    imshow("tracker",frame);

    // quit on ESC button
    if(waitKey(1)==27)break;
  }
  return 0;
}

void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
  Mat sobel[2];
  Mat patch;
  Rect region=roi;

  //! [insideimage]
  // extract patch inside the image
  if(roi.x<0){region.x=0;region.width+=roi.x;}
  if(roi.y<0){region.y=0;region.height+=roi.y;}
  if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
  if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
  if(region.width>img.cols)region.width=img.cols;
  if(region.height>img.rows)region.height=img.rows;
  //! [insideimage]

  patch=img(region).clone();
  cvtColor(patch,patch, COLOR_BGR2GRAY);

  //! [padding]
  // add some padding to compensate when the patch is outside image border
  int addTop,addBottom, addLeft, addRight;
  addTop=region.y-roi.y;
  addBottom=(roi.height+roi.y>img.rows?roi.height+roi.y-img.rows:0);
  addLeft=region.x-roi.x;
  addRight=(roi.width+roi.x>img.cols?roi.width+roi.x-img.cols:0);
  copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
  //! [padding]

  //! [sobel]
  Sobel(patch, sobel[0], CV_32F,1,0,1);
  Sobel(patch, sobel[1], CV_32F,0,1,1);
  merge(sobel,2,feat);
  //! [sobel]

  //! [postprocess]
  feat.convertTo(feat,CV_64F);
  feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
  //! [postprocess]
}
This section explains how to set up customized parameters and use your own feature-extractor function to build a CN tracker, i.e. tracking based on adaptive color attributes (Color Names).
1. Set up custom parameters
TrackerKCF::Params param;
param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
param.desc_npca = 0;
param.compress_feature = true;
param.compressed_size = 2;
First, create a parameters object; every tracker type has its own parameter structure, and since the KCF algorithm is used here we modify the KCF parameters.
With this setup, the grayscale value (1 dimension) and the color-names features (10 dimensions) are merged into an 11-dimensional feature, which is then compressed down to 2 dimensions.
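As a point of comparison, here is a minimal sketch of an alternative configuration that feeds the same descriptors to the tracker without the PCA compression step. It assumes the same headers and using-declarations as the sample above; desc_pca, desc_npca, compress_feature, resize and max_patch_size are all fields of TrackerKCF::Params in the OpenCV 3.x contrib tracking module, but check your version's header before relying on them.
TrackerKCF::Params param;
param.desc_pca  = 0;                                  // nothing goes through the PCA branch
param.desc_npca = TrackerKCF::GRAY | TrackerKCF::CN;  // use the 11-dimensional feature as-is
param.compress_feature = false;                       // skip the compression step entirely
param.resize           = true;                        // let KCF shrink large patches for speed
param.max_patch_size   = 80 * 80;                     // upper bound on the tracked patch area
Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
The trade-off is speed: without compression the correlation filter has to work on all 11 feature channels instead of 2.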
2. Use a custom feature-extractor function
Define your own feature-extractor function for the CN tracker. Two things to keep in mind:
The extracted feature must have the same size as the bounding box.
Only features that can be compared with the Euclidean distance can be used; LBP features, for example, are not suitable because they are compared with the Hamming distance.
Because the feature size must match the bounding box, bounding boxes that partially fall outside the image need special care. As shown below, the sample first clips the ROI so that only the part inside the image is copied:
// extract patch inside the image
if(roi.x<0){region.x=0;region.width+=roi.x;}
if(roi.y<0){region.y=0;region.height+=roi.y;}
if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
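Clipping alone leaves the patch smaller than the requested bounding box, so the extractor then pads it back to the ROI size; these are the corresponding lines from sobelExtractor above:
// add some padding to compensate when the patch is outside the image border
int addTop    = region.y - roi.y;
int addBottom = (roi.height + roi.y > img.rows ? roi.height + roi.y - img.rows : 0);
int addLeft   = region.x - roi.x;
int addRight  = (roi.width + roi.x > img.cols ? roi.width + roi.x - img.cols : 0);
copyMakeBorder(patch, patch, addTop, addBottom, addLeft, addRight, BORDER_REPLICATE);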
3. Define the feature
Here the extracted features are the responses of a Sobel filter in the x and y directions, merged into a single two-channel matrix:
Sobel(patch, sobel[0], CV_32F,1,0,1);
Sobel(patch, sobel[1], CV_32F,0,1,1);
merge(sobel,2,feat);
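The merged result is a two-channel CV_32F matrix of the same size as the padded patch, which is exactly the size requirement from step 2. A quick sanity check (hypothetical, not part of the official sample) could be:
CV_Assert(feat.type() == CV_32FC2 && feat.size() == patch.size());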
4. Post-processing
Make sure the feature is converted to CV_64F and normalized to the range (-0.5, 0.5):
feat.convertTo(feat,CV_64F);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
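Putting steps 2 to 4 together, here is a minimal sketch of another custom extractor that uses only the normalized grayscale patch as a one-channel feature. It is a hypothetical illustration rather than part of the official sample: grayExtractor is a made-up name, a BGR input frame is assumed, and the clipping is done with cv::Rect intersection instead of the explicit if-chain above.
void grayExtractor(const Mat img, const Rect roi, Mat& feat){
  // clip the ROI to the image area (equivalent to the if-chain in sobelExtractor)
  Rect region = roi & Rect(0, 0, img.cols, img.rows);
  Mat patch;
  cvtColor(img(region), patch, COLOR_BGR2GRAY);

  // pad back to the ROI size so the feature matches the bounding box
  int addTop    = region.y - roi.y;
  int addLeft   = region.x - roi.x;
  int addBottom = roi.height - region.height - addTop;
  int addRight  = roi.width  - region.width  - addLeft;
  copyMakeBorder(patch, patch, addTop, addBottom, addLeft, addRight, BORDER_REPLICATE);

  // the feature itself is plain pixel intensity, which is Euclidean-comparable;
  // convert to CV_64F and normalize to the range -0.5 .. 0.5
  patch.convertTo(feat, CV_64F);
  feat = feat / 255.0 - 0.5;
}
It plugs in exactly like the Sobel version: tracker->setFeatureExtractor(grayExtractor);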