For reprints, please credit the source: this article is from zhch Pan's blog, http://www.cnblogs.com/zhchp-blog/
This post is part of the source-code reading notes I made while working on an earlier project; I am posting them online to help others better understand the V-LOAM implementation. Some of the code has been modified locally and may differ from the original project, but this does not get in the way of understanding the overall pipeline.
Source download link: https://github.com/Jinqiang/demo_lidar
Node name: featureTracking
Subscribed topic: <sensor_msgs::Image> ("/camera/image_raw")
Published topics: 1. <sensor_msgs::PointCloud2> ("/image_points_last")
2. <sensor_msgs::Image> ("/image/show")
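The custom point type ImagePoint comes from pointDefinition.h, which is not reproduced in this post. A minimal sketch consistent with how the code below uses it (float fields u and v plus an integer feature index ind, registered as a PCL point type) could look like the following; the exact definition in the repository may differ:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/register_point_struct.h>

// Assumed layout, reconstructed from usage in featureTracking.cpp:
// u, v hold normalized camera-frame coordinates, and ind is a feature ID
// that stays stable across frames, so downstream nodes can associate the
// same physical point between successive messages.
struct ImagePoint {
  float u, v;
  int ind;
};

POINT_CLOUD_REGISTER_POINT_STRUCT(ImagePoint,
                                  (float, u, u)
                                  (float, v, v)
                                  (int, ind, ind))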
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <ros/ros.h>

#include "cameraParameters.h"
#include "pointDefinition.h"

using namespace std;
using namespace cv;

bool systemInited = false;
double timeCur, timeLast;

const int imagePixelNum = imageHeight * imageWidth;
CvSize imgSize = cvSize(imageWidth, imageHeight);

IplImage *imageCur = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
IplImage *imageLast = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);

int showCount = 0;
const int showSkipNum = 2;
const int showDSRate = 2;
CvSize showSize = cvSize(imageWidth / showDSRate, imageHeight / showDSRate);

IplImage *imageShow = cvCreateImage(showSize, IPL_DEPTH_8U, 1);
IplImage *harrisLast = cvCreateImage(showSize, IPL_DEPTH_32F, 1);

CvMat kMat = cvMat(3, 3, CV_64FC1, kImage);
CvMat dMat = cvMat(4, 1, CV_64FC1, dImage);

IplImage *mapx, *mapy;

const int maxFeatureNumPerSubregion = 2;
const int xSubregionNum = 12;
const int ySubregionNum = 8;
const int totalSubregionNum = xSubregionNum * ySubregionNum;
const int MAXFEATURENUM = maxFeatureNumPerSubregion * totalSubregionNum;

const int xBoundary = 20;
const int yBoundary = 20;
const double subregionWidth = (double)(imageWidth - 2 * xBoundary) / (double)xSubregionNum;
const double subregionHeight = (double)(imageHeight - 2 * yBoundary) / (double)ySubregionNum;

const double maxTrackDis = 100;
const int winSize = 15;

IplImage *imageEig, *imageTmp, *pyrCur, *pyrLast;

CvPoint2D32f *featuresCur = new CvPoint2D32f[2 * MAXFEATURENUM];
CvPoint2D32f *featuresLast = new CvPoint2D32f[2 * MAXFEATURENUM];
char featuresFound[2 * MAXFEATURENUM];
float featuresError[2 * MAXFEATURENUM];

int featuresIndFromStart = 0;
int featuresInd[2 * MAXFEATURENUM] = {0};

int totalFeatureNum = 0;
// number of features currently kept in each subregion, capped at maxFeatureNumPerSubregion (= 2)
int subregionFeatureNum[2 * totalSubregionNum] = {0};

pcl::PointCloud<ImagePoint>::Ptr imagePointsCur(new pcl::PointCloud<ImagePoint>());
pcl::PointCloud<ImagePoint>::Ptr imagePointsLast(new pcl::PointCloud<ImagePoint>());

ros::Publisher *imagePointsLastPubPointer;
ros::Publisher *imageShowPubPointer;
cv_bridge::CvImage bridge;

void imageDataHandler(const sensor_msgs::Image::ConstPtr& imageData)
{
  timeLast = timeCur;
  // shift the timestamp by a fixed offset (presumably a known sensor delay)
  timeCur = imageData->header.stamp.toSec() - 0.1163;

  IplImage *imageTemp = imageLast;
  imageLast = imageCur;
  imageCur = imageTemp;

  for (int i = 0; i < imagePixelNum; i++) {
    imageCur->imageData[i] = (char)imageData->data[i];
  }

  IplImage *t = cvCloneImage(imageCur);
  // remove lens distortion using the rectification maps precomputed in main()
  cvRemap(t, imageCur, mapx, mapy);
  //cvEqualizeHist(imageCur, imageCur);
  cvReleaseImage(&t);

  // downsample the image first so that the Harris corner response is faster to compute
  cvResize(imageLast, imageShow);
  cvCornerHarris(imageShow, harrisLast, 3);

  CvPoint2D32f *featuresTemp = featuresLast;
  featuresLast = featuresCur;
  featuresCur = featuresTemp;

  pcl::PointCloud<ImagePoint>::Ptr imagePointsTemp = imagePointsLast;
  imagePointsLast = imagePointsCur;
  imagePointsCur = imagePointsTemp;
  imagePointsCur->clear();

  if (!systemInited) {
    systemInited = true;
    return;
  }
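  // Detect new features subregion by subregion: the area inside the 20-pixel image
  // boundary is split into a 12 x 8 grid, and each cell keeps at most
  // maxFeatureNumPerSubregion (2) features, for at most MAXFEATURENUM (192) in total,
  // so the tracked features stay evenly spread over the image. recordFeatureNum
  // remembers how many features existed before this pass; any index >= recordFeatureNum
  // belongs to a feature that is newly detected below.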
  int recordFeatureNum = totalFeatureNum;
  for (int i = 0; i < ySubregionNum; i++) {
    for (int j = 0; j < xSubregionNum; j++) {
      // ind is the index of the current subregion
      int ind = xSubregionNum * i + j;
      int numToFind = maxFeatureNumPerSubregion - subregionFeatureNum[ind];

      if (numToFind > 0) {
        int subregionLeft = xBoundary + (int)(subregionWidth * j);
        int subregionTop = yBoundary + (int)(subregionHeight * i);
        // box out the current subregion
        CvRect subregion = cvRect(subregionLeft, subregionTop, (int)subregionWidth, (int)subregionHeight);
        cvSetImageROI(imageLast, subregion);
        // find good features to track inside the boxed subregion
        cvGoodFeaturesToTrack(imageLast, imageEig, imageTmp, featuresLast + totalFeatureNum,
                              &numToFind, 0.1, 5.0, NULL, 3, 1, 0.04);

        int numFound = 0;
        for (int k = 0; k < numToFind; k++) {
          featuresLast[totalFeatureNum + k].x += subregionLeft;
          featuresLast[totalFeatureNum + k].y += subregionTop;

          int xInd = (featuresLast[totalFeatureNum + k].x + 0.5) / showDSRate;
          int yInd = (featuresLast[totalFeatureNum + k].y + 0.5) / showDSRate;
          // keep a candidate only if its Harris corner response is strong enough
          if (((float*)(harrisLast->imageData + harrisLast->widthStep * yInd))[xInd] > 1e-7) {
            featuresLast[totalFeatureNum + numFound].x = featuresLast[totalFeatureNum + k].x;
            featuresLast[totalFeatureNum + numFound].y = featuresLast[totalFeatureNum + k].y;
            featuresInd[totalFeatureNum + numFound] = featuresIndFromStart;

            numFound++;
            featuresIndFromStart++;
          }
        }
        totalFeatureNum += numFound;
        subregionFeatureNum[ind] += numFound;

        cvResetImageROI(imageLast);
      }
    }
  }
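  // Track every feature from imageLast into imageCur with pyramidal Lucas-Kanade
  // optical flow: a 15 x 15 search window, 3 pyramid levels, and termination after
  // 30 iterations or once the update falls below 0.01. featuresFound[i] is nonzero
  // when feature i was tracked successfully, and featuresError[i] holds its match error.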
  cvCalcOpticalFlowPyrLK(imageLast, imageCur, pyrLast, pyrCur,
                         featuresLast, featuresCur, totalFeatureNum, cvSize(winSize, winSize),
                         3, featuresFound, featuresError,
                         cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 0.01), 0);

  for (int i = 0; i < totalSubregionNum; i++) {
    subregionFeatureNum[i] = 0;
  }

  ImagePoint point;
  int featureCount = 0;
  double meanShiftX = 0, meanShiftY = 0;
  for (int i = 0; i < totalFeatureNum; i++) {
    double trackDis = sqrt((featuresLast[i].x - featuresCur[i].x)
                    * (featuresLast[i].x - featuresCur[i].x)
                    + (featuresLast[i].y - featuresCur[i].y)
                    * (featuresLast[i].y - featuresCur[i].y));

    if (!(trackDis > maxTrackDis || featuresCur[i].x < xBoundary ||
        featuresCur[i].x > imageWidth - xBoundary || featuresCur[i].y < yBoundary ||
        featuresCur[i].y > imageHeight - yBoundary)) {
      // work out which subregion the feature was detected in; ind is the subregion index
      int xInd = (int)((featuresLast[i].x - xBoundary) / subregionWidth);
      int yInd = (int)((featuresLast[i].y - yBoundary) / subregionHeight);
      int ind = xSubregionNum * yInd + xInd;

      if (subregionFeatureNum[ind] < maxFeatureNumPerSubregion) {
        // compact the features that survive the screening; featureCount starts from 0,
        // so featuresCur[] and featuresLast[] only keep the features matched between the
        // two adjacent images, not features from long ago
        featuresCur[featureCount].x = featuresCur[i].x;
        featuresCur[featureCount].y = featuresCur[i].y;
        featuresLast[featureCount].x = featuresLast[i].x;
        featuresLast[featureCount].y = featuresLast[i].y;
        // some features are screened out, so featureCount is not necessarily equal to i
        featuresInd[featureCount] = featuresInd[i];
        /* This step transforms a feature point [u, v] from the image coordinate system to the
         * camera coordinate system, i.e. [u, v] -> [X/Z, Y/Z, 1]; see formula (5.5) in
         * "14 Lectures on Visual SLAM". Note the added minus sign: by default the camera
         * frame has the z axis forward, x right and y down, with the image origin at the
         * top-left corner. featuresCur[featureCount].x - kImage[2] first moves the origin
         * from the top-left corner to the image center; the minus sign then takes the
         * negative x direction of the default camera frame as positive (and likewise for y),
         * so the resulting camera frame has z forward, x left and y up.
         */
        point.u = -(featuresCur[featureCount].x - kImage[2]) / kImage[0];
        point.v = -(featuresCur[featureCount].y - kImage[5]) / kImage[4];
        point.ind = featuresInd[featureCount];
        imagePointsCur->push_back(point);

        if (i >= recordFeatureNum) {
          point.u = -(featuresLast[featureCount].x - kImage[2]) / kImage[0];
          point.v = -(featuresLast[featureCount].y - kImage[5]) / kImage[4];
          imagePointsLast->push_back(point);
        }

        meanShiftX += fabs((featuresCur[featureCount].x - featuresLast[featureCount].x) / kImage[0]);
        meanShiftY += fabs((featuresCur[featureCount].y - featuresLast[featureCount].y) / kImage[4]);

        featureCount++;
        // subregionFeatureNum counts the features matched between the current and previous frame
        subregionFeatureNum[ind]++;
      }
    }
  }
  totalFeatureNum = featureCount;
  meanShiftX /= totalFeatureNum;
  meanShiftY /= totalFeatureNum;

  sensor_msgs::PointCloud2 imagePointsLast2;
  pcl::toROSMsg(*imagePointsLast, imagePointsLast2);
  imagePointsLast2.header.stamp = ros::Time().fromSec(timeLast);
  imagePointsLastPubPointer->publish(imagePointsLast2);

  // publish one image out of every showSkipNum + 1: e.g. skip frames 0 and 1, publish
  // frame 2, skip 3 and 4, publish 5
  showCount = (showCount + 1) % (showSkipNum + 1);
  if (showCount == showSkipNum) {
    Mat imageShowMat(imageShow);
    bridge.image = imageShowMat;
    bridge.encoding = "mono8";
    sensor_msgs::Image::Ptr imageShowPointer = bridge.toImageMsg();
    imageShowPubPointer->publish(imageShowPointer);
  }
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "featureTracking");
  ros::NodeHandle nh;

  mapx = cvCreateImage(imgSize, IPL_DEPTH_32F, 1);
  mapy = cvCreateImage(imgSize, IPL_DEPTH_32F, 1);
  cvInitUndistortMap(&kMat, &dMat, mapx, mapy);

  CvSize subregionSize = cvSize((int)subregionWidth, (int)subregionHeight);
  imageEig = cvCreateImage(subregionSize, IPL_DEPTH_32F, 1);
  imageTmp = cvCreateImage(subregionSize, IPL_DEPTH_32F, 1);

  CvSize pyrSize = cvSize(imageWidth + 8, imageHeight / 3);
  pyrCur = cvCreateImage(pyrSize, IPL_DEPTH_32F, 1);
  pyrLast = cvCreateImage(pyrSize, IPL_DEPTH_32F, 1);

  ros::Subscriber imageDataSub = nh.subscribe<sensor_msgs::Image>("/camera/image_raw", 1, imageDataHandler);

  ros::Publisher imagePointsLastPub = nh.advertise<sensor_msgs::PointCloud2>("/image_points_last", 5);
  imagePointsLastPubPointer = &imagePointsLastPub;

  ros::Publisher imageShowPub = nh.advertise<sensor_msgs::Image>("/image/show", 1);
  imageShowPubPointer = &imageShowPub;

  ros::spin();

  return 0;
}
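To make the sign convention in the big comment above concrete, here is a small standalone sketch of the same mapping. The intrinsics fx, fy, cx, cy stand in for kImage[0], kImage[4], kImage[2], kImage[5]; the numeric values are made up for illustration and are not the ones in cameraParameters.h:

#include <cstdio>

int main() {
  // hypothetical pinhole intrinsics, for illustration only
  const double fx = 500.0, fy = 500.0, cx = 320.0, cy = 240.0;

  // a pixel 100 px right of and 100 px above the principal point
  const double x = 420.0, y = 140.0;

  // same mapping as featureTracking: shift the origin to the principal point,
  // divide by the focal length, then negate, so the (x right, y down) image
  // convention becomes the (x left, y up) camera frame used downstream
  double u = -(x - cx) / fx;  // -> -0.2
  double v = -(y - cy) / fy;  // ->  0.2
  printf("u = %.2f, v = %.2f\n", u, v);
  return 0;
}

The example pixel, which lies right of and above the principal point, maps to u = -0.2 and v = 0.2: negative along the leftward x axis and positive along the upward y axis, exactly the z-forward, x-left, y-up frame described in the comment.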