```cpp
void matchBoundingBoxes(std::vector<cv::DMatch> &matches, std::map<int, int> &bbBestMatches, DataFrame &prevFrame, DataFrame &currFrame)
{
    // For every bounding box in the previous frame, find the bounding box in the
    // current frame that encloses the largest number of shared keypoint matches.
    for (const auto &prevBox : prevFrame.boundingBoxes)
    {
        int maxMatchCount = 0;
        int bestMatchBoxId = -1;
        for (const auto &currBox : currFrame.boundingBoxes)
        {
            // Count the matches whose keypoints fall inside both ROIs.
            int matchCount = 0;
            for (const auto &match : matches)
            {
                int prevKeyPointIdx = match.queryIdx; // keypoint index in the previous frame
                int currKeyPointIdx = match.trainIdx; // keypoint index in the current frame
                if (prevBox.roi.contains(prevFrame.keypoints[prevKeyPointIdx].pt) &&
                    currBox.roi.contains(currFrame.keypoints[currKeyPointIdx].pt))
                {
                    matchCount++;
                }
            }
            if (matchCount > maxMatchCount)
            {
                maxMatchCount = matchCount;
                bestMatchBoxId = currBox.boxID;
            }
        }
        bbBestMatches[prevBox.boxID] = bestMatchBoxId;
    }
}
```
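For context, here is a minimal usage sketch. The names `prevFrame`, `currFrame`, and `matches` are placeholders for objects the surrounding pipeline would provide; this is not verbatim project code:

```cpp
// Hypothetical usage sketch: prevFrame, currFrame, and matches are assumed
// to be populated by the detection and matching steps earlier in the pipeline.
std::map<int, int> bbBestMatches;
matchBoundingBoxes(matches, bbBestMatches, prevFrame, currFrame);
// bbBestMatches now maps each previous-frame boxID to its best current-frame boxID.
for (const auto &pair : bbBestMatches)
    std::cout << "prev box " << pair.first << " -> curr box " << pair.second << std::endl;
```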
```cpp
void computeTTCLidar(std::vector<LidarPoint> &lidarPointsPrev,
                     std::vector<LidarPoint> &lidarPointsCurr, double frameRate, double &TTC)
{
    double dT = 1.0 / frameRate;          // time between two measurements [s]
    double laneWidth = 1.2;               // assumed width of the ego lane [m]
    double yEdge = (laneWidth - 0.2) / 2; // keep a safety margin inside the lane
    // Filter out Lidar points outside the ego lane. Note: remove_if erases the
    // points for which the predicate is true, i.e. those beyond the lane edge.
    auto outsideLane = [&yEdge](const LidarPoint &lp) { return std::abs(lp.y) > yEdge; };
    lidarPointsPrev.erase(std::remove_if(lidarPointsPrev.begin(), lidarPointsPrev.end(), outsideLane), lidarPointsPrev.end());
    lidarPointsCurr.erase(std::remove_if(lidarPointsCurr.begin(), lidarPointsCurr.end(), outsideLane), lidarPointsCurr.end());
    if (lidarPointsPrev.empty() || lidarPointsCurr.empty())
    {
        TTC = NAN; // no points left after filtering; avoids invalid indexing below
        return;
    }
    // Sort the Lidar points by x-coordinate (distance in driving direction)
    std::sort(lidarPointsPrev.begin(), lidarPointsPrev.end(), [](const LidarPoint &lp1, const LidarPoint &lp2) { return lp1.x < lp2.x; });
    std::sort(lidarPointsCurr.begin(), lidarPointsCurr.end(), [](const LidarPoint &lp1, const LidarPoint &lp2) { return lp1.x < lp2.x; });
    // Use the median x-coordinate rather than the closest point, for robustness against outliers
    double prevXMedian = lidarPointsPrev[lidarPointsPrev.size() / 2].x;
    double currXMedian = lidarPointsCurr[lidarPointsCurr.size() / 2].x;
    TTC = currXMedian * dT / (prevXMedian - currXMedian);
}
```
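The final line follows from a constant-velocity model: with the median distances $d_0$ (previous frame) and $d_1$ (current frame) measured $\Delta t = 1/\text{frameRate}$ apart, the relative velocity is $v = (d_0 - d_1)/\Delta t$, so

$$TTC = \frac{d_1}{v} = \frac{d_1 \cdot \Delta t}{d_0 - d_1}$$

which is exactly `currXMedian * dT / (prevXMedian - currXMedian)`.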
```cpp
void clusterKptMatchesWithROI(BoundingBox &boundingBox_c, BoundingBox &boundingBox_p, std::vector<cv::KeyPoint> &kptsPrev, std::vector<cv::KeyPoint> &kptsCurr, std::vector<cv::DMatch> &kptMatches)
{
    // Shrink both ROIs slightly to exclude keypoints on the object's edges.
    float shrinkFactor = 0.12;
    cv::Rect smallerBox_c(
        boundingBox_c.roi.x + shrinkFactor * boundingBox_c.roi.width / 2.0,
        boundingBox_c.roi.y + shrinkFactor * boundingBox_c.roi.height / 2.0,
        boundingBox_c.roi.width * (1 - shrinkFactor),
        boundingBox_c.roi.height * (1 - shrinkFactor));
    cv::Rect smallerBox_p(
        boundingBox_p.roi.x + shrinkFactor * boundingBox_p.roi.width / 2.0,
        boundingBox_p.roi.y + shrinkFactor * boundingBox_p.roi.height / 2.0,
        boundingBox_p.roi.width * (1 - shrinkFactor),
        boundingBox_p.roi.height * (1 - shrinkFactor));
    // Keep only matches whose keypoints fall inside both shrunken ROIs.
    std::vector<cv::DMatch> kptMatches_roi;
    for (const auto &match : kptMatches)
    {
        const cv::KeyPoint &train = kptsCurr[match.trainIdx]; // current frame
        const cv::KeyPoint &query = kptsPrev[match.queryIdx]; // previous frame
        if (smallerBox_c.contains(train.pt) && smallerBox_p.contains(query.pt))
            kptMatches_roi.push_back(match);
    }
    if (kptMatches_roi.empty())
        return; // nothing to cluster; also avoids division by zero below
    // Compute the mean displacement of the remaining matches ...
    double dist_mean = 0.0;
    for (const auto &match : kptMatches_roi)
        dist_mean += cv::norm(kptsCurr[match.trainIdx].pt - kptsPrev[match.queryIdx].pt);
    dist_mean /= kptMatches_roi.size();
    // ... and keep only matches whose displacement stays close to the mean (outlier removal).
    for (const auto &match : kptMatches_roi)
    {
        if (cv::norm(kptsCurr[match.trainIdx].pt - kptsPrev[match.queryIdx].pt) < dist_mean * 1.5)
            boundingBox_c.kptMatches.push_back(match);
    }
    std::cout << "curr_bbx_matches_size: " << boundingBox_c.kptMatches.size() << std::endl;
}
```
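To show how the two camera-TTC steps chain together, here is an illustrative sketch. `currBB`, `prevBB`, the frame objects, and `sensorFrameRate` are placeholder names for what the surrounding tracking loop would supply, not verbatim project code:

```cpp
// Illustrative sketch (placeholder names): cluster the matches into the
// current bounding box, then estimate TTC from the clustered matches.
double ttcCamera;
clusterKptMatchesWithROI(*currBB, *prevBB, prevFrame.keypoints, currFrame.keypoints, matches);
computeTTCCamera(prevFrame.keypoints, currFrame.keypoints, currBB->kptMatches,
                 sensorFrameRate, ttcCamera, nullptr);
```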
```cpp
void computeTTCCamera(std::vector<cv::KeyPoint> &kptsPrev, std::vector<cv::KeyPoint> &kptsCurr,
                      std::vector<cv::DMatch> kptMatches, double frameRate, double &TTC, cv::Mat *visImg)
{
    if (kptMatches.size() < 2)
    {
        TTC = NAN; // need at least two matches to form a distance ratio
        return;
    }
    // Compute the ratios of distances between all pairs of matched keypoints
    // in the current frame versus the previous frame.
    std::vector<double> distRatios;
    double minDist = 100.0; // minimum required distance in the current frame [px]
    for (size_t i = 0; i < kptMatches.size() - 1; ++i)
    {
        const cv::KeyPoint &kpOuterCurr = kptsCurr[kptMatches[i].trainIdx];
        const cv::KeyPoint &kpOuterPrev = kptsPrev[kptMatches[i].queryIdx];
        for (size_t j = i + 1; j < kptMatches.size(); ++j)
        {
            const cv::KeyPoint &kpInnerCurr = kptsCurr[kptMatches[j].trainIdx];
            const cv::KeyPoint &kpInnerPrev = kptsPrev[kptMatches[j].queryIdx];
            double distCurr = cv::norm(kpOuterCurr.pt - kpInnerCurr.pt);
            double distPrev = cv::norm(kpOuterPrev.pt - kpInnerPrev.pt);
            // Avoid division by zero and discard short, noise-prone distances.
            if (distPrev > std::numeric_limits<double>::epsilon() && distCurr >= minDist)
            {
                distRatios.push_back(distCurr / distPrev);
            }
        }
    }
    if (distRatios.empty())
    {
        TTC = NAN;
        return;
    }
    // Use the median distance ratio to be robust against outlier matches.
    std::sort(distRatios.begin(), distRatios.end());
    size_t medIndex = distRatios.size() / 2;
    double medDistRatio = (distRatios.size() % 2 == 0)
                              ? (distRatios[medIndex - 1] + distRatios[medIndex]) / 2.0
                              : distRatios[medIndex];
    double dT = 1.0 / frameRate;
    TTC = -dT / (1.0 - medDistRatio);
}
```
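The closing formula again assumes constant velocity. Under the pinhole-camera model, the ratio of image distances equals the inverse ratio of metric distances, so $\text{medDistRatio} = d_0/d_1$ with $d_0$ the previous and $d_1$ the current distance to the preceding vehicle. Substituting $v = (d_0 - d_1)/\Delta t$:

$$TTC = \frac{d_1}{v} = \frac{d_1 \cdot \Delta t}{d_0 - d_1} = \frac{-\Delta t}{1 - d_0/d_1} = \frac{-\Delta t}{1 - \text{medDistRatio}}$$

A ratio of exactly 1 (no apparent scale change between frames) drives the denominator to zero, which corresponds to an infinite TTC.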
The most important factor for a stable TTC calculation is filtering out data that does not belong to the target: restricting Lidar points to the ego lane, shrinking the bounding boxes, and using median-based statistics all prevent outliers from disturbing the result. For example, a single stray Lidar point a few centimeters in front of the tailgate would corrupt a closest-point estimate, while the median x-coordinate is unaffected by it.
The table below shows the TTC performance (Lidar and camera) for all combinations of detector and descriptor. As can be seen, the HARRIS and ORB detectors produce unreliable results. With SHITOMASI, in contrast, the difference between camera and Lidar TTC is very low. Combinations with the AKAZE detector likewise produce very good values.
The performance of the detector/descriptor combinations for TTC (Lidar and camera) is shown in more detail in the table below, including frame-by-frame differences. If a combination failed to produce a result, this is indicated by N/A or nan.
Summary
Detector | SHITOMASI | SHITOMASI | SHITOMASI | SHITOMASI | HARRIS | HARRIS | HARRIS | HARRIS | FAST | FAST | FAST | FAST | BRISK | BRISK | BRISK | BRISK | AKAZE | AKAZE | AKAZE | AKAZE | AKAZE |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Descriptor | BRISK | BRIEF | ORB | FREAK | BRISK | BRIEF | ORB | FREAK | BRISK | BRIEF | ORB | FREAK | BRISK | BRIEF | ORB | FREAK | BRISK | BRIEF | ORB | FREAK | AKAZE |
Average camera-Lidar TTC difference [s] | 2.19 | 5.22 | 4.03 | 2.50 | 29.80 | 14.25 | 16.03 | 17.72 | 1.86 | 4.87 | 7.42 | 4.30 | 3.87 | 5.53 | 3.72 | 4.39 | 3.22 | 2.82 | 2.45 | 2.42 | 1.82 |
Number of valid TTC estimates | 17 | 18 | 18 | 17 | 9 | 9 | 8 | 8 | 16 | 18 | 18 | 18 | 18 | 18 | 18 | 18 | 18 | 18 | 18 | 18 | 18 |
Welcome to the final project of the camera course. By completing all the lessons, you now have a solid understanding of keypoint detectors, descriptors, and methods to match them between successive images. Also, you know how to detect objects in an image using the YOLO deep-learning framework. And finally, you know how to associate regions in a camera image with Lidar points in 3D space. Let's take a look at our program schematic to see what we already have accomplished and what's still missing.
In this final project, you will implement the missing parts in the schematic. To do this, you will complete four major tasks:
- First, you will develop a way to match 3D objects over time by using keypoint correspondences.
- Second, you will compute the TTC based on Lidar measurements.
- You will then proceed to do the same using the camera, which requires you to first associate keypoint matches with regions of interest and then compute the TTC based on those matches.
- And lastly, you will conduct various tests with the framework. Your goal is to identify the most suitable detector/descriptor combination for TTC estimation and also to search for problems that can lead to faulty measurements by the camera or Lidar sensor. In the last course of this Nanodegree, you will learn about the Kalman filter, which is a great way to combine the two independent TTC measurements into an improved version which is much more reliable than a single sensor alone can be. But before we think about such things, let us focus on your final project in the camera course.
- cmake >= 2.8
- All OSes: click here for installation instructions
- make >= 4.1 (Linux, Mac), 3.81 (Windows)
- Linux: make is installed by default on most Linux distros
- Mac: install Xcode command line tools to get make
- Windows: Click here for installation instructions
- Git LFS
- Weight files are handled using LFS
- Install Git LFS before cloning this Repo.
- OpenCV >= 4.1
- This must be compiled from source using the `-D OPENCV_ENABLE_NONFREE=ON` cmake flag for testing the SIFT and SURF detectors.
- The OpenCV 4.1.0 source code can be found here
- gcc/g++ >= 5.4
- Linux: gcc / g++ is installed by default on most Linux distros
- Mac: same deal as make - install Xcode command line tools
- Windows: recommend using MinGW
- Clone this repo.
- Make a build directory in the top level project directory: `mkdir build && cd build`
- Compile: `cmake .. && make`
- Run it: `./3D_object_tracking`