Commit

switching updated - signal extraction
alirezaahmadi committed May 18, 2022
1 parent c2c941a commit d6393eb
Showing 4 changed files with 127 additions and 134 deletions.
24 changes: 10 additions & 14 deletions configs/params.yaml
@@ -5,7 +5,6 @@
frontColor_topic: '/nav_front_rs/camera/color/image_raw'
frontDepth_topic: '/nav_front_rs/camera/depth/image_raw'
frontCameraInfo_topic: '/nav_front_rs/camera/color/camera_info'

backColor_topic: '/nav_back_rs/camera/color/image_raw'
backDepth_topic: '/nav_back_rs/camera/depth/image_raw'
backCameraInfo_topic: '/nav_back_rs/camera/color/camera_info'
@@ -14,63 +13,60 @@ backCameraInfo_topic: '/nav_back_rs/camera/color/camera_info'
# frontColor_topic: '/phenobot/rs_nav_front/color/image_raw'
# frontDepth_topic: '/phenobot/rs_nav_front/aligned_depth_to_color/image_raw'
# frontCameraInfo_topic: '/phenobot/rs_nav_front/color/camera_info'

# backColor_topic: '/phenobot/rs_nav_back/color/image_raw'
# backDepth_topic: '/phenobot/rs_nav_back/aligned_depth_to_color/image_raw'
# backCameraInfo_topic: '/phenobot/rs_nav_back/color/camera_info'

# in the experiments of the paper we use neither odometry nor IMU!
# using Odometry
useOdom: False
odomTopic: /odometry/base_raw
# use IMU for maintaining orientation
useImu: False
imuTopic: /imu/orientation
#pub
# simulation
cmd_vel_topic: /phenobot/twist_mux/cmd_vel
# thorvald
# cmd_vel_topic: /twist_mux/cmd_vel

# node params
update rate: 30
queue_size: 5

# processed image size scale
imgResizeRatio: 100
# for filtering contours
minContourArea: 10



# stationary debug mode, (without publishing velocity)
stationaryDebug: False
# run time params
# Mode 1: Driving forward with front camera (starting mode)
# Mode 2: Driving forward with back camera
# Mode 3: Driving backwards with back camera
# Mode 4: Driving backwards with front camera
navigationMode: 2

# angular velocity scaler
maxOmega: 0.05
minOmega: 0.01
omegaScaler: 0.1
maxLinearVel: 0.5
minLinearVel: 0.01

# Parameter for setting the number of rows to pass in one switch
linesToPass: 1
# Max Offset of the Window for extracting the turn features
maxMatchingDifference: 100
# Min Offset of the Window for extracting the turn features
minMatchingDifference: 0
# Threshold for keypoints
matchingKeypointsNum: 10

minKeypointNum: 10
# scanner window params
scanSteps: 5
scanStartPoint: 0
scanEndPoint: 1280
scanWindowWidth: 128

# tracker params
trackingBoxWidth: 200
topOffset: 0
bottomOffset: 0
sacleRatio: 0.4

# for dividing bushy rows
maxCoutourHeight: 120
# in case of using a bigger image size, we suggest setting an ROI
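For context, here is a minimal sketch of how a node could read these parameters with plain PyYAML; the actual package may load them through the ROS parameter server instead, and the file path below is only an assumption.

# Sketch only: reads configs/params.yaml directly with PyYAML (assumption);
# the real node may instead get these values from the ROS parameter server.
import yaml

with open("configs/params.yaml") as f:  # path is an assumption
    params = yaml.safe_load(f)

# keys consumed by featureMatching.py in this commit
feature_params = {
    "minKeypointNum": params["minKeypointNum"],
    "linesToPass": params["linesToPass"],
}
print("navigation mode:", params["navigationMode"])
print("feature params:", feature_params)
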
173 changes: 82 additions & 91 deletions scripts/featureMatching.py
@@ -5,38 +5,83 @@
import itertools
import copy
from cv_bridge import CvBridge
import matplotlib.pyplot as plt
from scipy.signal import find_peaks

from shapely.geometry import Point
from shapely.geometry.polygon import Polygon

class featureMatching:
def __init__(self, featureParams):
self.featureParams = featureParams
self.simDistVec= []
self.meanVec = []
self.count = 0

def sampleCropRowFeatures(self, mode, rgbImg, greenIDx, mask, wLocs):
_greenIDx = greenIDx.copy()
def sampleCropRowFeatures(self, mode, rgbImg, greenIDx, binaryMask, wLocs):
_rgbImg = rgbImg.copy()
_rgbImgMasked = np.zeros(np.shape(_rgbImg), np.uint8)

# get the correct window depending on the current mode
rowId = None
if mode == 3:
rowId = 0
else:
rowId = -1
self.refWindowLoc = wLocs[rowId]
# int_coords = lambda x: np.array(x).round().astype(np.int32)
# exterior = np.array([int_coords(wLocs[rowId].exterior.coords)])
# bboxImg = self.cropBboxFromImage(_greenIDx, exterior)
idx = (mask!=0)
_rgbImgMasked[idx] = _rgbImg[idx]
self.refKeypoints, self.refDescriptors = self.detectTrackingFeatures(_rgbImgMasked)

self.drawKeyPoints(_rgbImg, self.refKeypoints)

def detectNewCropRow(self, mode, greenIDx, mask, wLocs):
# get masked rgb Image (ExG masked)
rgbMasked = self.maskRgb(rgbImg, binaryMask)
# detect keypoints in image
self.refKeypoints, self.refDescriptors = self.detectTrackingFeatures(rgbMasked)
# filter out features in desired window location
self.refKeypoints, self.refDescriptors = self.filterKeypoints(self.refKeypoints,
self.refDescriptors,
self.refWindowLoc)
# draw keypoints on rgb Image
self.drawKeyPoints(_rgbImg, self.refKeypoints, [255,0,0])

def detectNewCropLane(self, mode, rgbImg, greenIDx, binaryMask, wLocs, numofCropRows):
_rgbImg = rgbImg.copy()
# get masked rgb Image (ExG masked)
rgbMasked = self.maskRgb(rgbImg, binaryMask)
# find new crop rows and compare to old ones!
# self.matchTrackingFeatures()
return False, None

# extract keypoints and descriptors from new detected line
srcKeypoints, srcDescriptors = self.detectTrackingFeatures(rgbMasked)
# filter in desired window
windowLoc = self.refWindowLoc
# filter out features in desired window location
srcKeypoints, srcDescriptors = self.filterKeypoints(srcKeypoints, srcDescriptors, windowLoc)
# find matches between ref and src keypoints
matches = self.matchTrackingFeatures(srcKeypoints, srcDescriptors)
# if not, this indicates that the window lies between two rows
print(self.count, len(matches))

# draw keypoints on rgb Image
self.drawKeyPoints(_rgbImg, srcKeypoints, [0,255,0])

self.simDistVec = list(self.simDistVec)
self.simDistVec.append(len(matches))

peaks = []
if self.count > 100:
self.simDistVec = np.negative(self.simDistVec)
peaks, _ = find_peaks(self.simDistVec, prominence=2, height=-5)
plt.plot(self.simDistVec)
pickPoses = [self.simDistVec[p] for p in peaks]
plt.plot(peaks, pickPoses , "x")
plt.plot(np.zeros_like(self.simDistVec), "--", color="gray")
plt.show()

self.count+=1
return len(peaks) == numofCropRows

def maskRgb(self, rgbImg, mask):
maskedRgb = np.zeros(np.shape(rgbImg), np.uint8)
idx = (mask!=0)
maskedRgb[idx] = rgbImg[idx]
return maskedRgb

def cropBboxFromImage(self, image, bbox):
imgWHeight, imgWidth = np.shape(image)
@@ -50,15 +95,25 @@ def cropBboxFromImage(self, image, bbox):
# cv.destroyAllWindows()
return bboxImg

def drawKeyPoints(self, rgbImg, keyPoints):
def filterKeypoints(self, keyPoints, descriptors, polygon):
filteredKeypoints = []
filteredDescriptors = []
for kp, des in itertools.izip(keyPoints, descriptors):
if polygon.contains(Point(kp.pt[0], kp.pt[1])):
filteredKeypoints.append(kp)
filteredDescriptors.append(des)
return filteredKeypoints, filteredDescriptors

def drawKeyPoints(self, rgbImg, keyPoints, color=[0, 0, 255], imShow=False):
for kp in keyPoints:
ptX = kp.pt[0]
ptY = kp.pt[1]
rgbImg[int(ptY)-3:int(ptY)+3, int(ptX) -
3:int(ptX)+3] = [0, 0, 255]
cv.imshow("test", rgbImg)
cv.waitKey(0)
cv.destroyAllWindows()
3:int(ptX)+3] = color
if imShow:
cv.imshow("test", rgbImg)
cv.waitKey(0)
cv.destroyAllWindows()
return rgbImg

def detectTrackingFeatures(self, greenIDX):
@@ -87,90 +142,26 @@ def detectTrackingFeatures(self, greenIDX):
for kp, desc in itertools.izip(keyPointsRaw, descriptorsRaw):
matchingKeypoints.append(kp)
featureDescriptors.append(desc)

print(len(matchingKeypoints), " Key Points in the first window were detected")
return matchingKeypoints, featureDescriptors

def matchTrackingFeatures(self, greenIDX, refDescriptors):
def matchTrackingFeatures(self, srcKeypoints, srcDescriptors):
"""Function to compute the features in the second window
"""
# Initiate SIFT detector
sift = cv.xfeatures2d.SIFT_create()
# get sift key points
keyPointsRaw, descriptorsRaw = sift.detectAndCompute(greenIDX, None)
keyPtsCurr = []
descriptorsCurr = []
# get the correct window depending on the current mode
if mode == 3:
windowLoc = self.wLocs[1]
else:
windowLoc = self.wLocs[-2]
# maintain the keypoints lying in the second window
for kp, desc in itertools.izip(keyPointsRaw, descriptorsRaw):
ptX = kp.pt[0]
ptY = kp.pt[1]
# if the computed keypoint is in the second window keep it
if ptX > (windowLoc - self.turnWindowWidth) and ptX < (windowLoc + self.turnWindowWidth):
if ptY > self.max_matching_dif_features and ptY < (self.imgWidth - self.min_matching_dif_features):
keyPtsCurr.append(kp)
descriptorsCurr.append(desc)
# plot the key Points in the current image
self.primaryImg[int(ptY)-3:int(ptY)+3,
int(ptX)-3:int(ptX)+3] = [255, 0, 0]

# Check if there's a suitable number of key points
good = []
if len(keyPtsCurr) > self.matching_keypoints_th:
qualifiedMatches = []
if len(srcKeypoints) > self.featureParams["minKeypointNum"]:
# compute the matches between the key points
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(np.asarray(
refDescriptors, np.float32), np.asarray(descriptorsCurr, np.float32), k=2)

self.refDescriptors, np.float32), np.asarray(srcDescriptors, np.float32), k=2)
# search for good matches (Lowe's ratio test)
for m, n in matches:
if m.distance < 0.8*n.distance:
good.append(m)

# if not that indicates that the window is between two lines
else:
print('Not enough key points for matching for the ',
self.nrNoPlantsSeen, ' time')
self.nrNoPlantsSeen += 1
print('# no plants seen:', self.nrNoPlantsSeen)
if self.nrNoPlantsSeen > self.smoothSize:
self.noPlantsSeen = True
# cv bridge
self.bridge = CvBridge()
# publish processed image
rosIMG = self.bridge.cv2_to_imgmsg(self.primaryImg, encoding='rgb8')
self.numVec[self.count] = len(good)
self.count += 1
if self.count > self.smoothSize:
self.meanVec[self.count] = sum(
self.numVec[self.count-(self.smoothSize+1):self.count])/self.smoothSize

# if the smoothed mean is descending the first row has passed the second window
if self.meanVec[self.count] < self.meanVec[self.count-1] and self.meanVec[self.count] > self.matching_keypoints_th and self.noPlantsSeen:
self.cnt += 1
if self.cnt >= self.matching_keypoints_th:
self.newDetectedRows += 1
# if enough rows are passed
if self.newDetectedRows == self.featureParams["linesToPass"]:
self.cnt = 0
print("All rows passed")
return True, rosIMG
else:
print(self.newDetectedRows, " row(s) passed")
self.cnt = 0
# compute the new features in the new first window
self.detectTrackingFeatures()
return False, rosIMG
else:
print("No row passed, continuing")
return False, rosIMG
qualifiedMatches.append(m)
else:
print("No row passed, continuing")
return False, rosIMG
print('Not enough key points for matching')

return qualifiedMatches
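
The new detectNewCropLane logic boils down to a per-frame signal: the number of keypoint matches between the reference window and the current window. Crossing the gap between two crop rows shows up as a dip in that signal, so the code negates it and counts peaks with scipy's find_peaks. Below is a self-contained sketch of that idea; the match-count sequence is synthetic and only an assumption, while the negate-and-find-peaks step and its prominence/height parameters mirror the commit.

# Self-contained sketch of the "signal extraction" step in detectNewCropLane.
# The per-frame match counts below are synthetic; only the negate-and-find-peaks
# logic and the prominence/height parameters follow the commit.
import numpy as np
from scipy.signal import find_peaks

# hypothetical match counts: high while the window sits over a row,
# low while it sits in the gap between two rows
sim_dist = np.array([9, 8, 7, 2, 1, 2, 8, 9, 8, 3, 1, 2, 9, 9], dtype=float)

# negate so the low-match valleys become peaks
neg = np.negative(sim_dist)
peaks, _ = find_peaks(neg, prominence=2, height=-5)

print("gaps detected at frames:", peaks)   # -> [ 4 10]
print("crop rows crossed:", len(peaks))    # detectNewCropLane compares this
                                           # count against numofCropRows
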
5 changes: 2 additions & 3 deletions scripts/imageProc.py
@@ -19,7 +19,6 @@
from featureMatching import *
from movingVariance import *


class imageProc:
def __init__(self, scannerParams, contourParams, roiParams, trackerParams):
"""# class FeatureExtractor to extract the the line Features: bottom point, angle
@@ -37,7 +36,7 @@ def __init__(self, scannerParams, contourParams, roiParams, trackerParams):
self.reset()

def reset(self):
print("**************Reset!!!!")
print("**************Reset*************")
self.count = 0
self.pointsInTop = 0
self.pointsInBottom = 0
@@ -255,7 +254,7 @@ def findLinesInImage(self):
xM, xB = getLineRphi(ptsFlip)
t_i, b_i = lineIntersectImgUpDown(xM, xB, self.imgHeight)
l_i, r_i = lineIntersectImgSides(xM, xB, self.imgHeight)
print("row ID:", boxIdx, t_i, b_i, l_i, r_i )
# print("row ID:", boxIdx, t_i, b_i, l_i, r_i )
# if the linefit does not return None and the line-image intersections
# are within the image bounds
if xM is not None and b_i >= 0 and b_i <= self.imgWidth:
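
The findLinesInImage hunk above intersects each fitted line with the image borders before accepting it. The helpers getLineRphi, lineIntersectImgUpDown and lineIntersectImgSides are not part of this diff; the sketch below only illustrates the top/bottom intersection under the assumption that the fit is parameterized as x = m*y + b, which is consistent with b_i being compared against the image width in the bounds check.

# Hedged sketch: the real helper is not shown in this diff.
# Assumes the line is parameterized as x = m*y + b (x as a function of y).
def line_intersect_img_up_down(m, b, img_height):
    top_x = b                      # intersection with the top edge (y = 0)
    bottom_x = m * img_height + b  # intersection with the bottom edge (y = img_height)
    return top_x, bottom_x

# usage with hypothetical values
t_i, b_i = line_intersect_img_up_down(0.1, 300.0, 720)
img_width = 1280
if 0 <= b_i <= img_width:
    print("line leaves through the bottom edge inside the image, x =", b_i)
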
