code cleaned up
alirezaahmadi committed May 12, 2022
1 parent 7b4b19f commit 1ab0313
Showing 6 changed files with 444 additions and 473 deletions.
135 changes: 135 additions & 0 deletions scripts/contours.py
@@ -0,0 +1,135 @@


import numpy as np
import cv2 as cv

def getCCenter(self, contours):
    """Computes the center points of the given contours.
    Args:
        contours (list): contours extracted from the image
    Returns:
        np.ndarray: N x 2 array of contour centers (row, col)
    """
# get center of contours
contCenterPTS = np.zeros((len(contours),2))
for i in range(0, len(contours)):
# get contour
        c_curr = contours[i]

# get moments
M = cv.moments(c_curr)

# compute center of mass
if(M['m00'] != 0):
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
contCenterPTS[i,:] = [cy,cx]
            # draw a marker on the contour center
self.processedIMG[cy-3:cy+3,cx-3:cx+3] = [255, 0, 255]

contCenterPTS = contCenterPTS[~np.all(contCenterPTS == 0, axis=1)]
return contCenterPTS


def getClosedRegions(self, binaryMask):
    """Finds contours in the binary mask and filters them by size."""
    # find contours (index [1] assumes the OpenCV 3.x return tuple: image, contours, hierarchy)
    contours = cv.findContours(binaryMask, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)[1]

# cv.imshow("crop rows mask",self.mask)
# self.handleKey()

# filter contours based on size
# self.filtered_contours = contours
filtered_contours = list()
for i in range(len(contours)):
if cv.contourArea(contours[i]) > self.min_contour_area:

if self.bushy :
cn_x, cn_y, cnt_w, cn_h = cv.boundingRect(contours[i])

# split to N contours h/max_coutour_height
sub_contours = self.splitContours(contours[i], cn_x, cn_y, cnt_w, cn_h)
for j in sub_contours:
                    if len(j) > 0:
filtered_contours.append(j)
else:
                if len(contours[i]) > 0:
filtered_contours.append(contours[i])
# else:
# filtered_contours.append(contours[i])
return filtered_contours

def splitContours(self, contour, x, y, w, h):
    """Splits a large contour into smaller sub-regions along its height.
    Args:
        contour (np.ndarray): contour to split
        x (int): x coordinate of the contour's bounding box
        y (int): y coordinate of the contour's bounding box
        w (int): width of the bounding box
        h (int): height of the bounding box
    Returns:
        list: sub polygons (separated contours)
    """
sub_polygon_num = h // self.max_coutour_height
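    # each sub-polygon covers a horizontal band of at most max_coutour_height pixels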
sub_polys = list()
subContour = list()
vtx_idx = list()
# contour = [sorted(contour.squeeze().tolist(), key=operator.itemgetter(0), reverse=True)]
contour = [contour.squeeze().tolist()]
for subPoly in range(1, sub_polygon_num + 1):
for vtx in range(len(contour[0])):
            if ((subPoly - 1) * self.max_coutour_height) - 1 <= contour[0][vtx][1] and \
               (subPoly * self.max_coutour_height) - 1 >= contour[0][vtx][1] and \
vtx not in vtx_idx:
subContour.append([contour[0][vtx]])
vtx_idx.append(vtx)

sub_polys.append(np.array(subContour))
subContour = list()

return sub_polys

def sortContours(self, cnts, method="left-to-right"):
    """Sorts contours and their bounding boxes in the given direction.
    Args:
        cnts (list): contours to sort
        method (str, optional): sort direction. Defaults to "left-to-right".
    Returns:
        tuple: sorted contours, bounding boxes
    """
reverse = False
i = 0
# handle if we need to sort in reverse
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
# handle if we are sorting against the y-coordinate rather than
# the x-coordinate of the bounding box
if method == "top-to-bottom" or method == "bottom-to-top":
i = 1
# construct the list of bounding boxes and sort them from top to
# bottom
boundingBoxes = [cv.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b:b[1][i], reverse=reverse))
# return the list of sorted contours and bounding boxes
return cnts, boundingBoxes

def getContoursInWindow(self, contourCenters, box):
    """Filters out the contour centers that lie inside a box.
    Args:
        contourCenters (array-like): 2 x N array of contour center coordinates
        box (list): box corner points
    Returns:
        list: contour centers inside the box
    """
points = []
for cnt in range(len(contourCenters[1])):
x, y = contourCenters[0][cnt], contourCenters[1][cnt]
if self.isInBox(list(box), [x, y]):
points.append([x, y])
return points
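A minimal usage sketch of the contour helpers above (hypothetical, not part of the committed file): it assumes detector is an instance of the class these methods belong to, with min_contour_area, max_coutour_height, bushy and processedIMG already configured.

import cv2 as cv

# hypothetical usage of the contour helpers above (assumes a configured detector instance)
mask = cv.imread("crop_rows_mask.png", cv.IMREAD_GRAYSCALE)            # assumed binary vegetation mask
_, mask = cv.threshold(mask, 127, 255, cv.THRESH_BINARY)

contours = detector.getClosedRegions(mask)                              # size-filtered (optionally split) contours
centers = detector.getCCenter(contours)                                 # N x 2 array of (row, col) centers
cnts, boxes = detector.sortContours(contours, method="left-to-right")   # contours ordered along the image x-axis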
113 changes: 113 additions & 0 deletions scripts/featureMatching.py
@@ -0,0 +1,113 @@
import numpy as np
import cv2 as cv
from cv_bridge import CvBridge

# Function to compute the features of the line that has to be recognized
def detectTrackingFeatures(self, mode):
print("#[INF] detect Tracking Features")
# Initiate SIFT detector
sift = cv.xfeatures2d.SIFT_create()
# find the keypoints and featureDescriptors in the green index image with SIFT
greenIDX = self.getGreenIDX()
# get sift key points
keyPointsRaw, descriptorsRaw = sift.detectAndCompute(greenIDX,None)
self.matchingKeypoints = []
self.featureDescriptors = []
# get the correct window depending on the current mode
if mode == 3:
windowLoc = self.windowLocations[0]
else:
windowLoc = self.windowLocations[-1]
print("sample", self.windowLocations)
# maintain the keypoints lying in the first window
    for kp, desc in zip(keyPointsRaw, descriptorsRaw):
ptX = kp.pt[0]
ptY = kp.pt[1]
# if the computed keypoint is in the first window keep it
if ptX > (windowLoc - 2 * self.turnWindowWidth) and ptX < (windowLoc + 2 * self.turnWindowWidth):
if ptY > self.max_matching_dif_features and ptY < (self.image_size[0] - self.min_matching_dif_features):
self.matchingKeypoints.append(kp)
self.featureDescriptors.append(desc)
# plot the first key points
self.processedIMG[int(ptY)-3:int(ptY)+3,int(ptX)-3:int(ptX)+3] = [0, 0, 255]

print(len(self.matchingKeypoints)," Key Points in the first window were detected")

def matchTrackingFeatures(self, mode):
"""Function to compute the features in the second window
"""
# Initiate SIFT detector
sift = cv.xfeatures2d.SIFT_create()
# find the keypoints and featureDescriptors in the green index image with SIFT
greenIDX = self.getGreenIDX()
# get sift key points
keyPointsRaw, descriptorsRaw = sift.detectAndCompute(greenIDX,None)
keyPtsCurr = []
descriptorsCurr = []
print("compare", self.windowLocations)
# get the correct window depending on the current mode
if mode == 3:
windowLoc = self.windowLocations[1]
else:
windowLoc = self.windowLocations[-2]
# maintain the keypoints lying in the second window
    for kp, desc in zip(keyPointsRaw, descriptorsRaw):
ptX = kp.pt[0]
ptY = kp.pt[1]
# if the computed keypoint is in the second window keep it
if ptX > (windowLoc - self.turnWindowWidth) and ptX < (windowLoc + self.turnWindowWidth):
if ptY > self.max_matching_dif_features and ptY < (self.imgWidth - self.min_matching_dif_features):
keyPtsCurr.append(kp)
descriptorsCurr.append(desc)
# plot the key Points in the current image
self.processedIMG[int(ptY)-3:int(ptY)+3,int(ptX)-3:int(ptX)+3] = [255, 0, 0]

# Check if there's a suitable number of key points
good = []
if len(keyPtsCurr) > self.matching_keypoints_th:
# compute the matches between the key points
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(np.asarray(self.featureDescriptors,np.float32),np.asarray(descriptorsCurr,np.float32),k=2)
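        # k=2 keeps the two closest candidates so the ratio test below can compare best vs. second-best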

        # search for good matches (Lowe's ratio test)
for m,n in matches:
if m.distance < 0.8*n.distance:
good.append(m)

    # if not, the window is most likely between two crop rows
else:
print('Not enough key points for matching for the ',self.nrNoPlantsSeen, ' time')
self.nrNoPlantsSeen += 1
print('# no plants seen:',self.nrNoPlantsSeen)
if self.nrNoPlantsSeen > self.smoothSize:
self.noPlantsSeen = True
# cv bridge
self.bridge = CvBridge()
# publish processed image
rosIMG = self.bridge.cv2_to_imgmsg(self.processedIMG, encoding='rgb8')
self.numVec[self.count] = len(good)
self.count += 1
if self.count > self.smoothSize:
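        # smooth the number of good matches with a moving average over the last frames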
self.meanVec[self.count] = sum(self.numVec[self.count-(self.smoothSize+1):self.count])/self.smoothSize

        # if the smoothed mean is descending, the first row has passed the second window
if self.meanVec[self.count]<self.meanVec[self.count-1] and self.meanVec[self.count] > self.matching_keypoints_th and self.noPlantsSeen:
self.cnt += 1
if self.cnt >= self.matching_keypoints_th:
self.newDetectedRows += 1
# if enough rows are passed
if self.newDetectedRows == self.linesToPass:
self.cnt = 0
print("All rows passed")
return True, rosIMG
else:
print(self.newDetectedRows, " row(s) passed")
self.cnt = 0
# compute the new features in the new first window
self.computeTurnFeatures()
return False, rosIMG
else:
print("No row passed, continuing")
return False, rosIMG
else:
print("No row passed, continuing")
return False, rosIMG
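The matching core of matchTrackingFeatures (SIFT keypoints, FLANN k-nearest-neighbour matching, Lowe's ratio test with the same 0.8 threshold) can also be run in isolation. A standalone sketch with hypothetical input images, not part of the committed file; cv.SIFT_create is assumed (cv.xfeatures2d.SIFT_create on older OpenCV builds):

import cv2 as cv
import numpy as np

# standalone sketch of the SIFT + FLANN + ratio-test step
img1 = cv.imread("window_prev.png", cv.IMREAD_GRAYSCALE)    # hypothetical inputs
img2 = cv.imread("window_curr.png", cv.IMREAD_GRAYSCALE)

sift = cv.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 1
flann = cv.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
                             dict(checks=50))
matches = flann.knnMatch(np.float32(des1), np.float32(des2), k=2)

# keep a match only if it is clearly better than its runner-up (Lowe's ratio test)
good = [m for m, n in matches if m.distance < 0.8 * n.distance]
print(len(good), "good matches between the two windows")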
93 changes: 93 additions & 0 deletions scripts/geometric.py
@@ -0,0 +1,93 @@
import numpy as np

def computeTheta(lineStart, lineEnd):
"""function to compute theta
Args:
lineStart (_type_): start point of line
lineEnd (_type_): end point of line
Returns:
_type_: angle of line
"""
return -(np.arctan2(abs(lineStart[1]-lineEnd[1]), lineStart[0]-lineEnd[0]))

def lineIntersectWin(m, b, imageHeight, topOffset, bottomOffset):
    """Computes the top and bottom intersections between the line and the window.
    Args:
        m (float): slope
        b (float): intercept
        imageHeight (int): image height
        topOffset (int): window offset from the image top
        bottomOffset (int): window offset from the image bottom
    Returns:
        tuple: top and bottom intersections with the window
    """
# line calculations
b_i = m * bottomOffset + b
t_i = m * (imageHeight - topOffset) + b
return t_i, b_i

def lineIntersectIMG(m, b, imageHeight):
"""function to compute the bottom and top intersect between the line and the image
Args:
m (_type_): slope
b (_type_): bias
Returns:
        tuple: top and bottom intersection points on the image borders
"""
# line calculations
b_i = b
t_i = m * imageHeight + b
return t_i, b_i

def lineIntersectY(m, b, y):
""" function to evaluate the estimated line
Args:
m (_type_): slope
b (_type_): bias
y (_type_): Y loc
Returns:
_type_: X loc
"""
# line calculations
x = m * y + b
return x

def lineIntersectSides(m, b, imageWidth):
    """Computes where the line crosses the left and right image borders.
    Args:
        m (float): slope
        b (float): intercept
        imageWidth (int): image width
    Returns:
        tuple: left and right intersections
    """
l_i = -b / m
r_i = (imageWidth - b)/ m
return l_i, r_i

def isInBox(box, p):
    """Checks whether point p lies inside the box.
    Args:
        box (list): box corner points (bottom-left first, top-right third)
        p (list): point [x, y]
    Returns:
        bool: True if the point is inside the box, False otherwise
    """
bl = box[0]
tr = box[2]
if (p[0] > bl[0] and p[0] < tr[0] and p[1] > bl[1] and p[1] < tr[1]) :
return True
else :
return False

def getLineInImage(line, imageHeight):
    """Returns the line's end points at the top (y = 0) and bottom (y = imageHeight) of the image."""
    up = [line[1], 0]
    down = [line[0], imageHeight]
    return up, down

def getLineRphi(xyCords):
    """Fits a line through the given points.
    Args:
        xyCords (list): (x, y) coordinates of the points
    Returns:
        tuple: slope and intercept of the fitted line (degree-1 np.polyfit)
    """
x_coords, y_coords = zip(*xyCords)
coefficients = np.polyfit(x_coords, y_coords, 1)
return coefficients[0], coefficients[1]
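A small worked example of the line helpers (hypothetical, not part of the committed file), assuming scripts/geometric.py is importable as geometric; getLineRphi returns the slope and intercept of a degree-1 polyfit, which lineIntersectY then evaluates at a given coordinate:

from geometric import getLineRphi, lineIntersectY, isInBox

# hypothetical example of the geometric helpers
pts = [(10, 40), (20, 60), (30, 80)]              # sample point coordinates
m, b = getLineRphi(pts)                            # slope ~2.0, intercept ~20.0
x = lineIntersectY(m, b, 25)                       # evaluates m * 25 + b -> ~70.0
box = [(0, 0), (100, 0), (100, 100), (0, 100)]     # corners, bottom-left first, top-right third
print(x, isInBox(box, [15, 50]))                   # ~70.0 True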