I am working on a robotics project in which the robot has to recognize a pattern on a wall and position itself accordingly. I developed the image processing code on my laptop: it grabs a frame, converts it to HSV, applies a bitwise mask, runs Canny edge detection, and finds contours. I thought I could simply copy the code onto a Raspberry Pi 3, but because of the Pi's lower processing power the FPS there is less than 1. I have been trying to split the code into threads: one thread that captures images, one that converts each image to HSV and filters it, and one that does the contour fitting. To let these communicate with each other, I have made queues.
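Conceptually, the pipeline I am aiming for looks something like this (just a sketch to show the structure; the stage names are placeholders, not my actual code):

import queue
import threading

frames = queue.Queue()   # capture -> filter
edges = queue.Queue()    # filter  -> contour fitting

def filter_stage():
    while True:
        img = frames.get()   # blocks until the capture thread puts a frame
        # ... HSV conversion, inRange mask, Canny would go here ...
        edges.put(img)

def contour_stage():
    while True:
        edged = edges.get()
        # ... findContours / minAreaRect / distance estimate would go here ...

for stage in (filter_stage, contour_stage):
    threading.Thread(target=stage, daemon=True).start()
# The main thread then only does cap.read() and frames.put(img).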
Here is my initial vision code:
import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
import sys


def onmouse(k, x, y, s, p):
    global hsv
    if k == 1:  # left mouse button: print the HSV pixel at (x, y)
        print(hsv[y, x])


def distance_to_camera(Kwidth, focalLength, pixelWidth):
    # Pinhole model: distance = (known width * focal length) / width in pixels
    return (Kwidth * focalLength) / pixelWidth
def contourArea(contours):
    # Build (area, index) pairs for every contour, sorted by area.
    area = []
    for i in range(len(contours)):
        area.append([cv2.contourArea(contours[i]), i])
    area.sort()
    # Accept the largest contour only if it is at least 5x the smallest;
    # otherwise signal "nothing found". (Returning [0, 0] instead of a bare 0
    # keeps the caller's tuple unpacking from raising a TypeError, and the
    # comparison is now between areas, not between the [area, index] lists.)
    if area[-1][0] >= 5 * area[0][0]:
        return area[-1]
    else:
        return [0, 0]
if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    """
    cap.set(3, 1920)
    cap.set(4, 1080)
    cap.set(5, 30)
    time.sleep(2)
    cap.set(15, -8.0)
    """
    KNOWN_WIDTH = 18
    # focalLength = (rect[1][1] * 74) / 18
    focalLength = 341.7075686984592
    distance_data = []
    counter1 = 0
    numFrames = 100
    samples = 1
    start_time = time.time()

    while samples < numFrames:
        # Capture frame-by-frame
        ret, img = cap.read()
        length1, width1, channels = img.shape
        img = cv2.GaussianBlur(img, (5, 5), 0)
        hsv = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2HSV)

        # lower_green = np.array([75, 200, 170])
        # lower_green = np.array([53, 180, 122])
        # lower_green = np.array([70, 120, 120])
        lower_green = np.array([70, 50, 120])
        upper_green = np.array([120, 200, 255])

        mask = cv2.inRange(hsv, lower_green, upper_green)
        res = cv2.bitwise_and(hsv, hsv, mask=mask)
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)  # (currently unused)
        edged = cv2.Canny(res, 35, 125)
        im2, contours, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if len(contours) > 1:
            area, place = contourArea(contours)
            # print(area)
            if area != 0:
                c = contours[place]
                cv2.drawContours(img, c, -1, (0, 0, 255), 3)
                cv2.drawContours(edged, c, -1, (255, 0, 0), 3)
                perimeter = cv2.arcLength(c, True)
                M = cv2.moments(c)
                cx = 0
                cy = 0
                if M['m00'] != 0:
                    cx = int(M['m10'] / M['m00'])  # center-of-mass coordinates
                    cy = int(M['m01'] / M['m00'])
                rect = cv2.minAreaRect(c)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(img, [box], 0, (255, 0, 0), 2)
                cv2.circle(img, (cx, cy), 7, (0, 0, 255), -1)
                cv2.line(img, (int(width1 / 2), int(length1 / 2)), (cx, cy), (255, 0, 0), 2)
                if rect[1][1] != 0:
                    inches = distance_to_camera(KNOWN_WIDTH, focalLength, rect[1][1])
                    # print(inches)
                    distance_data.append(inches)
            counter1 += 1
        samples += 1
"""
cv2.namedWindow("Image w Contours")
cv2.setMouseCallback("Image w Contours", onmouse)
cv2.imshow('Image w Contours', img)
cv2.namedWindow("HSV")
cv2.setMouseCallback("HSV", onmouse)
cv2.imshow('HSV', edged)
if cv2.waitKey(1) & 0xFF == ord('x'):
break
"""
    # When everything is done, release the capture
    totTime = time.time() - start_time
    print("--- %s seconds ---" % totTime)
    print('----%s fps ----' % (numFrames / totTime))
    cap.release()
    cv2.destroyAllWindows()
    """
    Results were:
    --- 13.469419717788696 seconds ---
    ----7.42422480665093 fps ----
    """
    plt.plot(distance_data)
    plt.xlabel('Frame')
    plt.ylabel('Distance to Target (in)')
    plt.title('Distance vs Time From Camera')
    plt.show()
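For context on the hard-coded focalLength: it comes from the usual pinhole-camera approximation. Assuming the commented-out calibration line means the 18 in wide target was measured at a known distance of 74 in, the constant can be reproduced like this (the ~83.12 px calibration width is back-computed from the constant, not a measured value):

KNOWN_WIDTH = 18.0       # target width, inches
KNOWN_DISTANCE = 74.0    # calibration distance, inches (assumed)

def calibrate_focal_length(pixel_width):
    # F = (P * D) / W, from one frame taken at a known distance
    return (pixel_width * KNOWN_DISTANCE) / KNOWN_WIDTH

def distance_to_camera(known_width, focal_length, pixel_width):
    # D' = (W * F) / P', the same relation solved for distance
    return (known_width * focal_length) / pixel_width

print(calibrate_focal_length(83.11805725097656))  # -> 341.7075686984592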
Below is my threaded code, which grabs frames in the main thread and filters them in another thread; I would like a third thread for contour fitting, but even with these two stages the threaded code has nearly the same FPS as the previous code. (These results are from my laptop, not the Raspberry Pi.)
import cv2
import threading
import datetime
import numpy as np
import queue
import time
frame = queue.Queue(0)
canny = queue.Queue(0)
lower_green = np.array([70, 50, 120])
upper_green = np.array([120, 200, 255])
class FilterFrames(threading.Thread):
    def __init__(self, threadID, lock):
        threading.Thread.__init__(self)
        self.lock = lock  # (currently unused)
        self.name = threadID
        self.setDaemon(True)
        self.start()

    def run(self):
        while True:
            img1 = frame.get()  # blocks until the main thread puts a frame
            img1 = cv2.GaussianBlur(img1, (5, 5), 0)
            hsv = cv2.cvtColor(img1.copy(), cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower_green, upper_green)
            res = cv2.bitwise_and(hsv, hsv, mask=mask)
            edged = cv2.Canny(res, 35, 125)
            canny.put(edged)
if __name__ == '__main__':
    lock = threading.Lock()
    numframes = 100
    frames = 0
    cap = cv2.VideoCapture(0)
    filter_thread = FilterFrames(lock=lock, threadID='Filter')

    start_time = time.time()
    while frames < numframes:
        ret, img = cap.read()
        frame.put(img)
        frames += 1
    totTime = time.time() - start_time

    print("--- %s seconds ---" % totTime)
    print('----%s fps ----' % (numframes / totTime))
"""
Results were:
--- 13.590131759643555 seconds ---
----7.358280388197121 fps ----
"""
cap.release()
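One thing I am unsure about with this measurement: the loop only times cap.read() and frame.put(), so filtering work may still be sitting in the queue when the clock stops. To time the pipeline end-to-end, I believe the main block would have to drain the output queue before stopping the timer, something like this sketch (reusing frame, canny, cap, and numframes from the script above):

start_time = time.time()
for _ in range(numframes):
    ret, img = cap.read()
    frame.put(img)

# Wait for every filtered frame to come out the other end, so the
# timer covers the filtering work, not just the capture loop.
for _ in range(numframes):
    edged = canny.get()  # blocks until FilterFrames catches up

totTime = time.time() - start_time
print('---- %s fps end-to-end ----' % (numframes / totTime))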
I was wondering whether I am doing something wrong, whether access to the queues is slowing the code down, and whether I should be using the multiprocessing module instead of threading for this application.
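For reference, this is roughly what I would try with multiprocessing if that is the right direction — a minimal, untested sketch using multiprocessing.Queue (frames are numpy arrays, so I assume they get pickled across the process boundary, which adds some overhead):

import multiprocessing as mp

import cv2
import numpy as np

lower_green = np.array([70, 50, 120])
upper_green = np.array([120, 200, 255])

def filter_worker(frame_q, canny_q):
    # Runs in its own process, so the filtering is not limited by the GIL.
    while True:
        img = frame_q.get()
        if img is None:            # sentinel: no more frames
            break
        img = cv2.GaussianBlur(img, (5, 5), 0)
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_green, upper_green)
        res = cv2.bitwise_and(hsv, hsv, mask=mask)
        canny_q.put(cv2.Canny(res, 35, 125))

if __name__ == '__main__':
    frame_q = mp.Queue()
    canny_q = mp.Queue()
    worker = mp.Process(target=filter_worker, args=(frame_q, canny_q))
    worker.start()

    cap = cv2.VideoCapture(0)
    sent = 0
    for _ in range(100):
        ret, img = cap.read()
        if ret:
            frame_q.put(img)
            sent += 1
    frame_q.put(None)              # tell the worker to exit

    # Drain the results; a child process will not join cleanly while
    # items it produced are still buffered in the queue.
    for _ in range(sent):
        edged = canny_q.get()

    worker.join()
    cap.release()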