Major changes occurred in the Bullseye version of Raspberry Pi OS (Raspbian) for the CSI camera. USB cameras still work the same.
To enable old camera interface you need to enable legacy camera support. In shell window type
sudo raspi-config
Then select 3 Interface Options then I1 Legacy Camera then <Yes>, <Ok> and <Finish>
You can either use my camera utility routines or use the examples below.
My utilities can be installed using pip install camera-util, but you will also need the example files from https://github.com/uutzinger/camera, in particular capture_display.py. Using that example should allow you to set any parameter of the camera you would want. See below.
If you prefer simpler code see next section.
The code below is untested so far. When you copy and paste it, make sure the indentation stays consistent (convert spaces to tabs or tabs to spaces as needed).
# Open Computer Vision and Numpy
import cv2
import numpy as np

# --- Settings ----------------------------------------------------------
camera_num = 0       # V4L2 device index (/dev/video0)
width = 320          # requested frame width in pixels
height = 240         # requested frame height in pixels
fps = 30             # requested frame rate
exposure = 10000     # in microseconds (only used if manual exposure is enabled below)
autoexposure = 0     # 0 will enable auto exposure on the V4L2 backend
buffer = 1           # keep the driver queue short so frames are fresh
# fourcc = "YU12"    # uncomment to force a pixel format

# --- Open the camera ---------------------------------------------------
cam = cv2.VideoCapture(camera_num, apiPreference=cv2.CAP_V4L2)
# Apply settings to camera
# BUG FIX: was CAP_PROP_FRAME_HEIGTH (typo), which raises AttributeError.
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
# cam.set(cv2.CAP_PROP_EXPOSURE, exposure)
cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, autoexposure)
cam.set(cv2.CAP_PROP_FPS, fps)
# This might not be needed on all cameras
cam.set(cv2.CAP_PROP_BUFFERSIZE, buffer)
# cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc))

# --- Display settings --------------------------------------------------
interval_display = 2   # show only every (interval_display+1)-th frame

# Mark location of object
radius = 20            # radius of circle in pixels
color = (255, 0, 0)    # blue color in BGR order
thickness = 2          # line thickness of 2 px

# On some cameras you want to empty the camera buffer first
ret = True
# NOTE: numpy image arrays are indexed (rows=height, cols=width)
img = np.zeros((height, width), dtype=np.uint8)
# Placeholder object location; your image pipeline below should update x, y.
x, y = width // 2, height // 2
i = 0

# --- Main Loop ---------------------------------------------------------
while True:
    ret, img = cam.read()
    if not ret:
        continue  # camera did not deliver a frame; try again
    # Your code goes here
    # ... Image Pipeline (should set x, y to the detected object location)
    # ... Do something with meArm
    # Display images, but not all of them
    if i > interval_display:
        # cv2.circle takes the center as (x, y) = (column, row)
        img = cv2.circle(img, (x, y), radius, color, thickness)
        cv2.imshow('Frame', img)
        # Press Q on keyboard to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        i = 0
    else:
        # BUG FIX: was "i=+1", which assigns +1 every frame so the display
        # branch (i > interval_display) was never reached.
        i += 1

# Release the camera and close all display windows
cam.release()
cv2.destroyAllWindows()
The Raspberry Pi camera version 1 module has the following modes:
320x240 max 120 fps (likely only 90 fps works)
640x480 max 90 fps
1024x768 max 30 fps
1280x720 max 60 fps
960x582 max 45 fps
1920x1080 max 30 fps
2592x1944 max 15 fps
import cv2
################################################################
# Constants
###########
# HSV Threshold for blue (OpenCV hue range is 0..179)
hue = [98, 125]
sat = [131, 255.0]
val = [33, 242]
# HSV Threshold for yellow
# hue = [6, 34]
# sat = [159, 255]
# val = [128, 242]
# Erode parameters
kernel = None                      # None -> default 3x3 structuring element
anchor = (-1, -1)                  # anchor at the kernel center
iterations = 1
border_type = cv2.BORDER_CONSTANT
border_value = (-1)
# Find Contours
external_only = False              # True: outer contours only (RETR_EXTERNAL)
# Filter Contours
min_area = 300.0 # Useful
min_perimeter = 0
min_width = 0
max_width = 1000
min_height = 0
max_height = 1000
solidity = [0.0, 100]
max_vertices = 100.0 # Useful
min_vertices = 0
min_ratio = 0
max_ratio = 1000
###############################################################
# Pipeline
# NOTE: assumes `img` is a BGR frame produced by the capture loop above.
# Convert Color Space, Threshold Color, Erode Noise
img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_thresh = cv2.inRange(img_HSV, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))
img_er = cv2.erode(img_thresh, kernel, anchor, iterations=int(iterations + 0.5),
                   borderType=border_type, borderValue=border_value)
# Find contours
if external_only:
    mode = cv2.RETR_EXTERNAL
else:
    mode = cv2.RETR_LIST
method = cv2.CHAIN_APPROX_SIMPLE
# BUG FIX: was cv2.findContours(input, ...) which passed the Python builtin
# `input` function instead of the eroded binary image.
contours, hierarchy = cv2.findContours(img_er, mode=mode, method=method)
# Filter the contours
output = []
for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    # Disable filter by height and width
    #if (w < min_width or w > max_width):
    #    continue
    #if (h < min_height or h > max_height):
    #    continue
    area = cv2.contourArea(contour)
    if area < min_area:
        continue
    # Disable filter by circumference
    #if (cv2.arcLength(contour, True) < min_perimeter):
    #    continue
    # Disable filter by solidity
    #hull = cv2.convexHull(contour)
    #solid = 100 * area / cv2.contourArea(hull)
    #if (solid < solidity[0] or solid > solidity[1]):
    #    continue
    if len(contour) < min_vertices or len(contour) > max_vertices:
        continue
    # Disable filter by w/h ratio
    #ratio = (float)(w) / h
    #if (ratio < min_ratio or ratio > max_ratio):
    #    continue
    # Keep the contour that passed the filter; keep only the center of its bounding box
    output.append((x + w / 2, y + h / 2))
Tested reference code.
Works on Raspberry Pi OS Bullseye with legacy camera support enabled.
Sends the ball location via ZeroMQ (zmq) to another Python program.
# Open Computer Vision and Numpy
import cv2
import numpy as np
import time
import zmq # install with: sudo pip3 install pyzmq
import pickle
# ZMQ (ZeroMQ) request/reply configuration
# ----------------------------------------
port = 5555 # network port the REP socket binds to; clients connect here
class Point(object):
    """Simple mutable 2-D point used to ship the ball location over ZMQ.

    Attributes:
        x: horizontal coordinate in pixels (default 0.0).
        y: vertical coordinate in pixels (default 0.0).
    """

    def __init__(self, x=0., y=0.):
        self.x = x
        self.y = y
# Location object that gets pickled and sent to the requesting process
ball_loc = Point()
# Open ZMQ reply socket; REP pairs with a REQ socket on the client side
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:%s" % port)  # listen on all network interfaces
# Camera Settings
# ---------------
camera_num =0                # V4L2 device index (/dev/video0)
width = 320                  # requested frame width in pixels
height = 240                 # requested frame height in pixels
center = (160,120)           # image center for a 320x240 frame (x, y)
# Small region of interest around the image center, used to report
# HSV statistics on screen for tuning the color thresholds below.
roi_xstart = 160-5
roi_xend = 160+5
roi_ystart = 120-5
roi_yend = 120+5
fps = 30                     # requested frame rate
displayInterval = 1./fps     # minimum seconds between displayed frames
autoexposure = 0 # 0 will enable auto exposure on the V4L2 backend
buffer = 1                   # keep the driver queue short so frames are fresh
# exposure = 10000 # in microseconds
# fourcc = "YU12"
# Object Detection
# ---------------
# HSV Threshold for blue (OpenCV hue range is 0..179)
bhue = [85, 125]
bsat = [150, 255.0]
bval = [33, 160]
# HSV Threshold for yellow
yhue = [10, 45]
ysat = [80, 255]
yval = [80, 255]
# Erode parameters (noise removal after thresholding)
kernel = None                # None -> default 3x3 structuring element
anchor = (-1, -1)            # anchor at the kernel center
iterations = 1
border_type = cv2.BORDER_CONSTANT
border_value = (-1)
# Find Contours
external_only = False        # True: outer contours only (RETR_EXTERNAL)
# Filter Contours (only area and vertex count are enabled in the loop below)
min_area = 300.0 # Useful
min_perimeter = 0
min_width = 0
max_width = 1000
min_height = 0
max_height = 1000
solidity = [0.0, 100]
max_vertices = 100.0 # Useful
min_vertices = 0
min_ratio = 0
max_ratio = 1000
# Display
display_fps = 10
# Mark location of object
# Radius of circle
radius = 5
# Colors in BGR order
blue = (255, 0, 0)
yellow = (255, 255, 0)       # NOTE: (255,255,0) is cyan in BGR — confirm intent
white = (255,255,255)
# Line thickness of 2 px
thickness = 2
# Text overlay settings (four stacked lines in the upper-left corner)
font = cv2.FONT_HERSHEY_SIMPLEX
textLocation1 = (10,20)
textLocation2 = (10,40)
textLocation3 = (10,60)
textLocation4 = (10,80)
fontScale = 0.5
fontColor = white
lineType = 2
# Open the camera through the V4L2 backend
cam = cv2.VideoCapture(camera_num, apiPreference=cv2.CAP_V4L2)
# Apply settings to camera
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, autoexposure)
cam.set(cv2.CAP_PROP_FPS, fps)
cam.set(cv2.CAP_PROP_BUFFERSIZE, buffer)
# Main Loop
def _filter_contours(contours):
    """Filter contours by area and vertex count.

    Returns a list of (cx, cy, area, n_points) tuples, one per surviving
    contour, where (cx, cy) is the integer center of the bounding box.
    This logic was previously duplicated for the yellow and blue channels.
    """
    out = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        # Disabled: filter by height and width
        #if (w < min_width or w > max_width): continue
        #if (h < min_height or h > max_height): continue
        area = cv2.contourArea(contour)
        if area < min_area:
            continue
        # Disabled: filter by circumference
        #if (cv2.arcLength(contour, True) < min_perimeter): continue
        # Disabled: filter by solidity
        #hull = cv2.convexHull(contour)
        #solid = 100 * area / cv2.contourArea(hull)
        #if (solid < solidity[0] or solid > solidity[1]): continue
        if len(contour) < min_vertices or len(contour) > max_vertices:
            continue
        # Disabled: filter by w/h ratio
        #ratio = (float)(w) / h
        #if (ratio < min_ratio or ratio > max_ratio): continue
        # Keep only the bounding-box center of each surviving contour
        out.append((int(x + w / 2), int(y + h / 2), area, len(contour)))
    return out

# NOTE: numpy image arrays are indexed (rows=height, cols=width)
img = np.zeros((height, width), dtype=np.uint8)
computeTime = 0.                    # exponentially smoothed processing time [s]
lastTime = time.perf_counter()
stop = False
while not stop:
    currentTime = time.perf_counter()
    ret, img = cam.read()
    if not ret:
        continue                    # no frame delivered; skip this iteration
    # Convert color space, threshold both colors, erode noise
    img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Yellow threshold
    imgy_thresh = cv2.inRange(img_HSV, (yhue[0], ysat[0], yval[0]), (yhue[1], ysat[1], yval[1]))
    imgy_er = cv2.erode(imgy_thresh, kernel, anchor, iterations=int(iterations + 0.5),
                        borderType=border_type, borderValue=border_value)
    # Blue threshold
    imgb_thresh = cv2.inRange(img_HSV, (bhue[0], bsat[0], bval[0]), (bhue[1], bsat[1], bval[1]))
    imgb_er = cv2.erode(imgb_thresh, kernel, anchor, iterations=int(iterations + 0.5),
                        borderType=border_type, borderValue=border_value)
    # Find contours
    if external_only:
        mode = cv2.RETR_EXTERNAL
    else:
        mode = cv2.RETR_LIST
    method = cv2.CHAIN_APPROX_SIMPLE
    contoursy, hierarchy = cv2.findContours(imgy_er, mode=mode, method=method)
    contoursb, hierarchy = cv2.findContours(imgb_er, mode=mode, method=method)
    # Filter the contours of both colors
    outputy = _filter_contours(contoursy)
    outputb = _filter_contours(contoursb)
    # Display images, but not all of them, only at the display interval rate
    if currentTime - lastTime > displayInterval:
        lastTime = currentTime
        # HSV statistics inside the center ROI, shown on screen for threshold tuning
        roi = img_HSV[roi_ystart:roi_yend, roi_xstart:roi_xend, :]
        # BUG FIX: the mean previously divided by (extent+1) per axis although
        # the slice [start:end] contains exactly (end-start) pixels per axis.
        roi_HSV_sum = np.sum(roi, axis=(0, 1)) / (roi_yend - roi_ystart) / (roi_xend - roi_xstart)
        roi_HSV_min = np.min(roi, axis=(0, 1))
        roi_HSV_max = np.max(roi, axis=(0, 1))
        # Exponential moving average of the per-frame processing time
        computeTime = 0.9 * computeTime + 0.1 * (time.perf_counter() - currentTime)
        # Mark detected objects (loop variable renamed: `object` shadowed the builtin)
        for obj in outputy:
            img = cv2.circle(img, (obj[0], obj[1]), radius, yellow, thickness)
            # NOTE(review): `lineType` is passed positionally where cv2.putText
            # expects `thickness`; kept as-is to preserve the rendered output.
            cv2.putText(img, "Area {:.0f}".format(obj[2]), (obj[0] + 10, obj[1]), font, fontScale, yellow, lineType)
            cv2.putText(img, "Len {:.0f}".format(obj[3]), (obj[0] + 10, obj[1] + 10), font, fontScale, yellow, lineType)
        for obj in outputb:
            img = cv2.circle(img, (obj[0], obj[1]), radius, blue, thickness)
            # BUG FIX: removed a duplicate putText that drew the raw area value
            # at the same location and was immediately overdrawn below.
            cv2.putText(img, "Area {:.0f}".format(obj[2]), (obj[0] + 10, obj[1]), font, fontScale, blue, lineType)
            cv2.putText(img, "Len {:.0f}".format(obj[3]), (obj[0] + 10, obj[1] + 10), font, fontScale, blue, lineType)
        # Overlay: compute time, ROI mean / min / max HSV, and the ROI outline
        cv2.putText(img, "{:.2f}ms".format(1000 * computeTime), textLocation1, font, fontScale, fontColor, lineType)
        cv2.putText(img, "H:{:.0f}, S:{:.0f}, V:{:.0f}".format(roi_HSV_sum[0], roi_HSV_sum[1], roi_HSV_sum[2]), textLocation2, font, fontScale, fontColor, lineType)
        cv2.putText(img, "H:{}, S:{}, V:{}".format(roi_HSV_min[0], roi_HSV_min[1], roi_HSV_min[2]), textLocation3, font, fontScale, fontColor, lineType)
        cv2.putText(img, "H:{}, S:{}, V:{}".format(roi_HSV_max[0], roi_HSV_max[1], roi_HSV_max[2]), textLocation4, font, fontScale, fontColor, lineType)
        cv2.rectangle(img, (roi_xstart, roi_ystart), (roi_xend, roi_yend), white, thickness)
        cv2.imshow('Frame', img)
        cv2.imshow('Thresh Yellow', imgy_er)
        cv2.imshow('Thresh Blue', imgb_er)
        # Press Q in a display window to exit
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            stop = True
    # Post ZeroMQ reply if a location was requested
    try:
        message = socket.recv_string(flags=zmq.NOBLOCK)  # did a client request the ball location?
        if message == "loc":
            ball_loc.x = -1      # (-1, -1) signals "no ball found"
            ball_loc.y = -1
            if len(outputy) > 0:     # report the first yellow detection, if any
                ball_loc.x = outputy[0][0]
                ball_loc.y = outputy[0][1]
            if len(outputb) > 0:     # blue overrides yellow when both are present
                ball_loc.x = outputb[0][0]
                ball_loc.y = outputb[0][1]
            # Serialize the ball_loc object. NOTE: pickle is only safe because
            # both ends of this socket are our own trusted programs.
            p = pickle.dumps(ball_loc)
            socket.send(p)
    except zmq.Again:
        pass                     # no request pending; keep streaming

# Release the camera, windows, and ZMQ resources on exit
cam.release()
cv2.destroyAllWindows()
socket.close()
context.term()