# This script uses a raspividyuv subprocess to capture frames at 35 fps and applies Canny edge detection.

# To-Do:
# - set the camera parameters
# - fix typos and make naming consistent
# - improve the comments and split the processing into smaller steps so that Python beginners can follow along
# - capture LED strips and save the video (see the VideoWriter sketch at the end of this script)
# - What about the OV sensor? Putting the camera into sensor mode 4 and requesting a lower-resolution image drops the fps.
# - Does the flashing also occur with this script?

import cv2 as cv
import numpy as np
import subprocess as sp
import time
import atexit

frames = []  # stores the video sequence for the demo
max_frames = 500

N_frames = 0

# Video capture parameters
(w, h) = (416, 320)  # width must be a multiple of 32 and height a multiple of 16
colour_channels = 3
bytesPerFrame = w * h * colour_channels
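# with the settings above this is 416 * 320 * 3 = 399,360 bytes of raw RGB data per frame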
fps = 35  # setting this to 250 will request the maximum framerate possible

sensor_mode = 4

# "raspividyuv" is the command that provides camera frames in YUV format
|
|
# "--output -" specifies stdout as the output
|
|
# "--timeout 0" specifies continuous video
|
|
# "--luma" discards chroma channels, only luminance is sent through the pipeline
|
|
# see "raspividyuv --help" for more information on the parameters
|
|
# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview --mode "+str(sensor_mode)
|
|
videoCmd = f"raspividyuv -w {w} -h {h} --output - --timeout 0 --framerate {fps} --rgb --nopreview --mode {sensor_mode}"
# with this sensor mode the camera delivers 38 fps instead of 72 fps --> what about the camera's OV sensor?
# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview"
videoCmd = videoCmd.split()  # Popen requires that each parameter is a separate string

cameraProcess = sp.Popen(videoCmd, stdout=sp.PIPE)  # start the camera
atexit.register(cameraProcess.terminate)  # closes the camera process in case the script exits unexpectedly
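# (Popen.terminate sends SIGTERM to raspividyuv; atexit also runs on a normal end of the script)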

# wait for the first frame and discard it (only done to measure time more accurately)
rawStream = cameraProcess.stdout.read(bytesPerFrame)

print("Start...")
|
|
|
|
start_time = time.time()
|
|
|
|
while True:
    cameraProcess.stdout.flush()  # try to discard any frames that we were not able to process in time

    frame = np.frombuffer(cameraProcess.stdout.read(bytesPerFrame), dtype=np.uint8)  # raw NumPy array, no JPEG encoding/decoding involved
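    # note: this array is read-only because it wraps an immutable bytes object;
    # cv.cvtColor below returns a new, writable array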

    if frame.size != bytesPerFrame:
        print("Error: Camera stream closed unexpectedly")
        break
    frame.shape = (h, w, colour_channels)  # reshape the flat buffer, here from (399360,) to (320, 416, 3)

    # do the processing here with OpenCV

    frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)  # the camera delivers RGB; swap to the BGR channel order that OpenCV expects

    # convert to greyscale and detect edges
    frame_gs = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    frame = cv.Canny(frame_gs, 50, 150)
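    # the arguments 50 and 150 above are the lower and upper hysteresis thresholds of the Canny detector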

    frames.append(frame)  # save the frame (for the demo)

    N_frames += 1

    # put a frame counter onto the image
    font = cv.FONT_HERSHEY_SIMPLEX
    fontScale = 1
    color = (255, 255, 255)  # font colour in BGR
    thickness = 1  # line thickness in px

    # set text position
    frame_width = int(frame.shape[1])
    frame_height = int(frame.shape[0])
    text_start_position_Y = int(round(frame_height * 0.12))  # vertical start position of the text, 12 % of the frame height
    text_linespacing = 50  # line spacing between two strings in pixels
    # text_start_position_X = int(frame_width/4)  # start text at 1/4 of the image width
    text_start_position_X = int(0)  # start text at the left edge of the image
    pos_1 = (text_start_position_X, text_start_position_Y)
    text_line_1 = f"Frame: {N_frames}"
    cv.putText(frame, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)

    # if N_frames > max_frames: break  # with cv.imshow deactivated, this line can be used to end the program after max_frames frames

    cv.imshow("Current Frame", frame)  # display the image
    pressed_key = cv.waitKey(1) & 0xff
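    # the "& 0xff" above keeps only the lowest 8 bits of the key code so the comparison with ord('q') works reliably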
    if pressed_key == ord('q'):
        break


cv.destroyAllWindows()

end_time = time.time()

cameraProcess.terminate()  # stop the camera

elapsed_seconds = end_time - start_time
print(f"Finish! Result: {N_frames / elapsed_seconds} fps")