SA Kenan first init

This commit is contained in:
Kenan Gömek 2022-05-09 20:30:26 +02:00
commit 526dafd894
57 changed files with 13582 additions and 0 deletions

View File

@ -0,0 +1,237 @@
# Date: 18.12.2021
# Author: Kenan Gömek
# Das Skript stellt die unterschiedlichen Farbkanäle der aufgenommenen Bilder dar und erstellt schnitte, um die Pixelwerte anzuzeigen.
# Update: 18.02.2022
# Update Comment: Multiprocessing
# Import needed Packages
import cv2 as cv
import numpy as np
import time
from datetime import datetime
import os
from matplotlib import pyplot as plt
from multiprocessing import Process, cpu_count, Pool
# define User Input
# No vowels in path name! (NOTE(review): presumably means "no umlauts" — the path below contains vowels; confirm)
PATH_IMAGE_FOLDER = r'U:\bwsyncshare\Auswertung'  # folder holding the captured PNG images to analyse
COMMENT = ""  # User comment for plot; appended below each figure when non-empty
# define functions
def calc_arithmetic_mean_of_brightness_per_pixel(r, g, b):
    """Return the mean brightness per pixel relative to the uint8 maximum (range 0-1).

    Mittlere Helligkeit pro Pixel berechnen.
    """
    # Promote one channel to uint16 so the per-pixel sum (up to 255*3 = 765)
    # cannot overflow; numpy then promotes the whole expression to uint16.
    channel_sum = r.astype('uint16') + g + b
    per_pixel_brightness = channel_sum / 3  # float array, one brightness value per pixel
    mean_brightness = np.mean(per_pixel_brightness)
    # Normalise by the maximum representable uint8 value to get a 0-1 ratio.
    return mean_brightness / 255
def image_analysis(image_name, image_number, number_of_total_images, comment):
    """Analyse one image and save an annotated channel plot as a PDF.

    Shows the red, green and blue channel of the image, marks a horizontal and
    a vertical cut through the brightest pixel of the dominating colour
    channel, plots the pixel values along both cuts, annotates the figure with
    pixel statistics plus the capture log, and writes '<image_name>.pdf' into
    PATH_IMAGE_FOLDER. Intended as a multiprocessing Pool worker, hence all
    arguments are per-image values.

    Parameters:
        image_name: file name of the PNG inside PATH_IMAGE_FOLDER.
        image_number: zero-based index of the image (progress output only).
        number_of_total_images: total image count (progress output only).
        comment: user comment appended below the figure; may be "".
    """
    print(f"Current image {image_number+1}/{number_of_total_images}: {image_name}")

    # Split colour channels; OpenCV loads BGR, so reverse the channel axis for RGB display.
    img_bgr = cv.imread(os.path.join(PATH_IMAGE_FOLDER,image_name), cv.IMREAD_COLOR)
    b,g,r = cv.split(img_bgr)
    img_rgb = img_bgr[:, :, ::-1]

    # Create Cuts and display Pixel Values
    image_width = r.shape[1]
    if image_width < 1000:
        plot_marker_offset = 4  # Thickness of marker-line in plot in pixels
    elif image_width >= 1000:
        plot_marker_offset = 10  # Thickness of marker-line in plot in pixels

    # Identify dominating colour channel and set cut row and column
    max_pixelvalue_red = r.max(); max_pixelvalue_green = g.max(); max_pixelvalue_blue = b.max()
    max_pixelvalue = np.max([max_pixelvalue_red, max_pixelvalue_green, max_pixelvalue_blue])
    idx_max_pixelvalue_red = np.unravel_index(r.argmax(), r.shape)  # row=idx_..[0] column=idx_..[1]
    idx_max_pixelvalue_green = np.unravel_index(g.argmax(), g.shape)  # row=idx_..[0] column=idx_..[1]
    idx_max_pixelvalue_blue = np.unravel_index(b.argmax(), b.shape)  # row=idx_..[0] column=idx_..[1]
    if max_pixelvalue == max_pixelvalue_red:
        idx_max_pixelvalue = idx_max_pixelvalue_red; msg_dominating_colourchannel = 'red'
    elif max_pixelvalue == max_pixelvalue_green:
        idx_max_pixelvalue = idx_max_pixelvalue_green; msg_dominating_colourchannel = 'green'
    elif max_pixelvalue == max_pixelvalue_blue:
        idx_max_pixelvalue = idx_max_pixelvalue_blue; msg_dominating_colourchannel = 'blue'
    cut_row=idx_max_pixelvalue[0]; cut_column=idx_max_pixelvalue[1]

    # Red channel
    # linspace(start, stop, num): stop is included, num values are generated.
    # Because arrays start at index 0, an image with e.g. 3280 columns needs
    # 3280 x-values running from 0 to 3279 in steps of 1.
    x_red_h = np.linspace(0, r.shape[1]-1, r.shape[1]); y_red_h = r[cut_row,:]  # data horizontal cut ->width (e.g.: 3280)
    x_red_v = np.linspace(0, r.shape[0]-1, r.shape[0]); y_red_v = r[:,cut_column]  # data vertical cut ->height (e.g.: 2464)
    msg1_red = f"Maximum Pixel value in red channel: {max_pixelvalue_red}"; print(msg1_red)
    msg2_red = f"Index of max Value in red channel (row, colum): {idx_max_pixelvalue_red}"; print(msg2_red)
    msg3_red = f"Maximum Pixel value in marked-row {cut_row}: {np.max(y_red_h)}"; print(msg3_red)
    msg4_red = f"Maximum Pixel value in marked-column {cut_column}: {np.max(y_red_v)}"; print(msg4_red)
    r_copy = r.copy(); r_copy[cut_row:cut_row+plot_marker_offset,:]=255; r_copy[:, cut_column:cut_column+plot_marker_offset]=255  # manipulate image for displaying in marked plot

    # Green channel
    x_green_h = np.linspace(0, g.shape[1]-1, g.shape[1]); y_green_h = g[cut_row,:]  # data horizontal cut
    x_green_v = np.linspace(0, g.shape[0]-1, g.shape[0]); y_green_v = g[:,cut_column]  # data vertical cut
    msg1_green = f"Maximum Pixel value in green channel: {max_pixelvalue_green}"; print(msg1_green)
    msg2_green = f"Index of max Value in green channel (row, colum): {idx_max_pixelvalue_green}"; print(msg2_green)
    msg3_green = f"Maximum Pixel value in marked-row {cut_row}: {np.max(y_green_h)}"; print(msg3_green)
    msg4_green = f"Maximum Pixel value in marked-column {cut_column}: {np.max(y_green_v)}"; print(msg4_green)
    g_copy = g.copy(); g_copy[cut_row:cut_row+plot_marker_offset,:]=255; g_copy[:, cut_column:cut_column+plot_marker_offset]=255  # manipulate image for displaying in marked plot

    # Blue channel
    x_blue_h = np.linspace(0, b.shape[1]-1, b.shape[1]); y_blue_h = b[cut_row,:]  # data horizontal cut
    x_blue_v = np.linspace(0, b.shape[0]-1, b.shape[0]); y_blue_v = b[:,cut_column]  # data vertical cut
    msg1_blue = f"Maximum Pixel value in blue channel: {max_pixelvalue_blue}"; print(msg1_blue)
    msg2_blue = f"Index of max Value in blue channel (row, colum): {idx_max_pixelvalue_blue}"; print(msg2_blue)
    msg3_blue = f"Maximum Pixel value in marked-row {cut_row}: {np.max(y_blue_h)}"; print(msg3_blue)
    msg4_blue = f"Maximum Pixel value in marked-column {cut_column}: {np.max(y_blue_v)}"; print(msg4_blue)
    b_copy = b.copy(); b_copy[cut_row:cut_row+plot_marker_offset,:]=255; b_copy[:, cut_column:cut_column+plot_marker_offset]=255  # manipulate image for displaying in marked plot

    # Create Plots: 4x4 grid — row 0: original, rows 1-3: red/green/blue
    # (channel image, marked channel image, horizontal cut, vertical cut).
    fig1, ((ax_orig_1,ax01,ax02,ax03),(ax_red_1, ax_red_2, ax_red_3, ax_red_4),
           (ax_green_1, ax_green_2, ax_green_3, ax_green_4),(ax_blue_1, ax_blue_2, ax_blue_3, ax_blue_4)) \
        = plt.subplots(4, 4, figsize=(30,25))
    fig1.suptitle(f'Image: {image_name}', y=0.9)
    yticks=np.append(np.arange(0,230,25), 255)  # set yticks for cuts
    xlim_max_h=r.shape[1]  # xlim for horizontal cut. No special reason for choosing red channel.
    xlim_max_v=r.shape[0]  # xlim for vertical cut. No special reason for choosing red channel.
    ax_orig_1.imshow(img_rgb); ax_orig_1.set_title("Original Image");
    ax_orig_1.set_xlabel('Width=H=Columns'); ax_orig_1.set_ylabel('Heigth=V=Rows')
    # red channel
    ax_red_1.imshow(r, cmap = 'gray'); ax_red_1.set_title("Red Channel");
    ax_red_1.set_xlabel('Width=H=Columns'); ax_red_1.set_ylabel('Heigth=V=Rows')
    ax_red_2.imshow(r_copy, cmap = 'gray'); ax_red_2.set_title("Red Channel - marked");
    ax_red_2.set_xlabel('Width=H=Columns'); ax_red_2.set_ylabel('Heigth=V=Rows')
    ax_red_3.plot(x_red_h,y_red_h, linewidth=2.0); ax_red_3.set_title("Horizontal Cut");
    ax_red_3.grid(True); ax_red_3.set_ylim(ymin=0, ymax=260); ax_red_3.set_yticks(yticks); ax_red_3.set_xlim(0,xlim_max_h)
    ax_red_3.set_xlabel('Width=H=Columns'); ax_red_3.set_ylabel('Pixel Value')
    ax_red_4.plot(x_red_v,y_red_v, linewidth=2.0); ax_red_4.set_title("Vertical Cut");
    ax_red_4.grid(True); ax_red_4.set_ylim(ymin=0, ymax=260); ax_red_4.set_yticks(yticks); ax_red_4.set_xlim(0, xlim_max_v)
    ax_red_4.set_xlabel('Heigth=V=Rows'); ax_red_4.set_ylabel('Pixel Value')
    # green channel
    ax_green_1.imshow(g, cmap = 'gray'); ax_green_1.set_title("Green Channel");
    ax_green_1.set_xlabel('Width=H=Columns'); ax_green_1.set_ylabel('Heigth=V=Rows')
    ax_green_2.imshow(g_copy, cmap = 'gray'); ax_green_2.set_title("Green Channel - marked");
    ax_green_2.set_xlabel('Width=H=Columns'); ax_green_2.set_ylabel('Heigth=V=Rows')
    ax_green_3.plot(x_green_h,y_green_h, linewidth=2.0); ax_green_3.set_title("Horizontal Cut");
    ax_green_3.grid(True); ax_green_3.set_ylim(ymin=0, ymax=260); ax_green_3.set_yticks(yticks); ax_green_3.set_xlim(0,xlim_max_h)
    ax_green_3.set_xlabel('Width=H=Columns'); ax_green_3.set_ylabel('Pixel Value')
    ax_green_4.plot(x_green_v,y_green_v, linewidth=2.0); ax_green_4.set_title("Vertical Cut");
    ax_green_4.grid(True); ax_green_4.set_ylim(ymin=0, ymax=260); ax_green_4.set_yticks(yticks); ax_green_4.set_xlim(0, xlim_max_v)
    ax_green_4.set_xlabel('Heigth=V=Rows'); ax_green_4.set_ylabel('Pixel Value')
    # blue channel
    ax_blue_1.imshow(b, cmap = 'gray'); ax_blue_1.set_title("Blue Channel");
    ax_blue_1.set_xlabel('Width=H=Columns'); ax_blue_1.set_ylabel('Heigth=V=Rows')
    ax_blue_2.imshow(b_copy, cmap = 'gray'); ax_blue_2.set_title("Blue Channel - marked");
    ax_blue_2.set_xlabel('Width=H=Columns'); ax_blue_2.set_ylabel('Heigth=V=Rows')
    ax_blue_3.plot(x_blue_h,y_blue_h, linewidth=2.0); ax_blue_3.set_title("Horizontal Cut");
    ax_blue_3.grid(True); ax_blue_3.set_ylim(ymin=0, ymax=260); ax_blue_3.set_yticks(yticks); ax_blue_3.set_xlim(0,xlim_max_h)
    ax_blue_3.set_xlabel('Width=H=Columns'); ax_blue_3.set_ylabel('Pixel Value')
    ax_blue_4.plot(x_blue_v,y_blue_v, linewidth=2.0); ax_blue_4.set_title("Vertical Cut");
    ax_blue_4.grid(True); ax_blue_4.set_ylim(ymin=0, ymax=260); ax_blue_4.set_yticks(yticks); ax_blue_4.set_xlim(0, xlim_max_v)
    ax_blue_4.set_xlabel('Heigth=V=Rows'); ax_blue_4.set_ylabel('Pixel Value')

    # Calculate overall brightness per pixel of the image (mittlere Helligkeit pro Pixel).
    arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(r,g,b)
    if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"  # value in percent
    elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
        # Bugfix: 1e3 converts the 0-1 ratio to per mil; the previous factor
        # 10e3 (= 10000) overstated the reported value by a factor of 10.
        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*1e3,2)} per mil"  # value in promille
    print(msg1_brightness)

    # add pixel stats under Figure
    pixelstats_red= '\n'.join([msg1_red, msg2_red, msg3_red, msg4_red])
    pixelstats_green= '\n'.join([msg1_green, msg2_green, msg3_green, msg4_green])
    pixelstats_blue= '\n'.join([msg1_blue, msg2_blue, msg3_blue, msg4_blue])
    pixelstats_overall_brightness = '\n'.join([msg1_brightness])
    pixelstats = '\n\n'.join([f"pixel stats: {msg_dominating_colourchannel} channel dominating",
                              pixelstats_red, pixelstats_green, pixelstats_blue, pixelstats_overall_brightness])
    text_x_pos = 0.1; text_y_pos = -0.015  # text position: 0,0 is lower-left and 1,1 is upper-right
    fig1.text(text_x_pos, text_y_pos, pixelstats, ha='left')

    # add Comment under Figure: append the capture log (first *.txt in the
    # folder) plus the user comment; IndexError means no log file exists.
    text_x_pos = 0.5; text_y_pos = -0.025  # text position: 0,0 is lower-left and 1,1 is upper-right
    log_filename=None
    try:
        log_filename=[f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.txt')][0]
        if log_filename:
            with open(os.path.join(PATH_IMAGE_FOLDER,log_filename), encoding='utf-8') as f:
                log_text = f.readlines()
            if comment != "":
                comment = '\nPlot Comment: '+comment
                log_text.append(comment)
            txt=''.join(log_text)
            fig1.text(text_x_pos, text_y_pos, txt, ha='left')
    except IndexError:
        if comment != "":
            comment = '\nPlot Comment: '+comment
            fig1.text(text_x_pos, text_y_pos, comment, ha='left')
        else:
            pass

    # handle numpy memory error on Windows: skip PDFs that already exist
    switch_overwrite=0  # do not overwrite files, if they exist
    if switch_overwrite == 1:
        fig1.savefig(os.path.join(PATH_IMAGE_FOLDER,f'{image_name}.pdf'), bbox_inches='tight')  # save plot
        print('Save pdf')
    else:
        if os.path.isfile(os.path.join(PATH_IMAGE_FOLDER,f'{image_name}.pdf')):
            pass  # skip this pdf file, because it already exists
        else:
            fig1.savefig(os.path.join(PATH_IMAGE_FOLDER,f'{image_name}.pdf'), bbox_inches='tight')  # save plot
            print('Save pdf')

    plt.close('all')  # close all figures to free memory between images
    print('')  # new line for better readability in console
# start
def main():
    """Analyse every PNG in PATH_IMAGE_FOLDER in a process pool and report the runtime."""
    t0 = time.perf_counter()  # bugfix: reference point so the reported runtime covers main() only
    number_of_CPUS = cpu_count()
    image_filenames = [f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.png')]
    image_filenames.sort()
    number_of_total_images = len(image_filenames)
    list_numbers = list(range(number_of_total_images))
    # Pool.starmap takes one iterable of argument tuples, so the per-call
    # constants are repeated once per image and zipped with the filenames.
    list_number_of_total_images = [number_of_total_images]*number_of_total_images
    list_COMMENT = [COMMENT]*number_of_total_images
    print(number_of_total_images)
    print(len(list_numbers))
    data_to_pass = list(zip(image_filenames, list_numbers, list_number_of_total_images, list_COMMENT))
    with Pool(number_of_CPUS) as pool:
        pool.starmap(image_analysis, iterable=data_to_pass)
    # bugfix: previously printed time.perf_counter()/60, i.e. time since
    # interpreter start, not the elapsed processing time.
    t1 = round((time.perf_counter()-t0)/60, 2)
    print(f'Script finished in {t1} min')


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,222 @@
# Date: 18.12.2021
# Author: Kenan Gömek
# Das Skript stellt die unterschiedlichen Farbkanäle der aufgenommenen Bilder dar und erstellt schnitte, um die Pixelwerte anzuzeigen.
# Update:21.12.2021
# Update Comment: Dominierender Farbkanal wird automatisch erkannt und die Zeilen- und Spaltenmarkierungen werden alle nach dem hellsten
# Punkt im dominierenden Farbkanal gerichtet
# Import needed Packages
import cv2 as cv
import numpy as np
import time
from datetime import datetime
import os
from matplotlib import pyplot as plt
# define User Input
# No vowels in path name! (NOTE(review): presumably means "no umlauts" — the path below contains vowels; confirm)
PATH_IMAGE_FOLDER = r'U:\bwsyncshare\Auswertung'  # folder holding the captured PNG images to analyse
COMMENT = ""  # User Comment for plot; appended below each figure when non-empty

# select cut mode for determining pixel values
cut_mode = 'auto'  # options: 'auto' (cut through brightest pixel), 'manual' (use ROW/COLUMN below)
ROW = 152  # Row-Number for Cut [Pixels] for cut_mode 'manual'; index begins from 0
COLUMN = 1742  # Column-Number for Cut [Pixels] for cut_mode 'manual'; index begins from 0
# define functions
def calc_arithmetic_mean_of_brightness_per_pixel(r, g, b):
    """Calculate the overall brightness per pixel of the image, relative to 255 (range 0-1).

    Mittlere Helligkeit pro Pixel berechnen.
    """
    number_of_colour_channels = 3
    max_possible_brightness = 255  # maximum possible value of a uint8 channel
    height = r.shape[0]
    width = r.shape[1]
    # Cast one channel to uint16 first: the per-pixel sum can reach 765,
    # which would overflow uint8; the addition then promotes g and b too.
    summed_channels = r.astype('uint16') + g + b
    total_brightness = np.sum(summed_channels / number_of_colour_channels)
    brightness_per_pixel = total_brightness / (width * height)
    return brightness_per_pixel / max_possible_brightness
# start
# Main loop: for every captured PNG, plot the three colour channels with a
# horizontal and a vertical cut (through the brightest pixel of the dominating
# channel, or through ROW/COLUMN in 'manual' cut_mode) and save the annotated
# figure as '<image_name>.pdf' next to the image.
image_filenames = [f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.png')]
image_filenames.sort()
for image_number, image_name in enumerate(image_filenames):
    print(f"Current image {image_number+1}/{len(image_filenames)}: {image_name}")

    # Split colour channels; OpenCV loads BGR, reversing the channel axis gives RGB.
    img_bgr = cv.imread(os.path.join(PATH_IMAGE_FOLDER,image_name), cv.IMREAD_COLOR)
    b, g, r = cv.split(img_bgr)
    img_rgb = img_bgr[:, :, ::-1]

    # Create Cuts and display Pixel Values
    image_width = r.shape[1]
    if image_width < 1000:
        plot_marker_offset = 4  # Thickness of marker-line in plot in pixels
    elif image_width >= 1000:
        plot_marker_offset = 10  # Thickness of marker-line in plot in pixels

    # Identify dominating colour channel and set cut row and column
    max_pixelvalue_red = r.max(); max_pixelvalue_green = g.max(); max_pixelvalue_blue = b.max()
    max_pixelvalue = np.max([max_pixelvalue_red, max_pixelvalue_green, max_pixelvalue_blue])
    idx_max_pixelvalue_red = np.unravel_index(r.argmax(), r.shape)  # row=idx_..[0] column=idx_..[1]
    idx_max_pixelvalue_green = np.unravel_index(g.argmax(), g.shape)  # row=idx_..[0] column=idx_..[1]
    idx_max_pixelvalue_blue = np.unravel_index(b.argmax(), b.shape)  # row=idx_..[0] column=idx_..[1]
    if max_pixelvalue == max_pixelvalue_red:
        idx_max_pixelvalue = idx_max_pixelvalue_red; msg_dominating_colourchannel = 'red'
    elif max_pixelvalue == max_pixelvalue_green:
        idx_max_pixelvalue = idx_max_pixelvalue_green; msg_dominating_colourchannel = 'green'
    elif max_pixelvalue == max_pixelvalue_blue:
        idx_max_pixelvalue = idx_max_pixelvalue_blue; msg_dominating_colourchannel = 'blue'
    if cut_mode == 'auto':
        cut_row = idx_max_pixelvalue[0]; cut_column = idx_max_pixelvalue[1]
    elif cut_mode == 'manual':
        cut_row = ROW; cut_column = COLUMN
    else:
        print('Wrong cut_mode. End script.'); quit()

    # Red channel
    # linspace(start, stop, num): stop is included, num values are generated.
    # Because arrays start at index 0, an image with e.g. 3280 columns needs
    # 3280 x-values running from 0 to 3279 in steps of 1.
    x_red_h = np.linspace(0, r.shape[1]-1, r.shape[1]); y_red_h = r[cut_row,:]  # data horizontal cut ->width (e.g.: 3280)
    x_red_v = np.linspace(0, r.shape[0]-1, r.shape[0]); y_red_v = r[:,cut_column]  # data vertical cut ->height (e.g.: 2464)
    msg1_red = f"Maximum Pixel value in red channel: {max_pixelvalue_red}"; print(msg1_red)
    msg2_red = f"Index of max Value in red channel (row, colum): {idx_max_pixelvalue_red}"; print(msg2_red)
    msg3_red = f"Maximum Pixel value in marked-row {cut_row}: {np.max(y_red_h)}"; print(msg3_red)
    msg4_red = f"Maximum Pixel value in marked-column {cut_column}: {np.max(y_red_v)}"; print(msg4_red)
    r_copy = r.copy(); r_copy[cut_row:cut_row+plot_marker_offset,:]=255; r_copy[:, cut_column:cut_column+plot_marker_offset]=255  # manipulate image for displaying in marked plot

    # Green channel
    x_green_h = np.linspace(0, g.shape[1]-1, g.shape[1]); y_green_h = g[cut_row,:]  # data horizontal cut
    x_green_v = np.linspace(0, g.shape[0]-1, g.shape[0]); y_green_v = g[:,cut_column]  # data vertical cut
    msg1_green = f"Maximum Pixel value in green channel: {max_pixelvalue_green}"; print(msg1_green)
    msg2_green = f"Index of max Value in green channel (row, colum): {idx_max_pixelvalue_green}"; print(msg2_green)
    msg3_green = f"Maximum Pixel value in marked-row {cut_row}: {np.max(y_green_h)}"; print(msg3_green)
    msg4_green = f"Maximum Pixel value in marked-column {cut_column}: {np.max(y_green_v)}"; print(msg4_green)
    g_copy = g.copy(); g_copy[cut_row:cut_row+plot_marker_offset,:]=255; g_copy[:, cut_column:cut_column+plot_marker_offset]=255  # manipulate image for displaying in marked plot

    # Blue channel
    x_blue_h = np.linspace(0, b.shape[1]-1, b.shape[1]); y_blue_h = b[cut_row,:]  # data horizontal cut
    x_blue_v = np.linspace(0, b.shape[0]-1, b.shape[0]); y_blue_v = b[:,cut_column]  # data vertical cut
    msg1_blue = f"Maximum Pixel value in blue channel: {max_pixelvalue_blue}"; print(msg1_blue)
    msg2_blue = f"Index of max Value in blue channel (row, colum): {idx_max_pixelvalue_blue}"; print(msg2_blue)
    msg3_blue = f"Maximum Pixel value in marked-row {cut_row}: {np.max(y_blue_h)}"; print(msg3_blue)
    msg4_blue = f"Maximum Pixel value in marked-column {cut_column}: {np.max(y_blue_v)}"; print(msg4_blue)
    b_copy = b.copy(); b_copy[cut_row:cut_row+plot_marker_offset,:]=255; b_copy[:, cut_column:cut_column+plot_marker_offset]=255  # manipulate image for displaying in marked plot

    # Create Plots: 4x4 grid — row 0: original, rows 1-3: red/green/blue
    # (channel image, marked channel image, horizontal cut, vertical cut).
    fig1, ((ax_orig_1,ax01,ax02,ax03),(ax_red_1, ax_red_2, ax_red_3, ax_red_4),
           (ax_green_1, ax_green_2, ax_green_3, ax_green_4),(ax_blue_1, ax_blue_2, ax_blue_3, ax_blue_4)) \
        = plt.subplots(4, 4, figsize=(30,25))
    fig1.suptitle(f'Image: {image_name}', y=0.9)
    yticks=np.append(np.arange(0,230,25), 255)  # set yticks for cuts
    xlim_max_h=r.shape[1]  # xlim for horizontal cut. No special reason for choosing red channel.
    xlim_max_v=r.shape[0]  # xlim for vertical cut. No special reason for choosing red channel.
    ax_orig_1.imshow(img_rgb); ax_orig_1.set_title("Original Image");
    ax_orig_1.set_xlabel('Width=H=Columns'); ax_orig_1.set_ylabel('Heigth=V=Rows')
    # red channel
    ax_red_1.imshow(r, cmap = 'gray'); ax_red_1.set_title("Red Channel");
    ax_red_1.set_xlabel('Width=H=Columns'); ax_red_1.set_ylabel('Heigth=V=Rows')
    ax_red_2.imshow(r_copy, cmap = 'gray'); ax_red_2.set_title("Red Channel - marked");
    ax_red_2.set_xlabel('Width=H=Columns'); ax_red_2.set_ylabel('Heigth=V=Rows')
    ax_red_3.plot(x_red_h,y_red_h, linewidth=2.0); ax_red_3.set_title("Horizontal Cut");
    ax_red_3.grid(True); ax_red_3.set_ylim(ymin=0, ymax=260); ax_red_3.set_yticks(yticks); ax_red_3.set_xlim(0,xlim_max_h)
    ax_red_3.set_xlabel('Width=H=Columns'); ax_red_3.set_ylabel('Pixel Value')
    ax_red_4.plot(x_red_v,y_red_v, linewidth=2.0); ax_red_4.set_title("Vertical Cut");
    ax_red_4.grid(True); ax_red_4.set_ylim(ymin=0, ymax=260); ax_red_4.set_yticks(yticks); ax_red_4.set_xlim(0, xlim_max_v)
    ax_red_4.set_xlabel('Heigth=V=Rows'); ax_red_4.set_ylabel('Pixel Value')
    # green channel
    ax_green_1.imshow(g, cmap = 'gray'); ax_green_1.set_title("Green Channel");
    ax_green_1.set_xlabel('Width=H=Columns'); ax_green_1.set_ylabel('Heigth=V=Rows')
    ax_green_2.imshow(g_copy, cmap = 'gray'); ax_green_2.set_title("Green Channel - marked");
    ax_green_2.set_xlabel('Width=H=Columns'); ax_green_2.set_ylabel('Heigth=V=Rows')
    ax_green_3.plot(x_green_h,y_green_h, linewidth=2.0); ax_green_3.set_title("Horizontal Cut");
    ax_green_3.grid(True); ax_green_3.set_ylim(ymin=0, ymax=260); ax_green_3.set_yticks(yticks); ax_green_3.set_xlim(0,xlim_max_h)
    ax_green_3.set_xlabel('Width=H=Columns'); ax_green_3.set_ylabel('Pixel Value')
    ax_green_4.plot(x_green_v,y_green_v, linewidth=2.0); ax_green_4.set_title("Vertical Cut");
    ax_green_4.grid(True); ax_green_4.set_ylim(ymin=0, ymax=260); ax_green_4.set_yticks(yticks); ax_green_4.set_xlim(0, xlim_max_v)
    ax_green_4.set_xlabel('Heigth=V=Rows'); ax_green_4.set_ylabel('Pixel Value')
    # blue channel
    ax_blue_1.imshow(b, cmap = 'gray'); ax_blue_1.set_title("Blue Channel");
    ax_blue_1.set_xlabel('Width=H=Columns'); ax_blue_1.set_ylabel('Heigth=V=Rows')
    ax_blue_2.imshow(b_copy, cmap = 'gray'); ax_blue_2.set_title("Blue Channel - marked");
    ax_blue_2.set_xlabel('Width=H=Columns'); ax_blue_2.set_ylabel('Heigth=V=Rows')
    ax_blue_3.plot(x_blue_h,y_blue_h, linewidth=2.0); ax_blue_3.set_title("Horizontal Cut");
    ax_blue_3.grid(True); ax_blue_3.set_ylim(ymin=0, ymax=260); ax_blue_3.set_yticks(yticks); ax_blue_3.set_xlim(0,xlim_max_h)
    ax_blue_3.set_xlabel('Width=H=Columns'); ax_blue_3.set_ylabel('Pixel Value')
    ax_blue_4.plot(x_blue_v,y_blue_v, linewidth=2.0); ax_blue_4.set_title("Vertical Cut");
    ax_blue_4.grid(True); ax_blue_4.set_ylim(ymin=0, ymax=260); ax_blue_4.set_yticks(yticks); ax_blue_4.set_xlim(0, xlim_max_v)
    ax_blue_4.set_xlabel('Heigth=V=Rows'); ax_blue_4.set_ylabel('Pixel Value')

    # Calculate overall brightness per pixel of the image (mittlere Helligkeit pro Pixel).
    arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(r,g,b)
    if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"  # value in percent
    elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
        # NOTE(review): 10e3 is 10000 — a per-mil conversion presumably needs
        # the factor 1e3; the printed value looks 10x too large. Confirm.
        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} per mil"  # value in promille
    print(msg1_brightness)

    # add pixel stats under Figure
    pixelstats_red= '\n'.join([msg1_red, msg2_red, msg3_red, msg4_red])
    pixelstats_green= '\n'.join([msg1_green, msg2_green, msg3_green, msg4_green])
    pixelstats_blue= '\n'.join([msg1_blue, msg2_blue, msg3_blue, msg4_blue])
    pixelstats_overall_brightness = '\n'.join([msg1_brightness])
    pixelstats = '\n\n'.join([f"pixel stats: {msg_dominating_colourchannel} channel dominating",
                              pixelstats_red, pixelstats_green, pixelstats_blue, pixelstats_overall_brightness])
    text_x_pos = 0.1; text_y_pos = -0.015  # text position: 0,0 is lower-left and 1,1 is upper-right
    fig1.text(text_x_pos, text_y_pos, pixelstats, ha='left')

    # add Comment under Figure: append the capture log (first *.txt in the
    # folder) plus the user comment; IndexError means no log file exists.
    text_x_pos = 0.5; text_y_pos = -0.025  # text position: 0,0 is lower-left and 1,1 is upper-right
    log_filename=None
    try:
        log_filename=[f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.txt')][0]
        if log_filename:
            with open(os.path.join(PATH_IMAGE_FOLDER,log_filename), encoding='utf-8') as f:
                log_text = f.readlines()
            # NOTE(review): rebinding the module-level COMMENT inside the loop
            # prepends '\nPlot Comment: ' again on every later image once a
            # non-empty comment is set — presumably a local copy was intended.
            if COMMENT != "":
                COMMENT = '\nPlot Comment: '+COMMENT
                log_text.append(COMMENT)
            txt=''.join(log_text)
            fig1.text(text_x_pos, text_y_pos, txt, ha='left')
    except IndexError:
        if COMMENT != "":
            COMMENT = '\nPlot Comment: '+COMMENT
            fig1.text(text_x_pos, text_y_pos, COMMENT, ha='left')
        else:
            pass

    # handle numpy memory error on Windows: skip PDFs that already exist
    switch_overwrite=0  # do not overwrite files, if they exist
    if switch_overwrite == 1:
        fig1.savefig(os.path.join(PATH_IMAGE_FOLDER,f'{image_name}.pdf'), bbox_inches='tight')  # save plot
        print('Save pdf')
    else:
        if os.path.isfile(os.path.join(PATH_IMAGE_FOLDER,f'{image_name}.pdf')):
            pass  # skip this pdf file, because it already exists
        else:
            fig1.savefig(os.path.join(PATH_IMAGE_FOLDER,f'{image_name}.pdf'), bbox_inches='tight')  # save plot
            print('Save pdf')

    plt.close('all')  # close all figures to free memory between images
    print('')  # new line for better readability in console

View File

@ -0,0 +1,20 @@
You have to connect the serial bus cable from the uC to the Raspberry.
It was connected to the lower middle USB-Port in this case.
1. start bash
(2. workon cv-4.5.3.56)
3. python
4. import serial
5. s = serial.Serial('/dev/ttyACM0', 9600)
Now you can control the LEDs.
To let the stripe light up in one color:
s.write(str.encode('RRR-GGG-BBB'))
eg: s.write(str.encode('255-000-000'))
To let the stripe light up in one of the 20 color patterns:
s.write(str.encode('color_pattern_XX')), where XX is 01-20
eg: s.write(str.encode('color_pattern_01'))
Now you can start a program to detect the LEDs.

View File

@ -0,0 +1,3 @@
1. start bash
2. workon cv-4.5.3.56
3. python $SCRIPT_NAME$

View File

@ -0,0 +1,188 @@
# Creation Date: 02.03.2022
# Author: Kenan Gömek
# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures.
# This script is designed for shooting manually images with 'i'
# Press 'q' to exit
# Change camera parameters with v,b,n,m,x,c,o. See code for more information.
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
# Define camera settings
# divide origin resoluton by a number, to have the origin aspect ratio
# RESOLUTION = (3280, 2464) # Max Photo-Resolution CAM03 and CAM04 # no image with PiCamera Videoport at this Resolution.. Probably GPU Memory and CPU issues.
# RESOLUTION = (1640,1232) # 2nd best Resolution for CAM03 and CAM04 with FUll FOV (2x2 binning) # Mode 4
SENSOR_MODE = 4  # corresponding sensor mode to resolution 1640x1232
# OUTPUT_RESOLUTION = (960, 720)
OUTPUT_RESOLUTION = (416, 320)  # frame size requested from the GPU resizer
#OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308)
# (410,308) is being upscaled to (416,320) from ISP (warning in bash), but image will have still (410,308) pixels.
# OUTPUT_RESOLUTION = (820, 616) # (1640x1232)/2=(820,616)
# bash: frame size rounded up from 820x616 to 832x624
AWB_MODE = 'off'  # Auto white balance mode
# AWB_GAINS = (Fraction(485, 256), Fraction(397, 256)) # White Balance Gains to have colours read correctly: (red, blue)
AWB_GAINS = (1.395, 1.15)  # White Balance Gains to have colours read correctly: (red, blue). Int, float or fraction are valid.
BRIGHTNESS = 25  # sets the brightness setting of the camera. default is 50. [0-100]
# the brighter this setting, the brighter the LEDs appear and the higher the RGB values, and vice versa
CONTRAST = 100  # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
# ISO = 100 # ISO value
ISO = 320  # ISO value
EXPOSURE_MODE = 'off'  # 'off' freezes the gains once they have settled (see take_image_picamera_opencv)
FRAMERATE = 25  # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2  # Time for sleep-mode for the camera in seconds. My default: 2 s
# Define Functions
def take_image_picamera_opencv(shutter_speed):
    """Stream frames from the PiCamera video port and save images on demand.

    Shows a live preview window via OpenCV. Keyboard controls:
        q: quit the loop.
        i: save the current frame as PNG into a timestamped capture folder.
        v/b/n: increase shutter speed by 5/50/500 us.
        c/x: decrease shutter speed by 50/500 us.
        m: set shutter speed to the maximum for FRAMERATE.
        o: set shutter speed to 0 (auto).

    Parameters:
        shutter_speed: initial shutter speed in microseconds; 0 means auto.
    """
    folder_exists=False
    # Initialise Camera
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE  # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION  # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = shutter_speed
            # it was found that you have to set the right shutter speed at the first
            # initialisation of the current runtime of the program. The gains
            # (analog, digital) will adjust to this set up. After the gains are
            # fixed, they will never change, even if you change the shutter speed
            # during the runtime. To get consistent brightness values, set the
            # right shutter speed at initialisation once.
            time.sleep(SLEEP_TIME)  # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1)  # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME)  # Camera warm-up time to apply settings
            # camera.start_preview() # show camera preview through PiCamera interface
            # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation.
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                framenumber = frameidx+1  # frameidx starts with 0, framenumber with 1
                image = frame.array  # raw NumPy array without JPEG encoding
                camera_exposure_speed = camera.exposure_speed  # Retrieves the current shutter speed of the camera.
                cv.imshow("Current Frame", image)  # display the image without text
                output.truncate(0)  # clear the stream for next frame
                # if q is pressed, break from loop.
                pressed_key = cv.waitKey(2) & 0xff
                if pressed_key == ord('q'):
                    break
                elif pressed_key == ord('i'):  # Take image from manipulated image if i is pressed
                    if not folder_exists:
                        path_saveFolder, folder_exists = create_folder_for_captures()
                    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S.%f")[:-3]
                    cv.imwrite(f"{path_saveFolder}/img_ss{shutter_speed}_iso{ISO}_res{OUTPUT_RESOLUTION[0]}x{OUTPUT_RESOLUTION[1]}_Date {d1}.png", image)
                    print('took image!')
                elif pressed_key == ord('v'):  # increase shutterspeed by 5
                    shutter_speed = round(shutter_speed+5)
                    camera.shutter_speed = shutter_speed
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    print(f"retrieved shutter speed: {exposure_speed}")
                elif pressed_key == ord('b'):  # increase shutterspeed by 50
                    shutter_speed = round(shutter_speed+50)
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"retrieved shutter speed: {exposure_speed}")
                elif pressed_key == ord('n'):  # increase shutterspeed by 500
                    shutter_speed = round(shutter_speed+500)
                    # Bugfix: this branch never applied the new value to the
                    # camera, unlike every other shutter-speed branch.
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"retrieved shutter speed: {exposure_speed}")
                elif pressed_key == ord('m'):  # max shutterspeed
                    shutter_speed = round(1/FRAMERATE*1e6)  # longest exposure possible at the configured framerate
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"retrieved shutter speed: {exposure_speed}")
                elif pressed_key == ord('x'):  # decrease shutterspeed by 500
                    shutter_speed = round(shutter_speed-500)
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"retrieved shutter speed: {exposure_speed}")
                elif pressed_key == ord('c'):  # decrease shutterspeed by 50
                    shutter_speed = round(shutter_speed-50)
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"retrieved shutter speed: {exposure_speed}")
                elif pressed_key == ord('o'):  # set shutterspeed to 0
                    shutter_speed = 0
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"retrieved shutter speed: {exposure_speed}")
def create_folder_for_captures():
    """Create a timestamped folder "Capture_YYYY-MM-DD HH-MM" in the current
    working directory for saving the captured pictures.

    Returns:
        tuple: (path of the created folder, True).
        Terminates the whole script via quit() if the folder cannot be created
        (e.g. it already exists or there is no write permission).
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H-%M")
    path_saveFolder = os.getcwd() + r"/Capture_" + timestamp
    try:
        os.mkdir(path_saveFolder)
    except OSError:
        print("Error! Ending script.")
        quit()
    folder_exists = True
    return path_saveFolder, folder_exists
# Start Script
# start capture series for different shutter speeds
print('start caputure...')
# blocks until the user quits; take_image_picamera_opencv is defined further up in this file
take_image_picamera_opencv(shutter_speed=50)
# take_image_picamera_opencv(shutter_speed=round(1/FRAMERATE*1e6)) # max shutter-speed depending on fps: 1/2 fps = 500 000 µs
# End Script
cv.destroyAllWindows()
print('Script finished')

View File

@ -0,0 +1,112 @@
# This script uses multi processing and raspividyuv to capture frames @35 fps with canny edge detection
# To-Do:
# -set parameter for camera
# -typos and uniformity
# -comment things better and make more steps for easier understanding for non-python-people (or maybe proficiency level beginner)
# -capture led strips and save video
# -How about OV-Sensor? because putting camera in sensor mode 4 and requesting image with less resolution drops fps.
# -Ist blitzen auch bei diesem skript da??!?!?!??!?
import cv2 as cv
import numpy as np
import subprocess as sp
import time
import atexit
frames = [] # stores the video sequence for the demo
max_frames =500
# NOTE(review): max_frames is currently unused because the corresponding
# break in the loop below is commented out
N_frames = 0
# Video capture parameters
(w,h) = (416,320) # height must be multiple of 32 and width multiple of 16
colour_channels = 3
bytesPerFrame = w * h * colour_channels # size of one raw RGB frame read from the pipe
fps = 35 # setting to 250 will request the maximum framerate possible
sensor_mode = 4
# "raspividyuv" is the command that provides camera frames in YUV format
# "--output -" specifies stdout as the output
# "--timeout 0" specifies continuous video
# "--luma" discards chroma channels, only luminance is sent through the pipeline
# see "raspividyuv --help" for more information on the parameters
# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview --mode "+str(sensor_mode)
videoCmd = f"raspividyuv -w {w} -h {h} --output - --timeout 0 --framerate {fps} --rgb --nopreview --mode {sensor_mode}"
# with sensor mode 38 fps instead of 72 fps --> How about Camera OV-Sensor?
# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview"
videoCmd = videoCmd.split() # Popen requires that each parameter is a separate string
cameraProcess = sp.Popen(videoCmd, stdout=sp.PIPE) # start the camera
atexit.register(cameraProcess.terminate) # this closes the camera process in case the python scripts exits unexpectedly
# wait for the first frame and discard it (only done to measure time more accurately)
rawStream = cameraProcess.stdout.read(bytesPerFrame)
print("Start...")
start_time = time.time()
while True:
    cameraProcess.stdout.flush() # discard any frames that we were not able to process in time
    # NOTE(review): flush() on the read end of a pipe is effectively a no-op in
    # Python 3 -- confirm that frame skipping actually works as intended
    frame = np.frombuffer(cameraProcess.stdout.read(bytesPerFrame), dtype=np.uint8) # raw NumPy array without JPEG encoding
    if frame.size != bytesPerFrame:
        print("Error: Camera stream closed unexpectedly")
        break
    frame.shape = (h,w,colour_channels) # set dimensions for numpy array --> from (921600,) to (480,640,3)
    # do the processing here with OpenCV
    frame = cv.cvtColor(frame,cv.COLOR_BGR2RGB) # convert frame to rgb
    # test
    frame_gs=cv.cvtColor(frame,cv.COLOR_RGB2GRAY) # grayscale image for edge detection
    frame = cv.Canny(frame_gs, 50,150) # Canny edge detection; result is a single-channel image
    frames.append(frame) # save the frame (for the demo)
    N_frames += 1
    #test
    #put text
    font = cv.FONT_HERSHEY_SIMPLEX # font
    fontScale = 1 # fontScale
    color = (255, 255, 255) # Font colour in BGR
    thickness = 1 # Line thickness in px
    # set text position
    frame_width = int(frame.shape[1])
    frame_height = int(frame.shape[0])
    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height
    text_linespacing = 50 # line spacing between two strings in pixels
    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
    text_start_position_X = int(0) # start text from left edge of image
    pos_1 = (text_start_position_X, text_start_position_Y)
    text_line_1 = f"Frame: {N_frames}"
    cv.putText(frame, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
    # if N_frames > max_frames: break #if i deactivate cv.imshow i can control end of program with this parameter.
    cv.imshow("Current Frame", frame) # display the image
    pressed_key = cv.waitKey(1) & 0xff
    if pressed_key == ord('q'): # quit on 'q'
        break
cv.destroyAllWindows()
end_time = time.time()
cameraProcess.terminate() # stop the camera
elapsed_seconds = end_time-start_time
print(f"Finish! Result: {(N_frames/elapsed_seconds)} fps")

View File

@ -0,0 +1,120 @@
# This script uses multi processing and raspividyuv to capture frames @35 fps with canny edge detection
# To-Do:
# -set parameter for camera
# -typos and uniformity
# -comment things better and make more steps for easier understanding for non-python-people (or maybe proficiency level beginner)
# -capture led strips and save video
# -How about OV-Sensor? because putting camera in sensor mode 4 and requesting image with less resolution drops fps.
# -Ist blitzen auch bei diesem skript da??!?!?!??!?
import cv2 as cv
import numpy as np
import subprocess as sp
import time
import atexit
from fractions import Fraction
max_frames =500
# NOTE(review): max_frames is currently unused because the corresponding
# break in the loop below is commented out
N_frames = 0 # counts the processed frames
# Video capture parameters
(w,h) = (416,320) # height must be multiple of 16 and width multiple of 16
colour_channels = 3
bytesPerFrame = w * h * colour_channels # size of one raw frame read from the pipe
fps = 30 # setting to 250 will request the maximum framerate possible
sensor_mode = 4
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = f"{485/256},{397/256}" # White Balance Gains to have colours read correctly: (red, blue)
ISO = 100 # ISO value
EXPOSURE_MODE = 'off'
shutterspeed = 20000 # shutter speed in microseconds
# "raspividyuv" is the command that provides camera frames in YUV format
# "--output -" specifies stdout as the output
# "--timeout 0" specifies continuous video
# "--luma" discards chroma channels, only luminance is sent through the pipeline
# see "raspividyuv --help" for more information on the parameters
# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview --mode "+str(sensor_mode)
videoCmd = (f"raspividyuv -w {w} -h {h} --output - --timeout 0 --framerate {fps} --rgb --nopreview "
f"--mode {sensor_mode} --awb {AWB_MODE} -awbg {AWB_GAINS} --ISO {ISO} --exposure {EXPOSURE_MODE} -ss {shutterspeed}")
# with sensor mode 38 fps instead of 72 fps --> How about Camera OV-Sensor?
# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview"
videoCmd = videoCmd.split() # Popen requires that each parameter is a separate string
cameraProcess = sp.Popen(videoCmd, stdout=sp.PIPE) # start the camera
atexit.register(cameraProcess.terminate) # this closes the camera process in case the python scripts exits unexpectedly
# wait for the first frame and discard it (only done to measure time more accurately)
rawStream = cameraProcess.stdout.read(bytesPerFrame)
print("Start...")
start_time = time.time()
while True:
    cameraProcess.stdout.flush() # discard any frames that we were not able to process in time
    frame = np.frombuffer(cameraProcess.stdout.read(bytesPerFrame), dtype=np.uint8) # raw NumPy array without JPEG encoding
    if frame.size != bytesPerFrame:
        print("Error: Camera stream closed unexpectedly")
        break
    frame.shape = (h,w,colour_channels) # set dimensions for numpy array --> from (921600,) to (480,640,3)
    # do the processing here with OpenCV
    r,g,b = cv.split(frame) # OpenCV works with bgr. Image is also speciefied to be captured in bgr.
    # NOTE(review): if the frame really is BGR (as the comment above claims),
    # cv.split returns the channels in (b, g, r) order, so this unpacking swaps
    # red and blue and the imshow windows "b"/"r" below show the wrong channels.
    # Confirm the actual channel order delivered by "raspividyuv --rgb".
    frame = cv.cvtColor(frame,cv.COLOR_BGR2RGB) # convert frame to rgb
    N_frames += 1
    #put text
    font = cv.FONT_HERSHEY_SIMPLEX # font
    fontScale = 1 # fontScale
    color = (255, 255, 255) # Font colour in BGR
    thickness = 1 # Line thickness in px
    # set text position
    frame_width = int(frame.shape[1])
    frame_height = int(frame.shape[0])
    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height
    text_linespacing = 50 # line spacing between two strings in pixels
    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
    text_start_position_X = int(0) # start text from left edge of image
    pos_1 = (text_start_position_X, text_start_position_Y)
    text_line_1 = f"Frame: {N_frames}"
    cv.putText(frame, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
    # if N_frames > max_frames: break #if i deactivate cv.imshow i can control end of program with this parameter.
    cv.imshow("Current Frame", frame) # display the image
    cv.imshow("b", b)
    cv.imshow("g", g)
    cv.imshow("r", r)
    pressed_key = cv.waitKey(1) & 0xff
    if pressed_key == ord('q'): # quit on 'q'
        break
cv.destroyAllWindows()
end_time = time.time()
cameraProcess.terminate() # stop the camera
elapsed_seconds = end_time-start_time
print(f"Finish! Result: {(N_frames/elapsed_seconds)} fps")

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,594 @@
# Creation Date: 14.01.2022
# Author: Kenan Gömek
# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures.
# This script is designed for capturing a Video with the frame number in it. press "q" to quit.
# You can take images with "i".
# Update: 28.02.2022
# This program is a development step for the final program
# This program works with multiprocessing and with Picamera
# create shared memorys once before exectution instead of twice: in main and take_image_picamera_opencv
import cv2 as cv
import numpy as np
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import sys
from multiprocessing import Process, shared_memory
# Define camera settings
# divide origin resoluton by a number, to have the origin aspect ratio
# RESOLUTION = (3280, 2464) # Max Photo-Resolution CAM03 and CAM04 # no image with PiCamera Videoport at this Resolution.. Probably GPU Memory and CPU issues.
# RESOLUTION = (1640,1232) # 2nd best Resolution for CAM03 and CAM04 with FUll FOV (2x2 binning) # Mode 4
SENSOR_MODE = 4 # corresponding sensor mode to resolution
OUTPUT_RESOLUTION = (416, 320) # (width, heigth)
image_width = OUTPUT_RESOLUTION[0]
image_heigth = OUTPUT_RESOLUTION[1]
# (410,308) is being upscaled to (416,320) from ISP (warning in bash), but image will have still (410,308) pixels.
# OUTPUT_RESOLUTION = (820, 616) # (1640x1232)/2=(820,616)
# bash: frame size rounded up from 820x616 to 832x624
number_of_colorchannels = 3 # r, g, b
size_of_frame=int(image_heigth*image_heigth)
# NOTE(review): size_of_frame multiplies heigth*heigth -- width*heigth was
# probably intended; the variable appears unused below, verify before fixing
frame_dimension = int(1) # NOTE(review): appears unused below
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (Fraction(485, 256), Fraction(397, 256)) # White Balance Gains to have colours read correctly: (red, blue)
ISO = 100 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 30 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
# miscellaneous parameters
max_value_of_uint64 = int((2**64) - 1) # @30 fps: conservative calculated driving time: 1.95*1e10 years --> Integer overflow not relevant.
# settings for development
show_opencv_window = False
# create shared Memorys for main-process
# NOTE: these named blocks must be closed and unlinked on exit (done in main())
# shared memory for bools
shm_bools_pre=np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # create numpy array with bools stored in it
# [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished
# [4]: p_red_started [5]: p_green_started [6]: p_blue_started
# [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger
size_of_buffer = shm_bools_pre.nbytes
print(f"size of buffer: {size_of_buffer}") # size of buffer: 10
print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool
shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block
shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory
shm_bools[:] = shm_bools_pre[:] # Copy the original data into shared memory
# print(shm_bool)
# print(shm_bools.name)
# shared memory for framenumber
shm_framenumber_pre=np.array([0], dtype=np.uint64)
size_of_buffer = shm_framenumber_pre.nbytes
print(f"size of framenumber-buffer: {size_of_buffer}") #8
print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") #uint64
shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block
shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory
shm_framenumber[:] = shm_framenumber_pre[:] # Copy the original data into shared memory
# print(shm_framenumber) # [0]
# print(shm_framenumber_create.name) # shm_framenumber
# shared memory for red, green, blue frame
# one (heigth x width) uint8 buffer per colour channel, initialised to black
int_black = 0 # integer for black color/ no color
shm_colorframes_pre = np.full(\
    (image_heigth,image_width), \
    int_black, dtype=np.uint8)
size_of_buffer = shm_colorframes_pre.nbytes
print(f"size of colorframe-buffer: {size_of_buffer}") #133 120
print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") #uint8
shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory
shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory
shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory
shm_redframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
shm_greenframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
shm_blueframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
# ----------------------------------------------------------------------------
# Define Funcions
def get_frames_from_picamera(shutter_speed):
    """Capture frames continuously from the PiCamera and publish them via shared memory.

    For every captured frame the colour channels are split and written into the
    shared-memory arrays (shm_redframe / shm_greenframe / shm_blueframe), the
    frame counter (shm_framenumber) is updated and the per-channel start
    triggers (shm_bools[7:10]) are set so the worker processes start processing.
    Breaks after frame 500 and prints the measured fps (performance measuring).

    Args:
        shutter_speed: shutter speed in microseconds passed to the camera.
    """
    # newframe= shm_bools[0] # do not use this! no updted values in "newframe"
    # framenumber = shm_framenumber[0]
    # Initialise Camera
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = shutter_speed
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings
            t_start= time.perf_counter() # save time for fps calculation
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                # General information:
                # - always the newest frame is recieved: processing must be faster than fps if every frame should be processed
                shm_bools[0] = True # signal that a new frame is available
                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
                shm_framenumber[0] = framenumber
                #print('')
                #print(f"new frame: {framenumber}")
                #image = frame.array # raw NumPy array without JPEG encoding
                b,g,r = cv.split(frame.array) # split colour channels of raw NumPy array without JPEG encoding
                shm_redframe[:] = r
                shm_greenframe[:] = g
                shm_blueframe[:] = b
                # for better performance one can assign directly in funtion line the values to the shm_memorys: shm_red, .. , ... = cv.split(..)
                shm_bools[7:10]=[True] # trigger the start of the processing for each colorchannel
                #print(shm_bools[7], shm_bools[8], shm_bools[9])
                #display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out) # show the frame
                output.truncate(0) # clear the stream for next frame
                if framenumber == 500: # 5 sek @ 30 fps, only for performance measuring
                    t_stop=time.perf_counter()
                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
                    break
def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
    """Overlay capture information on a copy of *img* and show it in an OpenCV window.

    Displays the set shutter speed, the retrieved exposure speed and the frame
    number; optionally (controlled by the trigger_* flags below) also the mean
    image brightness or the per-channel maxima. If trigger_record_OpenCV == 1
    the annotated frame is additionally written to the video writer *out*.
    """
    img = img.copy() # make copy of image and do not modify the original image
    # please activate only one trigger once
    trigger_show_brightness = 0 # trigger for (not) calculating and showing the brightness of the image
    if trigger_show_brightness == 1:
        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)
    trigger_show_max_brightness_values_of_colour_channels = 0 # trigger for (not) calculating and showing max values of colour chanels
    if trigger_show_max_brightness_values_of_colour_channels == 1:
        r_max, g_max, b_max = get_max_rgb_values(img)
    font = cv.FONT_HERSHEY_SIMPLEX # font
    fontScale = 1 # fontScale
    color = (255, 255, 255) # Font colour in BGR
    thickness = 1 # Line thickness in px
    # set text position
    frame_width = int(img.shape[1])
    frame_height = int(img.shape[0])
    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height
    text_linespacing = 50 # line spacing between two strings in pixels
    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
    text_start_position_X = int(0) # start text from left edge of image
    # set position in (x,y)-coordinated from top left corner. Bottom-left corner of the text string in the image.
    pos_1 = (text_start_position_X, text_start_position_Y)
    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing)
    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing)
    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
        pos_4 = (text_start_position_X, text_start_position_Y+3*text_linespacing)
    # define text to display
    text_line_1 = f"set ss: {shutter_speed} us"
    text_line_3 = f"Frame: {framenumber}"
    text_line_2 = f"ret exs: {camera_exposure_speed} us"
    if trigger_show_brightness==1:
        if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"
        elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
            # NOTE(review): 10e3 is 10000 -- for per-mille ("pm") *1e3 was probably intended, verify
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm"
    if trigger_show_max_brightness_values_of_colour_channels==1:
        text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}"
    # put the text into the image
    image_text_1 = cv.putText(img, text_line_1, pos_1, font,
                    fontScale, color, thickness, cv.LINE_AA)
    image_text_2 = cv.putText(img, text_line_2, pos_2, font,
                    fontScale, color, thickness, cv.LINE_AA)
    image_text_3 = cv.putText(img, text_line_3, pos_3, font,
                    fontScale, color, thickness, cv.LINE_AA)
    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
        image_text_4 = cv.putText(img, text_line_4, pos_4, font,
                        fontScale, color, thickness, cv.LINE_AA)
    cv.imshow("Current Frame", img) # display the image
    if trigger_record_OpenCV == 1:
        out.write(img) # write frame to Video
def calc_arithmetic_mean_of_brightness_per_pixel(image):
    """Calculate the mean brightness per pixel of a BGR image, relative to the
    maximum possible brightness (result range: 0-1).

    Comment from original author: this is so computationally expensive that
    flashing is (almost) never visible in the image, hence the max-value display.
    """
    # channel order of the captured frame is b, g, r (OpenCV/BGR layout)
    b = image[:, :, 0]
    g = image[:, :, 1]
    # widen one channel to uint16 so 255+255+255 = 765 does not overflow uint8;
    # the sum is then computed in uint16 as in the original implementation
    r = image[:, :, 2].astype('uint16')
    heigth, width = r.shape
    number_of_colour_channels = 3
    brightness_sum = np.sum((r+g+b)/number_of_colour_channels)
    mean_brightness_per_pixel = brightness_sum/(width*heigth)
    max_possible_brightness = 255  # maximum possible brightness
    return mean_brightness_per_pixel/max_possible_brightness
def get_max_rgb_values(image):
    """Return the maximum value of each colour channel of a BGR image as (r, g, b)."""
    # BGR layout: channel 0 = blue, 1 = green, 2 = red
    b_max = image[:, :, 0].max()
    g_max = image[:, :, 1].max()
    r_max = image[:, :, 2].max()
    return r_max, g_max, b_max
def create_folder_for_captures():
    """Create a timestamped folder "Capture_YYYY-MM-DD HH-MM" in the current
    working directory for saving the captured pictures.

    Returns:
        tuple: (path of the created folder, True).
        Terminates the whole script via quit() if the folder cannot be created
        (e.g. it already exists or there is no write permission).
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H-%M")
    path_saveFolder = os.getcwd() + r"/Capture_" + timestamp
    try:
        os.mkdir(path_saveFolder)
    except OSError:
        print("Error! Ending script.")
        quit()
    folder_exists = True
    return path_saveFolder, folder_exists
def do_processing():
    """Placeholder for the real image analysis; simply burns ~1 ms of wall time."""
    time.sleep(0.001)
    # print("ohh i am doing high complex image analysis with computer vision")
def do_processing_frame_r(frame):
    """Print the maximum value of the red colour-channel frame."""
    peak = frame.max()
    print("max frame color red: {}".format(peak))
def do_processing_frame_g(frame):
    """Print the maximum value of the green colour-channel frame."""
    peak = frame.max()
    print("max frame color green: {}".format(peak))
def do_processing_frame_b(frame):
    """Print the maximum value of the blue colour-channel frame."""
    peak = frame.max()
    print("max frame color blue: {}".format(peak))
def processing_red():
    """Worker process for the red colour channel.

    Attaches to the shared-memory blocks created in the main process, then loops
    forever: whenever the red start trigger (shm_bools[7]) is set for a new
    frame it marks itself started (shm_bools[4]), runs do_processing() and
    reports completion via shm_bools[1]. Never returns by design; on
    KeyboardInterrupt it only detaches from the shared memory.
    """
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script
    newframe = shm_bools[0]
    p_red_finished = shm_bools[1] # not used, but for clarity
    p_red_started = shm_bools[4]
    shm_framenumber_init = shared_memory.SharedMemory\
        (name="shm_framenumber") # Attach to existing shared memory block
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
        buffer= shm_framenumber_init.buf)
    # framenumer = shm_framenumber[0]
    shm_redframe_init = shared_memory.SharedMemory\
        (name="shm_redframe") # Attach to existing shared memory block
    shm_redframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \
        buffer= shm_redframe_init.buf)
    i=0  # 0 only until the very first frame has been processed
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i==0:
                last_processed_frame = framenumber
            conditions_for_first_start = (i==0) and\
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[7] == True)
            conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True)
            # newframe and all color-channel-processings have to be finished
            if conditions_for_first_start == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                shm_bools[1] = False # set bool for p_red_finished to false
                #t1 = time.perf_counter_ns()
                do_processing()
                i += 1
                #print(f"first processing red finished. frame: {framenumber}")
                shm_bools[1] = True # set bool for p_red_finished to true
                shm_bools[4] = False # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}")
                shm_bools[1] = False # set bool for p_red_finished to false
                #t1 = time.perf_counter_ns()
                do_processing()
                #print(f"max frame color red: {shm_redframe.max()}")
                if show_opencv_window:
                    cv.imshow("red", shm_redframe)
                    cv.waitKey(1)
                #print(f"processing red finished. frame: {framenumber}")
                shm_bools[1] = True # set bool for p_red_finished to true
                #t2 = time.perf_counter_ns()
                #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms")
                last_processed_frame = framenumber
                shm_bools[4] = False # process ended
            # elif shm_bools[0] == False:
            # pass
            #print(f"no new red frame")
            # image processing finished
        except KeyboardInterrupt:
            # detach from the shared memory; unlinking is done by the main process
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                shm_redframe_init.close()
            except FileNotFoundError:
                # Memory already destroyed
                pass
def processing_green():
    """Worker process for the green colour channel.

    Same scheme as processing_red(), using shm_bools[8] as start trigger,
    shm_bools[5] as started flag and shm_bools[2] as finished flag.
    Never returns by design.
    """
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools")
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script
    newframe= shm_bools[0]
    p_green_finished= shm_bools[2] # not used, but for clarity
    p_green_started = shm_bools[5]
    shm_framenumber_init = shared_memory.SharedMemory\
        (name="shm_framenumber") # Attach to existing shared memory block
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
        buffer= shm_framenumber_init.buf)
    # framenumer = shm_framenumber[0]
    shm_greenframe_init = shared_memory.SharedMemory\
        (name="shm_greenframe") # Attach to existing shared memory block
    shm_greenframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \
        buffer= shm_greenframe_init.buf)
    i=0  # 0 only until the very first frame has been processed
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i==0:
                last_processed_frame = framenumber
            conditions_for_first_start = (i==0) and\
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[8] == True)
            conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[8] == True)
            if conditions_for_first_start == True:
                shm_bools[5] = True # process started
                shm_bools[8] = False # reset trigger
                shm_bools[2] = False # set bool for p_green_finished to false
                i += 1
                do_processing()
                #print(f"first processing green finished. frame: {framenumber}")
                shm_bools[2] = True # set bool for p_green_finished to true
                shm_bools[5] = False # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[5] = True # process started
                shm_bools[8] = False # reset trigger
                #print(f"green: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}")
                shm_bools[2] = False # set bool for p_green_finished to false
                do_processing()
                if show_opencv_window:
                    cv.imshow("green", shm_greenframe)
                    cv.waitKey(1)
                #print(f"max frame color green: {shm_greenframe.max()}")
                #print(f"processing green finished. frame: {framenumber}")
                shm_bools[2] = True # set bool for p_green_finished to true
                last_processed_frame = framenumber
                shm_bools[5] = False # process ended
            # elif shm_bools[0] == False:
            # pass
            # # print(f"no new green frame")
            # image processing finished
        except KeyboardInterrupt:
            # detach from the shared memory; unlinking is done by the main process
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                shm_greenframe_init.close()
            except FileNotFoundError:
                # Memory already destroyed
                pass
def processing_blue():
    """Worker process for the blue colour channel.

    Same scheme as processing_red(), using shm_bools[9] as start trigger,
    shm_bools[6] as started flag and shm_bools[3] as finished flag.
    Never returns by design.
    """
    shm_bools_init = shared_memory.SharedMemory(name="shm_bools")
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bools_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script
    newframe= shm_bools[0]
    p_red_finished= shm_bools[3] # not used, but for clarity # NOTE(review): misnamed -- this is the blue-finished flag, should be p_blue_finished
    p_blue_started = shm_bools[6]
    shm_framenumber_init = shared_memory.SharedMemory\
        (name="shm_framenumber") # Attach to existing shared memory block
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
        buffer= shm_framenumber_init.buf)
    # framenumer = shm_framenumber[0]
    shm_blueframe_init = shared_memory.SharedMemory\
        (name="shm_blueframe") # Attach to existing shared memory block
    shm_blueframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \
        buffer= shm_blueframe_init.buf)
    i=0  # 0 only until the very first frame has been processed
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i==0:
                last_processed_frame = framenumber
            conditions_for_first_start = (i==0) and\
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[9] == True)
            conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[9] == True)
            # newframe and all color-channel-processings have to be finished
            if conditions_for_first_start == True:
                shm_bools[6] = True # process started
                shm_bools[9] = False # reset trigger
                shm_bools[3] = False # set bool for p_blue_finished to false
                i += 1
                do_processing()
                #print(f"first processing blue finished. frame: {framenumber}")
                shm_bools[3] = True # set bool for p_blue_finished to true
                shm_bools[6] = False # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[6] = True # process started
                shm_bools[9] = False # reset trigger
                #print(f"blue: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}")
                shm_bools[3] = False # set bool for p_blue_finished to false
                do_processing()
                if show_opencv_window:
                    cv.imshow("blue", shm_blueframe)
                    cv.waitKey(1)
                #print(f"max frame color blue: {shm_blueframe.max()}")
                #print(f"processing blue finished. frame: {framenumber}")
                shm_bools[3] = True # set bool for p_blue_finished to true
                last_processed_frame = framenumber
                shm_bools[6] = False # process ended
            # elif shm_bools[0] == False:
            # pass
            # #print(f"no new blue frame")
            # image processing finished
        except KeyboardInterrupt:
            # detach from the shared memory; unlinking is done by the main process
            try:
                shm_bools_init.close()
                shm_framenumber_init.close()
                shm_blueframe_init.close()
            except FileNotFoundError:
                # Memory already destroyed
                pass
# ----------------------------------------------------------------------------
# main
def _release_shared_memory():
    """Close and unlink every module-level SharedMemory block.

    close() detaches this process from the block, unlink() destroys it so the
    OS can reclaim the memory. A FileNotFoundError means the memory was
    already destroyed and can safely be ignored.
    """
    try:
        for shm in (shm_bools_create, shm_framenumber_create, shm_redframe_create,
                    shm_greenframe_create, shm_blueframe_create):
            shm.close()
            shm.unlink()
    except FileNotFoundError:
        # Memory already destroyed
        pass


def main():
    """Start the three colour-channel worker processes, capture frames until the
    capture loop breaks, then terminate the workers and release all shared memory.
    """
    start = time.perf_counter()
    try:
        # create processes
        p_red = Process(target=processing_red)
        p_green = Process(target=processing_green)
        p_blue = Process(target=processing_blue)
        processes = [p_red, p_green, p_blue]
        print(f"waiting 1 second to create processes")
        time.sleep(1)  # give the OS time to set up the processes
        # start acitivity of processes
        for process in processes:
            process.start()
        # start capturing
        get_frames_from_picamera(shutter_speed=33333)  # can see something at normal light conditions
        # get_frames_from_picamera(shutter_speed=1000)
        print('*******************************')
        # this below code is only executed if the loop in get_frames_from_picamera is breaked
        # In real use case there will be no end of this program
        # The worker processes never finish by design (better for performance:
        # no additional stop triggers to check), so terminate() instead of join().
        # for process in processes:
        #     process.join()
        for process in processes:
            process.terminate()
        # print time measuring
        end = time.perf_counter()
        print(f'Script finished in {round(end-start, 2)} s')
        # close each SharedMemory instance and unlink to release the shared memory
        _release_shared_memory()
    except KeyboardInterrupt:
        # Normally this program never gets keyboard interrupted! But here this case is nevertheless handled
        # BUG FIX: the original called close()/unlink() on the NumPy array
        # shm_bools instead of the SharedMemory object shm_bools_create, which
        # would raise AttributeError during cleanup.
        _release_shared_memory()


if __name__ == "__main__":
    main()

Binary file not shown.

View File

@ -0,0 +1,3 @@
1. start bash
2. workon cv-4.5.3.56
3. python $SCRIPT_NAME$

View File

@ -0,0 +1,5 @@
#Author: Kenan Gömek
#Date: 07.12.2021
Ino-Code muss auf Arduino und der python-code auf den Raspberry.
Dann kann man für alle Farben von 0-255 und Leuchtmuster automatisch fotos aufnehmen.

View File

@ -0,0 +1,148 @@
/* This program contains all 20 possible combinations for a lane wich have an orientation.
* The brightness of the color patterns is set here to 255.
* Furthermore colors can be transmitted via rrr-ggg-bbb values.
* It uses serial communication.
*/
#include <Adafruit_NeoPixel.h>
#define LED_PIN 9      // Arduino pin driving the NeoPixel data line
#define NUM_LEDS 29    // number of LEDs on the mounted strip
// Buffers for the serial protocol: either "rrr-ggg-bbb" or "color_pattern_NN".
String redString; String greenString; String blueString; String inputString; String color_pattern;
Adafruit_NeoPixel strip = Adafruit_NeoPixel(NUM_LEDS, LED_PIN, NEO_GRB + NEO_KHZ800);
/* Info:
 * set the color of a pixel:
 * strip.setPixelColor(n, red, green, blue);
 * pixel starts from n=0
 *
 * Multiple pixels can be set to the same color using the fill() function,
 * which accepts one to three arguments. Typically its called like this:
 * strip.fill(color, first, count)
 */
int pause = 3000; // pause in ms
// Definition of Colors (packed 32-bit values used by the pattern functions)
int led_brightness = 255; // fixed channel brightness for the predefined colours
uint32_t red = strip.Color(led_brightness,0,0);
uint32_t green = strip.Color(0,led_brightness,0);
uint32_t blue = strip.Color(0,0,led_brightness);
uint32_t yellow = strip.Color(led_brightness,led_brightness,0);
uint32_t magenta = strip.Color(led_brightness,0,led_brightness);
uint32_t cyan = strip.Color(0,led_brightness,led_brightness);
// Arduino entry point: runs once after reset/power-up.
void setup() {
  Serial.begin(9600);           // serial link to the host (commands arrive here)
  strip.begin();                // initialise the NeoPixel driver
  strip.clear(); strip.show();  // turn off all LEDs
}
// Main loop: read colour commands from the serial port and drive the strip.
// Protocol: "rrr-ggg-bbb" sets a solid colour on the whole strip;
// "color_pattern_NN" (NN = 01..20) selects a predefined 3-colour pattern.
void loop() {
  if (Serial.available()) {
    inputString = Serial.readString();
    color_pattern = inputString; // remembered so the chosen pattern keeps refreshing
    Serial.print("Folgender Text wurde empfangen: ");
    Serial.println(inputString);
    // Split an "rrr-ggg-bbb" command into its R, G and B substrings
    // (harmless for pattern commands; the substrings are simply ignored then).
    redString=inputString.substring(0,2+1);
    greenString=inputString.substring(4,6+1);
    blueString=inputString.substring(8,10+1);
  }
  // Dispatch on the stored pattern name (re-applied every loop iteration).
  if (color_pattern == "color_pattern_01") {
    SetColorCombination(red, green, blue); // color pattern 01
  }
  else if (color_pattern == "color_pattern_02") {
    SetColorCombination(red, green, yellow); // color pattern 02
  }
  else if (color_pattern == "color_pattern_03") {
    SetColorCombination(red, green, magenta); // color pattern 03
  }
  else if (color_pattern == "color_pattern_04") {
    SetColorCombination(red, green, cyan); // color pattern 04
  }
  else if (color_pattern == "color_pattern_05") {
    SetColorCombination(red, blue, yellow); // color pattern 05
  }
  else if (color_pattern == "color_pattern_06") {
    SetColorCombination(red, blue, magenta); // color pattern 06
  }
  else if (color_pattern == "color_pattern_07") {
    SetColorCombination(red, blue, cyan); // color pattern 07
  }
  else if (color_pattern == "color_pattern_08") {
    SetColorCombination(red, yellow, magenta); // color pattern 08
  }
  else if (color_pattern == "color_pattern_09") {
    SetColorCombination(red, yellow, cyan); // color pattern 09
  }
  else if (color_pattern == "color_pattern_10") {
    SetColorCombination(red, magenta, cyan); // color pattern 10
  }
  else if (color_pattern == "color_pattern_11") {
    SetColorCombination(green, blue, yellow); // color pattern 11
  }
  else if (color_pattern == "color_pattern_12") {
    SetColorCombination(green, blue, magenta); // color pattern 12
  }
  else if (color_pattern == "color_pattern_13") {
    SetColorCombination(green, blue, cyan); // color pattern 13
  }
  else if (color_pattern == "color_pattern_14") {
    SetColorCombination(green, yellow, magenta); // color pattern 14
  }
  else if (color_pattern == "color_pattern_15") {
    SetColorCombination(green, yellow, cyan); // color pattern 15
  }
  else if (color_pattern == "color_pattern_16") {
    SetColorCombination(green, magenta, cyan); // color pattern 16
  }
  else if (color_pattern == "color_pattern_17") {
    SetColorCombination(blue, yellow, magenta); // color pattern 17
  }
  else if (color_pattern == "color_pattern_18") {
    SetColorCombination(blue, yellow, cyan); // color pattern 18
  }
  else if (color_pattern == "color_pattern_19") {
    SetColorCombination(blue, magenta, cyan); // color pattern 19
  }
  else if (color_pattern == "color_pattern_20") {
    SetColorCombination(yellow, magenta, cyan); // color pattern 20
  }
  else{
    // Not a pattern command: show the parsed RGB value on the whole strip.
    strip.clear();
    strip.fill(strip.Color(redString.toInt(),greenString.toInt(),blueString.toInt()),0,0); // count 0 = fill to the end of the strip
    strip.show();
  }
}
// Define Functions
// Paint the strip with a repeating 3-LED colour pattern and latch it.
// Only complete triplets are written (e.g. 27 of 29 LEDs for NUM_LEDS=29,
// since 29 % 3 = 2); any leftover LEDs keep their previous colour.
void SetColorCombination(uint32_t color_1, uint32_t color_2, uint32_t color_3){
  // Number of LEDs that form whole triplets.
  int usable_led_count = NUM_LEDS - (NUM_LEDS % 3);
  for (int led = 0; led < usable_led_count; led += 3) {
    strip.setPixelColor(led,     color_1);
    strip.setPixelColor(led + 1, color_2);
    strip.setPixelColor(led + 2, color_3);
  }
  strip.show(); // push the buffer out to the physical LEDs
}

View File

@ -0,0 +1,235 @@
# Creation Date: 14.01.2022
# Author: Kenan Gömek
# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures.
# This script is designed for aligning the LED-Stripe horizontally to the image sensor with LEDs off
# Use:
# 1. turn on the LEDs to e.g. 005-000-000 manually via bash or similar
# 2. execute this script
# 3. adjust shutter speed if desired with x,c,b,n,m (program starts with max shutter speed for selected frame rate)
# 4. align LED strip e.g. to the upper blue line and first led to the left edge of the image
# 5. take image of alignment with 'i'
# 6. end program with 'q'
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
# Define camera settings
# divide origin resoluton by a number, to have the origin aspect ratio
# RESOLUTION = (3280, 2464) # Max Photo-Resolution CAM03 and CAM04 # no image with PiCamera Videoport at this Resolution.. Probably GPU Memory and CPU issues.
# RESOLUTION = (1640,1232) # 2nd best Resolution for CAM03 and CAM04 with FUll FOV (2x2 binning) # Mode 4
SENSOR_MODE = 4 # corresponding sensor mode to resolution
OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308)
# (410,308) is being upscaled to (416,320) from ISP (warning in bash), but image will have still (410,308) pixels.
# OUTPUT_RESOLUTION = (820, 616) # (1640x1232)/2=(820,616)
# bash: frame size rounded up from 820x616 to 832x624
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (Fraction(485, 256), Fraction(397, 256)) # White Balance Gains to have colours read correctly: (red, blue)
ISO = 100 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 10 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
# Define Funcions
def take_image_picamera_opencv(shutter_speed):
    """Stream frames from the PiCamera video port and run the interactive
    LED-strip alignment preview.

    Interactive keys (pressed in the OpenCV window):
      q: quit            i: save current annotated frame
      b / n: shutter speed +50 / +500 us
      c / x: shutter speed -50 / -500 us
      m: max shutter speed for FRAMERATE
      o: shutter speed 0 (camera chooses exposure automatically)

    Parameters:
        shutter_speed: initial shutter speed in microseconds (0 = auto).
    """
    # Initialise Camera
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE  # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION  # frame is resized on the GPU. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = shutter_speed
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(SLEEP_TIME)  # Camera warm-up time to apply settings

            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                framenumber = frameidx+1  # frameidx starts with 0, framenumber with 1
                image = frame.array  # raw NumPy array without JPEG encoding
                camera_exposure_speed = camera.exposure_speed  # shutter speed the camera actually used
                img = alignment_procedure(image, shutter_speed, framenumber, camera_exposure_speed)  # show annotated frame

                framenumber_to_save = 7000  # frame to save automatically
                trigger_save_frame = 'no'  # set to 'yes' to enable the automatic save below
                if framenumber == framenumber_to_save and trigger_save_frame=='yes':  # save desired frame
                    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S.%f")[:-3]
                    print(f"Take picture! framenumber: {framenumber} shutter speed: {shutter_speed} µs")
                    cv.imwrite(f"{path_saveFolder}/ss{shutter_speed}_Date {d1}.png", image)
                    break  # break from the loop, because we took the image we wanted

                output.truncate(0)  # clear the stream for next frame

                # Keyboard handling (requires the cv.imshow window to have focus).
                pressed_key = cv.waitKey(1) & 0xff
                if pressed_key == ord('q'):
                    break
                elif pressed_key == ord('i'):  # Take image from manipulated image if i is pressed
                    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S.%f")[:-3]
                    cv.imwrite(f"{path_saveFolder}/Date {d1}.png", img)
                    print('took image!')
                elif pressed_key == ord('b'):  # increase shutterspeed by 50
                    shutter_speed = round(shutter_speed+50)
                    camera.shutter_speed = shutter_speed
                elif pressed_key == ord('n'):  # increase shutterspeed by 500
                    shutter_speed = round(shutter_speed+500)
                    camera.shutter_speed = shutter_speed  # BUGFIX: new value was never applied to the camera
                elif pressed_key == ord('m'):  # max shutterspeed for the configured framerate
                    shutter_speed = round(1/FRAMERATE*1e6)
                    camera.shutter_speed = shutter_speed
                elif pressed_key == ord('x'):  # decrease shutterspeed by 500
                    shutter_speed = round(shutter_speed-500)
                    camera.shutter_speed = shutter_speed
                elif pressed_key == ord('c'):  # decrease shutterspeed by 50
                    shutter_speed = round(shutter_speed-50)
                    camera.shutter_speed = shutter_speed
                elif pressed_key == ord('o'):  # 0 lets the camera pick the exposure automatically
                    shutter_speed = 0
                    camera.shutter_speed = shutter_speed
def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed):
    """Overlay shutter-speed / exposure / frame-number text on a copy of img
    and display it in the "Current Frame" window.

    Parameters:
        img: BGR frame; a copy is annotated, the caller's array stays untouched.
        shutter_speed: requested shutter speed in microseconds.
        framenumber: 1-based frame counter shown in the overlay.
        camera_exposure_speed: exposure speed reported back by the camera (us).
    """
    img = img.copy()  # make copy of image and do not modify the original image
    font = cv.FONT_HERSHEY_SIMPLEX  # font
    fontScale = 1  # fontScale
    color = (255, 255, 255)  # Font colour in BGR
    thickness = 1  # Line thickness in px

    # Text layout: start 12 % down the frame, left-aligned, 50 px line spacing.
    frame_height = int(img.shape[0])
    text_start_position_Y = int(round(frame_height*0.12))
    text_linespacing = 50
    text_start_position_X = int(0)  # start text from left edge of image
    # (x, y) positions of the bottom-left corner of each text line.
    pos_1 = (text_start_position_X, text_start_position_Y)
    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing)
    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing)

    # define text to display
    text_line_1 = f"set shttr-spd: {shutter_speed} us"
    text_line_3 = f"Frame: {framenumber}"
    text_line_2 = f"ret exp-spd: {camera_exposure_speed} us"

    # cv.putText draws in place; the returned array is not needed.
    cv.putText(img, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
    cv.putText(img, text_line_2, pos_2, font, fontScale, color, thickness, cv.LINE_AA)
    cv.putText(img, text_line_3, pos_3, font, fontScale, color, thickness, cv.LINE_AA)
    cv.imshow("Current Frame", img)  # display the annotated image
def alignment_procedure(img, shutter_speed, framenumber, camera_exposure_speed):
    """Annotate a frame with camera info text and horizontal alignment lines.

    Draws a white centre row plus a blue line above and a red line below it
    (in BGR), so the LED strip can be aligned horizontally, then shows the
    result in the "Current Frame" window.

    Parameters:
        img: BGR frame; a copy is annotated, the caller's array stays untouched.
        shutter_speed: requested shutter speed in microseconds.
        framenumber: 1-based frame counter shown in the overlay.
        camera_exposure_speed: exposure speed reported back by the camera (us).

    Returns:
        The annotated copy of the frame (so the caller can save it).
    """
    img = img.copy()  # make copy of image and do not modify the original image
    font = cv.FONT_HERSHEY_SIMPLEX  # font
    fontScale = 1  # fontScale
    color = (255, 255, 255)  # Font colour in BGR
    thickness = 1  # Line thickness in px

    # Text layout: start 12 % down the frame, left-aligned, 50 px line spacing.
    frame_height = int(img.shape[0])
    text_start_position_Y = int(round(frame_height*0.12))
    text_linespacing = 50
    text_start_position_X = int(0)  # start text from left edge of image
    # (x, y) positions of the bottom-left corner of each text line.
    pos_1 = (text_start_position_X, text_start_position_Y)
    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing)
    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing)

    # define text to display
    text_line_1 = f"set shttr-spd: {shutter_speed} us"
    text_line_3 = f"Frame: {framenumber}"
    text_line_2 = f"ret exp-spd: {camera_exposure_speed} us"

    # cv.putText draws in place; the returned array is not needed.
    cv.putText(img, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
    cv.putText(img, text_line_2, pos_2, font, fontScale, color, thickness, cv.LINE_AA)
    cv.putText(img, text_line_3, pos_3, font, fontScale, color, thickness, cv.LINE_AA)

    # draw alignment lines into the image
    alignment_line_thickness = 1  # thickness of the alignment lines in pixels
    alignment_line_offset = 21  # offset of each alignment line from the centre row
    frame_center = round(frame_height/2)  # center of the frame
    offset_center = frame_center + 70  # shift the lines into the lower part of the image
    alignment_row_start = offset_center-alignment_line_offset
    alignment_row_end = offset_center+alignment_line_offset
    img[offset_center, :, :] = [255,255,255]  # white centre row (bgr format)
    img[alignment_row_start-alignment_line_thickness:alignment_row_start, :, :] = [255,0,0]  # blue line above
    img[alignment_row_end:alignment_row_end+alignment_line_thickness, :, :] = [0,0,255]  # red line below
    cv.imshow("Current Frame", img)  # display the annotated image
    return img
# Start Script
# Create a timestamped folder for the captured pictures.
now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M")
path_cwd = os.getcwd()
path_saveFolder = path_cwd+r"/Ledstripe_alignment_"+d1
try:
    os.mkdir(path_saveFolder)
except OSError:
    # mkdir fails if the folder already exists (same minute) or on I/O errors.
    print("Error! Ending script. Try it again in 1 minute")
    quit()

# start capture series for different shutter speeds
print('start caputure series...')
# take_image_picamera_opencv(shutter_speed=2000000) #Can see something at normal light conditions
take_image_picamera_opencv(shutter_speed=round(1/FRAMERATE*1e6))  # max shutter-speed depending on fps: 1/2 fps = 500 000 µs

# End Script
cv.destroyAllWindows()
print('Script finished')

View File

@ -0,0 +1,441 @@
# Creation Date: 10.01.2022
# Author: Kenan Gömek
# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures.
# Update: This script is designed to take image series with different camera parameters
# Update-Comment:
# To-DO:
import serial
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
# Comment for Measurement-Log:
CAM_NR = "04" # CAM:01,02,03,04
COMMENT = ""
# Set Parameters for series with variable shutterspeeds
# Uncomment/Comment needed code parts for shutter speed lists:
#START_SHUTTER_SPEED = 50 # microseconds. 0=auto
#END_SHUTTER_SPEED = 100 # microseconds
#STEP_SHUTTER_SPEED = 25 # microseconds
# shutter_speeds_add = [47, 66, 85, 104] # microseconds. shutter speeds to additionally add
shutter_speeds_add = [50] # microseconds. shutter speeds to additionally add
# from investigations: ss < 30 are all darker, but the darkness is the same. e.g: ss=5,10,15,20,25 are all the same brightness=Shutter_speed
# with ss <5 µs (because < 20 µs) one can request the minimum posisble shutter speed
# it was found, that ss is adjustable in 20 µs-steps from 30 µs ongoing: [10 µs, 30 µs, 50 µs] in this range
# retrieved ss: 9, 28, 47, 66, 85, 104 µs
shutter_speeds = list()
#for ss in range(START_SHUTTER_SPEED, END_SHUTTER_SPEED+STEP_SHUTTER_SPEED, STEP_SHUTTER_SPEED):
# shutter_speeds.append(ss)
#shutter_speeds = shutter_speeds + shutter_speeds_add # concatenate lists
shutter_speeds = shutter_speeds_add
shutter_speeds.sort() # sort list
# camera_resolutions = [(192, 144), (416,320), (640,480), (960,720), (1280,960), (1648,1232)]
camera_resolutions = [(192, 144)]
iso_values = [320]
# Define camera settings
if CAM_NR == "03":
SENSOR_MODE = 4 # corresponding sensor mode to resolution
OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308)
# Camera informations for log
CAM_DESCRIPTION = "Raspberry Pi Camera Module V2"
CAM_EAN = "506021437024020"
elif CAM_NR == "04":
SENSOR_MODE = 4 # corresponding sensor mode to resolution
# Camera informations for log
CAM_DESCRIPTION = "Raspberry Pi NoIR Camera Module V2"
CAM_EAN = "0640522710898"
elif CAM_NR == ("01"):
SENSOR_MODE = 4 # corresponding sensor mode to resolution --> gibt es sowas für camera 1? Ja gleicher Sensor wie Raspberry Pi V1 sensor!
# Camera informations for log
CAM_DESCRIPTION = "RPI-SPYCAM"
CAM_EAN = "4251266701743 "
elif CAM_NR == ("02"):
SENSOR_MODE = 4 # corresponding sensor mode to resolution --> gibt es sowas für camera 2? --> Ja gleicher Sensor wie Raspberry Pi V1 sensor!
# Camera informations for log
CAM_DESCRIPTION = "RB-Camera_JT"
CAM_EAN = "4250236815909"
AWB_MODE = 'off' # Auto white balance mode
# AWB_GAINS = (Fraction(485, 256), Fraction(397, 256)) # White Balance Gains to have colours read correctly: (red, blue)
AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, floar or fraction are valid.
BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
#the brighter, the brighter the LEDs and the higher the RGB values and vice versa!
CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
# Dictionary for color patterns
# r: red, g: green, b: blue, y: yellow, m: magenta, c: cyan
dict_color_pattern = {
"01": "rgb",
"02": "rgy",
"03": "rgm",
"04": "rgc",
"05": "rby",
"06": "rbm",
"07": "rbc",
"08": "rym",
"09": "ryc",
"10": "rmc",
"11": "gby",
"12": "gbm",
"13": "gbc",
"14": "gym",
"15": "gyc",
"16": "gmc",
"17": "bym",
"18": "byc",
"19": "bmc",
"20": "ymc",
}
# Define Funcions
def take_image_picamera_opencv(shutter_speed, iso, resolution, led_rgb_value):
    """Capture frames via the PiCamera video port and save one reference frame.

    The setting order and the sleeps between settings are deliberate: the
    analog/digital gains settle against the initial ISO/shutter speed and are
    frozen once exposure_mode is set to 'off' — do not reorder these lines.

    Parameters:
        shutter_speed: shutter speed in microseconds (0 = auto).
        iso: ISO value; must be set before the gains are fixed.
        resolution: (width, height) output resolution; resized on the GPU.
        led_rgb_value: LED command string, either "rrr-ggg-bbb" or a two-digit
            colour-pattern number ("01".."20"); used to build the file name.
    """
    colour_string = led_rgb_value
    color_substring_list = [str(x).zfill(2) for x in range(1,21)]  # string list to check for color pattern
    # Initialise Camera
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE  # force camera into desired sensor mode
            camera.resolution = resolution  # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = iso  # Iso must be set prior fixing the gains (prior setting exposure_mode to off)
            camera.shutter_speed = shutter_speed
            # it was found that, you have to set the right shutter speed at the first initalisation of the current runtime of the program.
            # The gains (analog, digital) will adjust to this set up.
            # After the gains are fixed, they will never change! even if you change the shutter speed during the runtime.
            # To get consistent brightness values, set the right shutter speed at initalisation once.
            time.sleep(SLEEP_TIME)  # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            # digital_gain: The value represents the digital gain the camera applies after conversion of the sensors analog output.
            # analog_gain: The value represents the analog gain of the sensor prior to digital conversion.
            # digital and analog gain can not be set from the user.
            camera.exposure_mode = EXPOSURE_MODE
            # exposure_mode value 'off' overrides the ISO setting.
            # For example, 'off' fixes analog_gain and digital_gain entirely,
            # preventing this property from adjusting them when set.
            time.sleep(1)
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            # camera.start_preview()
            time.sleep(SLEEP_TIME)  # Camera warm-up time to apply settings
            exposure_speed = camera.exposure_speed  # settings have to be applied before retrieving the right exposure_speed
            # cv.namedWindow("Current Frame", cv.WINDOW_NORMAL)
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                framenumber = frameidx+1  # frameidx starts with 0, framenumber with 1
                image = frame.array  # raw NumPy array without JPEG encoding
                #display_image_with_text(image, shutter_speed, framenumber) # show the frame
                framenumber_to_save = 15  # save frame 15 (no particular reason for frame 15)
                if framenumber == framenumber_to_save:  # save desired frame
                    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S")
                    print(f"Take picture! iso: {iso} shutter speed: {shutter_speed} µs")
                    # File name encodes the full capture configuration for the measurement log.
                    if "-" in colour_string:  # plain "rrr-ggg-bbb" colour command
                        cv.imwrite(f"{path_saveFolder}/RGB {colour_string}_ss{shutter_speed}_es{exposure_speed}_iso{iso}_b{BRIGHTNESS}_c{CONTRAST}_res{resolution[0]}x{resolution[1]}_Date {d1}.png", image)
                    elif any(substring in colour_string for substring in color_substring_list):  # colour-pattern number
                        cv.imwrite(f"{path_saveFolder}/cp {colour_string}-{dict_color_pattern[colour_string]}_ss{shutter_speed}_es{exposure_speed}_iso{iso}_b{BRIGHTNESS}_c{CONTRAST}_res{resolution[0]}x{resolution[1]}_Date {d1}.png", image)
                    break  # break from the loop, because we took the image we wanted
                output.truncate(0)  # clear the stream for next frame
                # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented.
                # if q is pressed, break from loop.
                # if cv.waitKey(1) & 0xff == ord('q'):
                #     break
def take_image_series_onecolourchannel_per_led(led_rgb_value):
    """Capture one image series with the LED strip set to a single colour."""
    # Push the colour command to the Arduino and echo its acknowledgement.
    s.write(str.encode(led_rgb_value))
    response = s.readline()
    print(response)
    # Sweep every resolution x shutter-speed x ISO combination.
    total_resolutions = len(camera_resolutions)
    for idx, res in enumerate(camera_resolutions, start=1):
        print(f'resolution {idx}/{total_resolutions}: {res}')
        for shutter in shutter_speeds:
            for iso_value in iso_values:
                take_image_picamera_opencv(shutter, iso_value, res, led_rgb_value)
def take_image_series_rgb(brightness):
    """Take image series. One colour per series: red, then green, then blue.

    Parameters:
        brightness: 3-digit channel value (string or int) used to build the
            "rrr-ggg-bbb" LED command.
    """
    t1 = time.perf_counter()
    # (name, LED command) pairs; previously three copy-pasted blocks.
    colour_series = [
        ('red', f"{brightness}-000-000"),
        ('green', f"000-{brightness}-000"),
        ('blue', f"000-000-{brightness}"),
    ]
    for colour_name, led_rgb_value in colour_series:
        print(f'switchting to {colour_name}')
        # Send the colour to the Arduino and echo its acknowledgement.
        s.write(str.encode(led_rgb_value)); response = s.readline(); print(response)
        for i, resolution in enumerate(camera_resolutions):
            print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}')
            for ss in shutter_speeds:
                for iso in iso_values:
                    take_image_picamera_opencv(ss, iso, resolution, led_rgb_value)
    t2 = time.perf_counter()
    elapsed_time = round((t2-t1)/60,2)
    print(f'series_rgb finished in {elapsed_time} min')
def take_image_series_ymc(brightness):
    """Take image series. One colour per series: yellow, then magenta, then cyan.

    Parameters:
        brightness: 3-digit channel value (string or int) used to build the
            "rrr-ggg-bbb" LED command.
    """
    t1 = time.perf_counter()
    # (name, LED command) pairs; previously three copy-pasted blocks.
    colour_series = [
        ('yellow', f"{brightness}-{brightness}-000"),
        ('magenta', f"{brightness}-000-{brightness}"),
        ('cyan', f"000-{brightness}-{brightness}"),
    ]
    for colour_name, led_rgb_value in colour_series:
        print(f'switchting to {colour_name}')
        # Send the colour to the Arduino and echo its acknowledgement.
        s.write(str.encode(led_rgb_value)); response = s.readline(); print(response)
        for i, resolution in enumerate(camera_resolutions):
            print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}')
            for ss in shutter_speeds:
                for iso in iso_values:
                    take_image_picamera_opencv(ss, iso, resolution, led_rgb_value)
    t2 = time.perf_counter()
    elapsed_time = round((t2-t1)/60,2)
    print(f'series_ymc finished in {elapsed_time} min')  # fixed: previously mislabelled as 'series_rgb'
def take_image_series_twocolourchannels_per_led(brightness):
    """Take image series. Two colours per series: RG, RB, GB.

    Fixed to match the sibling series functions: the old body referenced
    START/END/STEP_SHUTTER_SPEED (commented out at module level -> NameError)
    and called take_image_picamera_opencv with only two of its four arguments.

    Parameters:
        brightness: 3-digit channel value (string or int) used to build the
            "rrr-ggg-bbb" LED command.
    """
    # (name, LED command) pairs covering all RGB two-channel combinations.
    colour_series = [
        ('red and green', f"{brightness}-{brightness}-000"),
        ('red and blue', f"{brightness}-000-{brightness}"),
        ('green and blue', f"000-{brightness}-{brightness}"),
    ]
    for colour_name, led_rgb_value in colour_series:
        print(f'switchting to {colour_name}')
        # Send the colour to the Arduino and echo its acknowledgement.
        s.write(str.encode(led_rgb_value)); response = s.readline(); print(response)
        for i, resolution in enumerate(camera_resolutions):
            print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}')
            for ss in shutter_speeds:
                for iso in iso_values:
                    take_image_picamera_opencv(ss, iso, resolution, led_rgb_value)
def takeimage_all_color_patterns():
    """Take images of all 20 color patterns including orientation.

    The brightness is hard coded to 255 in the Arduino.
    """
    # (The second sentence above was a stray bare-string statement before.)
    t1 = time.perf_counter()
    print(f'take images for all 20 color patterns')
    for number_of_color_pattern in range(1,21):
        number_of_color_pattern = str(number_of_color_pattern).zfill(2)  # Arduino expects two digits
        print(f"color pattern no: {number_of_color_pattern}: {dict_color_pattern[number_of_color_pattern]}")
        s.write(str.encode(f"color_pattern_{number_of_color_pattern}"))
        for i, resolution in enumerate(camera_resolutions):
            print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}')
            for ss in shutter_speeds:
                for iso in iso_values:
                    take_image_picamera_opencv(ss, iso, resolution, number_of_color_pattern)
    t2 = time.perf_counter()
    elapsed_time = round((t2-t1)/60,2)
    print(f'all color patterns finished in {elapsed_time} min')  # fixed: previously mislabelled as 'series_rgb'
def display_image_with_text(img, shutter_speed, framenumber):
    """Overlay shutter-speed and frame-number text on a copy of img and show it.

    Parameters:
        img: BGR frame; a copy is annotated, the caller's array stays untouched.
        shutter_speed: requested shutter speed in microseconds.
        framenumber: 1-based frame counter shown in the overlay.
    """
    img = img.copy()  # make copy of image and do not modify the original image
    font = cv.FONT_HERSHEY_SIMPLEX  # font
    fontScale = 1  # fontScale
    color = (255, 255, 255)  # Font colour in BGR
    thickness = 1  # Line thickness in px

    # Text layout: start 12 % down the frame, from 1/4 width, 50 px line spacing.
    frame_width = int(img.shape[1])
    frame_height = int(img.shape[0])
    text_start_position_Y = int(round(frame_height*0.12))
    text_linespacing = 50
    pos_1 = (int(frame_width/4), text_start_position_Y)
    pos_2 = (int(frame_width/4), text_start_position_Y+text_linespacing)

    # define text to display
    text_line_1 = f"Shutterspeed: {shutter_speed} us"
    text_line_2 = f"Frame: {framenumber}"

    # cv.putText draws in place; the returned array is not needed.
    cv.putText(img, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
    cv.putText(img, text_line_2, pos_2, font, fontScale, color, thickness, cv.LINE_AA)
    cv.imshow("Current Frame", img)  # display the annotated image
# Start Script
# Create a timestamped folder for the captured pictures and write a settings log.
now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M")
path_cwd = os.getcwd()
# path_saveFolder = path_cwd+r"/Solarradiation_"+d1
path_saveFolder = path_cwd+r"/series_"+d1
try:
    os.mkdir(path_saveFolder)
    # Measurement log: camera identity and every fixed capture setting.
    lines = [f"CAMERA NR: {CAM_NR}", f"CAMERA DESCRIPTION: {CAM_DESCRIPTION}", f"CAMERA EAN: {CAM_EAN}",
             f"\n#Camera settings:", f"SENSOR MODE: {SENSOR_MODE}",
             f"FRAMERATE: {FRAMERATE} # frames per second",
             f"AWB_MODE: {AWB_MODE}", f"AWB_GAINS (red, blue): {AWB_GAINS}",
             f"Brightness: {BRIGHTNESS}", f"Contrast: {CONTRAST}",
             f"EXPOSURE_MODE: {EXPOSURE_MODE}",
             f"\nCOMMENT: {COMMENT}"]
    with open(os.path.join(path_saveFolder, "log.txt"), "w") as f:
        f.write("\n".join(lines))
except OSError:
    # mkdir fails if the folder already exists (same minute) or on I/O errors.
    print("Error! Ending script. Try it again in 1 minute")
    quit()

# Turn on LEDs
s = serial.Serial("/dev/ttyACM0", 9600)  # Name of USB-Port may vary (here: ACM0)
time.sleep(5)  # the Arduino resets when a serial connection is opened, so wait briefly

# start capture series for different shutter speeds
print('start caputure series...')
# s.write(str.encode("000-255-000")); response = s.readline(); print(response)
# take_image_picamera_opencv(shutter_speed=200, led_rgb_value="000-255-000")

# take image series for two colour channels per led
# for brgns in range(50,260,50):
#     brgns=str(brgns).zfill(3)
#     take_image_series_twocolourchannels_per_led(brgns) #Take Image Series for every color in given brightness
# take_image_series_twocolourchannels_per_led(255) #Take Image Series for every color in given brightness

# # take image series for white colour
# for brgns in range(50,260,50):
#     brgns=f"{brgns}".zfill(3)
#     ledcolour=f"{brgns}-{brgns}-{brgns}"
#     take_image_series_onecolourchannel_per_led(ledcolour)
# take_image_series_onecolourchannel_per_led("255-255-255")

# --------------------------------------------------------------------------------------------------------------------------------------------
# # 03.02.2022: Take images with multiple shutter speeds but in brightness 255 for determining colour falsification to the edges of the image
# take_image_series_rgb(255)
# take_image_series_ymc(255)
# take_image_series_onecolourchannel_per_led("255-255-255")
# --------------------------------------------------------------------------------------------------------------------------------------------
# # 26.02.2022: Take images with multiple shutter speeds and brightness for investigating the influence of the solar radiation on the images
# # take image series for rgb
# for brgns in range(50,260,50):
#     brgns=str(brgns).zfill(3)
#     take_image_series_ymc(brgns) #Take Image Series for every color in given brightness
# take_image_series_ymc(255) #Take Image Series for every color in given brightness
# # take image series for ymc
# for brgns in range(50,260,50):
#     brgns=str(brgns).zfill(3)
#     take_image_series_rgb(brgns) #Take Image Series for every color in given brightness
# take_image_series_rgb(255) #Take Image Series for every color in given brightness
# # take image series for white colour
# for brgns in range(50,260,50):
#     brgns=f"{brgns}".zfill(3)
#     ledcolour=f"{brgns}-{brgns}-{brgns}"
#     take_image_series_onecolourchannel_per_led(ledcolour)
# take_image_series_onecolourchannel_per_led("255-255-255")
# --------------------------------------------------------------------------------------------------------------------------------------------
# 10.04.2022: Take images with multiple shutter speeds for investigating the influence of the solar radiation on the images
# take image series for rgb
take_image_series_rgb(255)  # Take Image Series for every color in given brightness
# take image series for ymc
take_image_series_ymc(255)  # Take Image Series for every color in given brightness
# take image series for white colour
take_image_series_onecolourchannel_per_led("255-255-255")
# --------------------------------------------------------------------------------------------------------------------------------------------
# 07.03.2022: Take images of all 20 color patterns with birghtness 255
takeimage_all_color_patterns()
# --------------------------------------------------------------------------------------------------------------------------------------------
# End Script
# Turn off LEDs and release the serial port before exiting.
s.write(str.encode('000-000-000')); response = s.readline(); print(response)
s.close()  # close serial port
cv.destroyAllWindows()
t1 = round(time.perf_counter()/60,2)
print(f'Script finished in {t1} min')

View File

@ -0,0 +1,2 @@
#Author: Kenan Gömek
python object_size.py --image $IMAGE_PATH$ --width $OBJECT_WIDTH$

View File

@ -0,0 +1,117 @@
#modified by Kenan Gömek
# Source: https://pyimagesearch.com/2016/03/28/measuring-size-of-objects-in-an-image-with-opencv/
# import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
def midpoint(ptA, ptB):
    """Return the point halfway between *ptA* and *ptB* as an (x, y) tuple."""
    mid_x = (ptA[0] + ptB[0]) * 0.5
    mid_y = (ptA[1] + ptB[1]) * 0.5
    return (mid_x, mid_y)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to the input image")
ap.add_argument("-w", "--width", type=float, required=True,
    help="width of the left-most object in the image (in inches)")
# NOTE(review): the help text says inches, but the labels drawn below say
# "mm" -- presumably the reference width is actually given in mm; confirm.
args = vars(ap.parse_args())
# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# perform edge detection, then perform a dilation + erosion to
# close gaps in between object edges
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# sort the contours from left-to-right and initialize the
# 'pixels per metric' calibration variable (set from the first, i.e.
# left-most, sufficiently large contour)
(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None
# loop over the contours individually
for c in cnts:
    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 100:
        continue
    # compute the rotated bounding box of the contour
    orig = image.copy()
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    # order the points in the contour such that they appear
    # in top-left, top-right, bottom-right, and bottom-left
    # order, then draw the outline of the rotated bounding
    # box
    box = perspective.order_points(box)
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
    # loop over the original points and draw them
    for (x, y) in box:
        cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
    # unpack the ordered bounding box, then compute the midpoint
    # between the top-left and top-right coordinates, followed by
    # the midpoint between bottom-left and bottom-right coordinates
    (tl, tr, br, bl) = box
    (tltrX, tltrY) = midpoint(tl, tr)
    (blbrX, blbrY) = midpoint(bl, br)
    # compute the midpoint between the top-left and bottom-left points,
    # followed by the midpoint between the top-right and bottom-right
    (tlblX, tlblY) = midpoint(tl, bl)
    (trbrX, trbrY) = midpoint(tr, br)
    # draw the midpoints on the image
    cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
    # draw lines between the midpoints
    cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
        (255, 0, 255), 2)
    cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
        (255, 0, 255), 2)
    # compute the Euclidean distance between the midpoints
    dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
    dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
    # if the pixels per metric has not been initialized, then
    # compute it as the ratio of pixels to supplied metric
    if pixelsPerMetric is None:
        pixelsPerMetric = dB / args["width"]
        print(f"dB: {dB}")
        w_kg = args["width"]
        print(f"width: {w_kg}")
        print(f"pixelsPerMetric: {pixelsPerMetric}")
    # compute the size of the object
    dimA = dA / pixelsPerMetric
    dimB = dB / pixelsPerMetric
    # draw the object sizes on the image
    cv2.putText(orig, "{:.2f} mm".format(dimA),
        (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (255, 255, 255), 2)
    cv2.putText(orig, "{:.2f} mm".format(dimB),
        (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (255, 255, 255), 2)
    # show the output image and wait for a keypress before the next contour
    cv2.imshow("Image", orig)
    cv2.waitKey(0)

View File

@ -0,0 +1,454 @@
# Creation Date: 01.03.2022
# Author: Kenan Gömek
# This program is for the development of the lane detection algorithm on a windows and Linux machine
# It uses images, which were captured previously
# V02 detects the lane by using the grayscale-image of the rgb-image
# NOTE(review): `position` from turtle is never used below and turtle pulls in
# tkinter -- likely an accidental auto-import; candidate for removal.
from turtle import position
import cv2 as cv
import numpy as np
import os
import time
import platform
# input: image folder depends on the platform (Windows dev machine vs. Raspberry Pi)
if platform.system() == "Windows":
    folder_path=r"images_input"
if platform.system() == "Linux":
    folder_path=r"/home/pi/Desktop/images_input"
# Parameters
pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480
# NOTE(review): the first two alternatives above carry the same "416x320"
# comment with different values -- confirm which calibration belongs to which
# resolution before switching.
# Offset of the camera sensor in the Scooty relative to the Scooty coordinate system (KS)
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
# image parameters: width > height
image_height = 320 # shape [0]
image_width = 416 # shape[1]
# calculate center of image (integer pixel coordinates)
[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16)
threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel
# Parameters for LED Detection
minDiameter_mm = 3.75 # [mm] minimum diameter of detected blob/LED. Must be minimum >0 !
# maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED
# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7
# Parameters for grayscale to binary conversion
binary_threshold = 15 # determined by testing and application
# the higher threshold is, the smaller the diameter of the led, because more high values are extracted
binary_maxval = 255 # values over threshold will be set to maxval
# Parameters for line fitting for lane construction (cv.fitLine)
param = 0 # not used for DIST_L2
reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line).
aeps = 0.001 # Sufficient accuracy for the angle.
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane and so on
print_additional_info = True
# calculations before start
# Filter blobs by Area --> not implemented anymore, because no need, because detection is good and less time for calculation needed
# more than good trade off!
minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED
# maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED
minArea_px2 = np.pi/4*minDiameter_px**2 # min Area of a blob in px^2
# maxArea_px2 = np.pi/4*maxDiameter_px**2
# minArea = minArea_px2 # min Area of a blob in px^2
# params.maxArea = maxArea_px2 # max Area of a blob in px^2.
# reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform LED points from the sensor frame into the lane frame (KS-LED).

    Translates the points by (dx, dy) plus the image centre, rotates them by
    -alpha_rad (passive rotation, since the sensor angle is clockwise-positive)
    and returns them sorted along the lane axis (x of KS-LED).
    """
    # work on a signed copy: plain slicing would alias the caller's array and
    # uint16 arithmetic could overflow during the translation
    pts = detected_LEDs.copy().astype(np.int16)
    # translation into the foot-point frame
    shifted_x = pts[:, 0] - dx - x_0
    shifted_y = pts[:, 1] - dy - y_0
    # passive rotation by -alpha_rad (cos(-a)=cos(a), sin(-a)=-sin(a))
    cos_a = np.cos(-alpha_rad)
    sin_a = np.sin(-alpha_rad)
    pts[:, 0] = cos_a * shifted_x - sin_a * shifted_y
    pts[:, 1] = sin_a * shifted_x + cos_a * shifted_y
    # sort points along the lane axis (x_2 of KS-LED)
    order = pts[:, 0].argsort(kind='quicksort')
    return pts[order]
def construct_lane(detected_LEDs, img_bgr):
    """Fit a line through the detected LED centres and derive the lane pose.

    Parameters: detected_LEDs -- matrix with one LED per row (x, y, colour);
    img_bgr -- image that useful lines/points are drawn into when draw_opencv
    is set.  Returns (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED).
    """
    # get points
    # xy_pnts = detected_LEDs[:,0:2]
    # x_pnts = detected_LEDs[:,0]
    # y_pnts = detected_LEDs[:,1]
    # approach 2:
    # fit line through centers of LEDs in KS_0
    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # x_2, y_2: point on the line --> mean of leds --> point in the middle of the leds
    # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)
    alpha_rad = np.arctan2(dy, dx) # calculate angle of line
    alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line in degrees
    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
    if print_additional_info:
        alpha_print = alpha[0]
        alpha_print = float("{0:.2f}".format(alpha_print))
        print(f"Alpha: {alpha_print}°")
    # smallest distance from the image centre to the fitted line
    # calculation according to: Repetitorium Hoehere Mathematik, Wirth
    # line:  x = a + t*b
    # point: OP = p
    # d = abs(b x (p-a))/(abs(b))
    # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # direction vector of the lane
    c = p-a
    # vector magnitude: np.linalg.norm(vec)
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] (computed for reference; not used further below)
    #print(f"d: {round(d,2)}")
    # foot of the perpendicular x_FP=(X_LED, Y_LED)
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    if print_additional_info:
        # convert float32, round and prepare for printing string
        x_LED_print = float("{0:.2f}".format(x_LED))
        y_LED_print = float("{0:.2f}".format(y_LED))
        print(f"x_LED: {x_LED_print} [px], y_LED: {y_LED_print} [px]")
    # distance (dx_LED, dy_LED) from the foot point x_FP to KS_0 (P)
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    if print_additional_info:
        # convert float32, round and prepare for printing string
        dx_LED_print = float("{0:.2f}".format(dx_LED))
        dy_LED_print = float("{0:.2f}".format(dy_LED))
        dx_LED_mm_print = float("{0:.2f}".format(dx_LED_mm))
        dy_LED_mm_print = float("{0:.2f}".format(dy_LED_mm))
        print(f"dx_LED: {dx_LED_print} [px] , dy_LED: {dy_LED_print} [px]")
        print(f"dx_LED: {dx_LED_mm_print} [mm] , dy_LED: {dy_LED_mm_print} [mm]")
    # distance (dx, dy) of the foot point x_FP from the image centre to KS_Scooty
    # these are the values that get returned
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED_scooty: {round(dx_LED_scooty,2)} [px] , dy_LED_scooty: {round(dy_LED_scooty,2)} [px]")
        print(f"dx_LED_scooty: {round(dx_LED_scooty_mm,2)} [mm] , dy_LED_scooty: {round(dy_LED_scooty_mm,2)} [mm]")
    # transform the points to obtain the sorted positions of the LEDs along the lane
    # with a vertical lane the raw detection order can scramble the pattern (e.g. 211323233),
    # therefore continue with the sorted points
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    if print_additional_info:
        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
    #-----------------------------------
    # draw useful lines and points
    # draw lane line
    if draw_opencv:
        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
        #print(f"pt_0: {pt_0}, pt_1: {pt_1}")
        cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
        # draw dx dy
        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> perpendicular
        # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx
        # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy
        #draw additional points
        cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points
        #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
        cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. foot of the perpendicular
    if show_opencv_window:
        cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image to grayscale using the channel average Y = (R+G+B)/3.

    Returns a uint8 image of the same height/width as the input.
    """
    # mean() accumulates in float64, so the sum of the three uint8 channels
    # cannot overflow; the cast back to uint8 truncates exactly like the
    # original (b+g+r)/3 division did
    return image_bgr.mean(axis=2).astype(np.uint8, copy=False)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Classify the colour of every detected LED and write it into column 2.

    For each LED the colour channels are averaged over a small square window
    around its centre; a channel counts as "active" when its mean exceeds the
    global threshold_color_detection.  The (blue, green, red) activity pattern
    maps to one of the global colour numbers.  The matrix is modified in place
    and also returned.
    """
    half_window = 1  # half edge length of the sampling window around the LED centre
    # half_window=0 --> only the centre pixel, 1 --> 9 pixels, 2 --> 25 pixels
    # map (blue_active, green_active, red_active) --> colour number;
    # the all-inactive case cannot occur for a detected blob, but falls back
    # to color_number_off via dict.get
    colour_lookup = {
        (False, False, True): color_number_red,
        (False, True, False): color_number_green,
        (True, False, False): color_number_blue,
        (False, True, True): color_number_yellow,     # red + green
        (True, False, True): color_number_magenta,    # red + blue
        (True, True, False): color_number_cyan,       # green + blue
        (True, True, True): color_number_white,       # red + green + blue
    }
    for led in matrix_of_LEDs:
        x_pos = led[0]  # uint16
        y_pos = led[1]  # uint16
        # +1 at the stop index because slicing is exclusive there
        region = image_bgr[y_pos-half_window:y_pos+half_window+1,
                           x_pos-half_window:x_pos+half_window+1]
        # uint16 to avoid overflow when summing uint8 pixel values
        region = region.astype(np.uint16, copy=False)
        pixel_count = region[:, :, 0].size
        mean_blue = region[:, :, 0].sum()/pixel_count
        mean_green = region[:, :, 1].sum()/pixel_count
        mean_red = region[:, :, 2].sum()/pixel_count
        activity = (mean_blue > threshold_color_detection,
                    mean_green > threshold_color_detection,
                    mean_red > threshold_color_detection)
        led[2] = colour_lookup.get(activity, color_number_off)  # uint16
    return matrix_of_LEDs
def get_position_of_LEDs_contours(image_gray, image_bgr):
    """Find LED centre positions via binary thresholding and contour moments.

    Returns a uint16 matrix with one row per LED (columns: x, y,
    color_number_off placeholder), or None when no contour / no sufficiently
    large blob was found.  Optionally draws centroids and contours for
    debugging (draw_opencv / show_opencv_window globals).
    """
    # binarise the grayscale image; a higher binary_threshold shrinks the
    # apparent LED diameter because fewer bright pixels survive
    ret, image_binary = cv.threshold(image_gray, binary_threshold, binary_maxval, cv.THRESH_BINARY)
    # find contours; RETR_EXTERNAL retrieves only the extreme outer contours
    contours, hierarchy = cv.findContours(image_binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        if print_additional_info:
            print(f"No contours were detected")
        return None
    if draw_opencv:
        # draw on a copy, otherwise the drawn circle colour would later be
        # picked up as the LED colour
        image_bgr_contours = image_bgr.copy()
    # collect LED rows in a Python list and build the array once at the end;
    # bug fix: the previous np.vstack inside the loop was O(n^2)
    led_rows = []
    for cnt in contours:
        area = cv.contourArea(cnt)
        # diameter = 2*np.sqrt(area/np.pi)
        # Filter contours by area. minimum Area needs to be at least >0 !
        if area > minArea_px2:
            M = cv.moments(cnt)
            # prevent zero division for degenerate contours
            if M['m00'] == 0:
                cx = 0
                cy = 0
            else:
                cx = int(M['m10']/M['m00'])
                cy = int(M['m01']/M['m00'])
            # colour is classified later; start with the "off" placeholder
            led_rows.append([cx, cy, color_number_off])
            # draw centroids
            if draw_opencv:
                radius = 2
                color = (255,255,255)
                thickness = -1 # filled
                cv.circle(image_bgr_contours,(cx,cy), radius, color, thickness)
    if len(led_rows) == 0:
        if print_additional_info:
            print(f"No LEDs were detected")
        return None
    if print_additional_info:
        print(f"detected LEDs: {len(led_rows)}")
    if draw_opencv:
        # draw all contours in white
        contours_to_pass = -1 # pass all contours
        color_of_contour = (255,255,255)
        line_thickness = 1
        cv.drawContours(image_bgr_contours, contours, contours_to_pass, color_of_contour, line_thickness)
        if show_opencv_window:
            cv.imshow("binary", image_binary)
            cv.imshow("Contours", image_bgr_contours)
    return np.array(led_rows, dtype=np.uint16)
def detect_position_of_LEDs(image_bgr):
    """Return the LED centre positions found in *image_bgr*, or None.

    Converts to grayscale first, then delegates to the contour-based search.
    """
    grayscale_image = convert_rgb_to_grayscale_average(image_bgr)
    if show_opencv_window:
        cv.imshow("grayscale", grayscale_image)
    return get_position_of_LEDs_contours(grayscale_image, image_bgr)
def lane_detection(image_bgr):
    """Run the full pipeline: LED detection, colour classification, lane fit.

    Returns the matrix of detected LEDs (x, y, colour number) or None when no
    LED was found.  construct_lane is called for its side effects (prints,
    drawing into image_bgr).
    """
    # stage 1: locate the LEDs
    if print_additional_info:
        print(f"Detect LEDs and color:")
    position_of_LEDs = detect_position_of_LEDs(image_bgr)
    if position_of_LEDs is None:
        return None
    # stage 2: classify their colours
    detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr)
    if print_additional_info:
        print(f"Detected LEDs in KS_Sensor (x,y):\n{detected_LEDs}")
        print("\nContruct lane with consideration of camera offset:")
    # stage 3: fit the lane through the LED centres
    dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
        construct_lane(detected_LEDs, image_bgr)
    return detected_LEDs
def main():
    """Run lane detection on every PNG in folder_path and time each image."""
    filenames_of_images = [f for f in os.listdir(folder_path) if f.endswith('.png')]
    for i, filename_of_image in enumerate(filenames_of_images):
        if print_additional_info:
            print(f"image {i+1}/{len(filenames_of_images)}:{filename_of_image}")
        full_path_of_image = os.path.join(folder_path, filename_of_image)
        image_bgr = cv.imread(full_path_of_image, cv.IMREAD_COLOR) # load original image
        # time only the processing, not the image loading
        start_processing = time.perf_counter()
        detected_LEDs = lane_detection(image_bgr)
        end_processing = time.perf_counter()
        time_processing = end_processing-start_processing
        time_processing = time_processing*1000  # seconds --> milliseconds
        time_processing = round(time_processing, 2)
        print(f'processing time: {time_processing} ms')
        #print(f"_____________________________________")
        # show images:
        # cv.imshow("Blue channel", image_b)
        # cv.imshow("Green channel", image_g)
        # cv.imshow("Red channel", image_r)
        if show_opencv_window:
            pressed_key = cv.waitKey(0) & 0xff # display and wait if a key is pressed and then continue
            # 'q' aborts the whole run
            if pressed_key == ord('q'):
                exit()
            cv.destroyAllWindows()
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,254 @@
# Creation Date: 01.03.2022
# Author: Kenan Gömek
# This program is for the development of the lane detection algorithm on a windows machine
# It uses images, which were captured previously
import cv2 as cv
import numpy as np
import os
# input: folder with previously captured test images (Windows dev machine)
folder_path=r"U:\bwsyncshare\images_input"
# Parameters
pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480
cut_off_brightness_grayscale = 45 # pixels with a value less than this will be set to zero (black); earlier experiments used 15
# Parameters for HoughCircles
dp = 1 # Inverse ratio of the accumulator resolution to the image resolution. For example, if dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has half as big width and height.
minDist_mm = 10 # [mm] minimal distance between two circles
minDist_px = int(minDist_mm*pixels_per_mm) # in [px] Minimum distance in px between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.
minRadius_mm = 5 # [mm] minimum radius of a circle
minRadius_px = int(minRadius_mm*pixels_per_mm) # [px] Minimum circle radius.
maxRadius_mm = 7 # [mm] maximum radius of a circle
maxRadius_px = int(maxRadius_mm*pixels_per_mm) # [px] Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, returns centers without finding the radius.
param1 = 100 # 30 First method-specific parameter. In case of HOUGH_GRADIENT , it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
# If circles/LEDs should be detected at low shutter speeds, than lower this value
# Upper threshold for the internal Canny edge detector.
# "Gradient value between dark and white"
param2 = 5 # 12 Second method-specific parameter. In case of HOUGH_GRADIENT , it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first.
# By increasing this threshold value, we can ensure that only the best circles, corresponding to larger accumulator values, are returned.
# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16
color_number_red = 1
color_number_green = 2
color_number_blue = 3
def preprocess_grayscale_image_1(image_gray_copy, color_channel):
    """Preprocessing variant 1: black out every pixel below the global
    brightness cut-off and show the result for the given colour channel."""
    cutoff_image = image_gray_copy.copy()
    below_cutoff = cutoff_image < cut_off_brightness_grayscale
    cutoff_image[below_cutoff] = 0
    cv.imshow(f"{color_channel} channel cutoff", cutoff_image[150:,:])
    return cutoff_image
def preprocess_grayscale_image_2(image_gray_copy, color_channel):
    """Preprocessing variant 2: brightness cut-off followed by binarisation.

    Pixels below the global cut-off are zeroed (prevents false detections in
    colour channels that should stay dark), then everything above the 95th
    percentile is set to 255 and the rest to 0 -- binary input improves the
    downstream detection accuracy.
    """
    cutoff_image = image_gray_copy.copy()
    cutoff_image[cutoff_image < cut_off_brightness_grayscale] = 0
    percentile_95 = np.percentile(cutoff_image, 95)
    ret, binary_image = cv.threshold(cutoff_image, percentile_95, 255, cv.THRESH_BINARY)
    return binary_image
def preprocess_grayscale_image_3(image_gray_copy, color_channel):
    """Preprocessing variant 3: brightness cut-off plus percentile cut-off.

    NOTE(review): the binary image `tresh` is computed but never used -- the
    function returns the percentile-cutoff image instead.  Looks like leftover
    experimentation; confirm intent before reusing this variant.
    """
    # Set all pixels with a value less than "cut_off_brightness_grayscale" to zero
    # This prevents the false detection of color-channels which are not intended to be detected
    image_gray_cutoff = image_gray_copy.copy()
    image_gray_cutoff[image_gray_cutoff < cut_off_brightness_grayscale]=0
    perc = np.percentile(image_gray_cutoff, 95) # calculate 95 % percentile
    image_gray_cutoff[image_gray_cutoff < perc]=0
    cv.imshow(f"{color_channel} channel grayscale cutoff", image_gray_cutoff[150:,:])
    # For better accuracy, binary images are used before finding contours --> apply threshold
    image_gray_binary = image_gray_cutoff
    perc = np.percentile(image_gray_binary, 95) # calculate 95 % percentile
    ret, tresh=cv.threshold(image_gray_binary, perc, 255, cv.THRESH_BINARY) # set pixels above the percentile to 255, the rest to 0
    preprocessed_image = image_gray_cutoff
    return preprocessed_image
def preprocess_grayscale_image_4_cannyedge(image_gray_copy, color_channel):
    """Preprocessing variant 4: Canny edge detection on the channel image."""
    lower_threshold = 1
    upper_threshold = 5
    edge_image = cv.Canny(image_gray_copy, lower_threshold, upper_threshold)
    cv.imshow(f"{color_channel} channel edge", edge_image[150:,:])
    return edge_image
def detect_LEDs_in_color_channel(image_gray, color_channel, image_gray_copy, image_bgr_copy):
    """Detect LEDs in one colour channel via HoughCircles.

    Returns a uint16 matrix with one row per LED (columns: x, y, r, colour
    number) or None when no circle was found.  Draws the detections into
    image_gray_copy / image_bgr_copy for debugging.
    NOTE(review): the `image_gray` parameter is not used here -- detection runs
    on the preprocessed copy only.
    """
    # alternative preprocessing variants kept for experimentation:
    # preprocessed_gray_image = preprocess_grayscale_image_1(image_gray_copy=image_gray_copy, color_channel=color_channel)
    preprocessed_gray_image = preprocess_grayscale_image_2(image_gray_copy=image_gray_copy, color_channel=color_channel)
    # preprocessed_gray_image = preprocess_grayscale_image_3(image_gray_copy=image_gray_copy, color_channel=color_channel)
    # preprocessed_gray_image = preprocess_grayscale_image_4_cannyedge(image_gray_copy=image_gray_copy, color_channel=color_channel)
    # preprocessed_gray_image = image_gray_copy
    detected_LEDs = cv.HoughCircles(preprocessed_gray_image, cv.HOUGH_GRADIENT, dp=dp, minDist = minDist_px
        , param1=param1, param2=param2, minRadius=minRadius_px, maxRadius=maxRadius_px)
    # specify color number, for adding to matrix of detected LEDs
    if color_channel == "blue":
        color_number = color_number_blue
    elif color_channel == "green":
        color_number = color_number_green
    elif color_channel == "red":
        color_number = color_number_red
    # check if at least one circle was found in the image
    if detected_LEDs is not None:
        detected_LEDs = np.uint16(np.round(detected_LEDs)) # convert the (x, y) coordinates and radius of the circles to integers
        detected_LEDs = detected_LEDs[0,:]  # HoughCircles wraps results in an extra outer dimension
        # append the colour number as a fourth column --> matrix columns: x, y, r, colour
        detected_LEDs=np.hstack((detected_LEDs, np.full((detected_LEDs.shape[0],1), color_number, dtype=np.uint16)))
        number_of_detected_LEDs = detected_LEDs.shape[0]
        print(f"detected {color_channel} LEDs: {number_of_detected_LEDs}")
        # parameters for drawing
        line_thickness = 1
        circle_color = (0,255,0)
        vertex_offset = 2
        rectangle_color = (0,128,255) # R G B
        for (x, y, r, cn) in detected_LEDs:
            print(f"x:{x} px, y:{y} px, r:{r} px, r:{round(r*1/(pixels_per_mm),2)} mm, D: {round(2*r*1/(pixels_per_mm),2)} mm, color: {color_channel}")
            cv.circle(image_bgr_copy, (x, y), r, circle_color, thickness=line_thickness) # draw detected circumference of the circle
            cv.circle(image_gray_copy, (x, y), r, circle_color, thickness=line_thickness) # draw detected circumference of the circle
            cv.circle(preprocessed_gray_image, (x, y), r, circle_color, thickness=1) # draw detected circumference of the circle
            # mark the circle centre with a small filled rectangle
            cv.rectangle(img=image_bgr_copy, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
                color=rectangle_color, thickness=cv.FILLED)
            cv.rectangle(img=image_gray_copy, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
                color=rectangle_color, thickness=cv.FILLED)
            cv.rectangle(img=preprocessed_gray_image, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
                color=rectangle_color, thickness=cv.FILLED)
        cv.imshow(f"{color_channel} channel binary", preprocessed_gray_image[150:,:])
        return detected_LEDs
    else:
        print(f"No {color_channel} LEDs were found in the image")
        return None
def detect_blue_LEDs(image_colorchannel_gray, image_colorchannel_gray_copy, image_bgr_copy):
    """Detect LEDs in the blue colour channel; returns the LED matrix or None."""
    # the generic detector already returns None when nothing is found,
    # so its result can be passed straight through
    return detect_LEDs_in_color_channel(
        image_gray=image_colorchannel_gray,
        color_channel="blue",
        image_gray_copy=image_colorchannel_gray_copy,
        image_bgr_copy=image_bgr_copy)
def detect_green_LEDs(image_colorchannel_gray, image_colorchannel_gray_copy, image_bgr_copy):
    """Detect LEDs in the green colour channel; returns the LED matrix or None."""
    # the generic detector already returns None when nothing is found,
    # so its result can be passed straight through
    return detect_LEDs_in_color_channel(
        image_gray=image_colorchannel_gray,
        color_channel="green",
        image_gray_copy=image_colorchannel_gray_copy,
        image_bgr_copy=image_bgr_copy)
def detect_red_LEDs(image_colorchannel_gray, image_colorchannel_gray_copy, image_bgr_copy):
    """Detect LEDs in the red colour channel; returns the LED matrix or None."""
    # the generic detector already returns None when nothing is found,
    # so its result can be passed straight through
    return detect_LEDs_in_color_channel(
        image_gray=image_colorchannel_gray,
        color_channel="red",
        image_gray_copy=image_colorchannel_gray_copy,
        image_bgr_copy=image_bgr_copy)
def detect_LEDs(image_b, image_g, image_r, image_b_copy, image_g_copy, image_r_copy, image_bgr_copy):
    """Detect LEDs in all three colour channels and merge the results.

    Parameters: the three grayscale channel images, drawable copies of them
    and a drawable copy of the BGR image (the per-channel detectors draw
    their debug output into the copies).
    Returns a uint16 matrix with one row per LED (columns: x, y, r, colour
    number), stacked in red, green, blue order, or None when no LED was
    found in any channel.
    """
    detected_LEDs_blue = detect_blue_LEDs(image_colorchannel_gray=image_b, image_colorchannel_gray_copy=image_b_copy, image_bgr_copy=image_bgr_copy)
    detected_LEDs_green = detect_green_LEDs(image_colorchannel_gray=image_g, image_colorchannel_gray_copy=image_g_copy, image_bgr_copy=image_bgr_copy)
    detected_LEDs_red = detect_red_LEDs(image_colorchannel_gray=image_r, image_colorchannel_gray_copy=image_r_copy, image_bgr_copy=image_bgr_copy)
    # collect the non-empty channel results, preserving the original
    # stacking order: red, green, blue
    per_channel_results = [leds for leds in
                           (detected_LEDs_red, detected_LEDs_green, detected_LEDs_blue)
                           if leds is not None]
    # bug fix: the original if/elif cascade had no branch for "nothing
    # detected in any channel", so the return statement raised
    # UnboundLocalError; now None is returned instead
    if not per_channel_results:
        return None
    if len(per_channel_results) == 1:
        # single channel: return the matrix as-is (no copy via vstack)
        return per_channel_results[0]
    return np.vstack(per_channel_results)
def lane_detection(image_b, image_g, image_r, image_b_copy, image_g_copy, image_r_copy, image_bgr_copy):
    """Run the lane detection: currently this is just the LED detection."""
    # Hand the channel images and their drawing copies straight through.
    return detect_LEDs(image_b, image_g, image_r,
                       image_b_copy, image_g_copy, image_r_copy,
                       image_bgr_copy)
def main():
    """Run the LED/lane detection over every PNG in the input folder and
    display the original, the annotated image and all colour channels."""
    png_files = [name for name in os.listdir(folder_path) if name.endswith('.png')]
    for image_name in png_files:
        print(f"image:{image_name}")
        image_bgr = cv.imread(os.path.join(folder_path, image_name), cv.IMREAD_COLOR)  # load original image
        # split into the three single-channel grayscale images
        image_b, image_g, image_r = cv.split(image_bgr)
        # copies, so the detection can draw into them without touching the originals
        image_bgr_copy = image_bgr.copy()
        image_b_copy = image_b.copy()
        image_g_copy = image_g.copy()
        image_r_copy = image_r.copy()

        detected_LEDs = lane_detection(image_b, image_g, image_r,
                                       image_b_copy, image_g_copy, image_r_copy,
                                       image_bgr_copy)
        print(detected_LEDs)
        print(f"_____________________________________")

        # show images: only the region of interest (rows from 150 downwards)
        windows = [("Original image", image_bgr),
                   ("All detected LEDs", image_bgr_copy),
                   ("Blue channel", image_b),
                   ("Blue channel detected", image_b_copy),
                   ("Green channel", image_g),
                   ("Green channel detected", image_g_copy),
                   ("Red channel", image_r),
                   ("Red channel detected", image_r_copy)]
        for title, img in windows:
            cv.imshow(title, img[150:, :])
        cv.waitKey(0)  # display and wait if a key is pressed and then continue
        cv.destroyAllWindows()


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,445 @@
# Creation Date: 01.03.2022
# Author: Kenan Gömek
# This program is for the development of the lane detection algorithm on a windows machine
# It uses images, which were captured previously
# V02 detects the lane by suing the grayscale-image of the rgb-image
import cv2 as cv
import numpy as np
import os
import time
import platform
# input: choose the image folder depending on the operating system the script runs on
if platform.system() == "Windows":
    folder_path=r"U:\bwsyncshare\SA\Software\Code\02_SA\04_Spurerkennung\images_input"
if platform.system() == "Linux":
    folder_path=r"/home/pi/Desktop/Studienarbeit/04_Spurerkennung/images_input"
# Parameters
pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480

# Offset of the camera sensor in the Scooty relative to the Scooty coordinate system (KS_Scooty)
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]

# image parameters: width > height
image_height = 320 # shape [0]
image_width = 416 # shape[1]
# calculate center of image (origin of the sensor coordinate system KS_0)
[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16)

threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel

# Parameters for Blob/LED Detection
minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED
maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED

# Define color numbers to identify the color channels in the matrix with all
# detected LEDs. No string, because the numpy array should stay uint16.
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7

# global switches for visualisation / debugging
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane and so on
print_additional_info = True
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform the LED points into the lane coordinate system KS-LED.

    The points are translated by (dx, dy) relative to the image centre
    (x_0, y_0), rotated by -alpha_rad (the sensor angle is clockwise,
    hence the sign flip) and finally sorted along the lane axis.
    """
    # work on an int16 copy: keeps the caller's array untouched and avoids
    # unsigned overflow during the subtractions below
    leds = detected_LEDs.copy().astype(np.int16)

    # translation to the foot point of the perpendicular
    x_shifted = leds[:, 0] - dx - x_0
    y_shifted = leds[:, 1] - dy - y_0

    # rotation by -alpha_rad (clockwise sensor angle vs. mathematical convention)
    cos_a = np.cos(-alpha_rad)
    sin_a = np.sin(-alpha_rad)
    leds[:, 0] = cos_a*x_shifted - sin_a*y_shifted
    leds[:, 1] = sin_a*x_shifted + cos_a*y_shifted

    # sort the points along the lane: the x_2 axis of KS-LED
    return leds[leds[:, 0].argsort(kind='quicksort')]
def construct_lane(detected_LEDs, img_bgr):
    """Fit the lane through the detected LEDs and derive the control values.

    Returns (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED):
    the offsets of the perpendicular foot point relative to KS_Scooty in
    [mm], and the LED matrix transformed and sorted in the lane coordinate
    system KS_LED.
    """
    # Variable names follow the maths; originally commented in German.
    # get points
    # xy_pnts = detected_LEDs[:,0:2]
    # x_pnts = detected_LEDs[:,0]
    # y_pnts = detected_LEDs[:,1]

    # approach 2:
    # fit line through centers of LEDs in KS_0
    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
    param = 0 # not used for DIST_L2
    reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line).
    aeps = 0.001 # Sufficient accuracy for the angle.
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)

    alpha_rad = np.arctan2(dy, dx) # calculate angle of line
    alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line
    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
    if print_additional_info:
        print(f"Lane: alpha:{alpha[0]}°")

    # get smallest distance from point to line
    # calculation follows: Repetitorium Hoehere Mathematik (Wirth)
    # line:  x = a + t*b
    # point: OP = p
    # distance: d = abs(b x (p-a))/(abs(b))
    # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # direction vector of the lane
    c = p-a
    # vector norm: np.linalg.norm(vec)
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px]
    #print(f"d: {round(d,2)}")

    # foot of the perpendicular (x_LED, y_LED)
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    if print_additional_info:
        print(f"x_LED: {x_LED}, y_LED: {y_LED}")

    # offsets (dx, dy) of the foot point relative to KS_0
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]")
        print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]")

    # offsets (dx, dy) of the foot point relative to KS_Scooty
    # (these are the values handed back to the caller)
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]")
        print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]")

    # transform the points to obtain their sorted position along the lane;
    # the raw detection order can be wrong for vertical lanes (pattern e.g.
    # 211323233), so the sorted matrix is used from here on
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    if print_additional_info:
        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")

    #-----------------------------------
    # draw useful lines and points
    # draw lane line
    if draw_opencv:
        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
        #print(f"pt_0: {pt_0}, pt_1: {pt_1}")
        cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
        # draw dx dy
        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> perpendicular
        # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx
        # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy
        #draw additional points
        cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points
        #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
        cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. foot of the perpendicular

    if show_opencv_window:
        cv.imshow("Lane", img_bgr)

    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image to grayscale using the channel average.

    Algorithm: Y = (R+G+B)/3, truncated back to uint8.
    """
    # widen to uint16 first, otherwise the channel sum overflows uint8
    widened = image_bgr.astype(np.uint16, copy=False)
    channel_sum = widened[:, :, 0] + widened[:, :, 1] + widened[:, :, 2]
    # float division followed by the uint8 cast truncates, as before
    return (channel_sum/3).astype(np.uint8, copy=False)
def create_detector(params_for_blob_detection):
    """Build an OpenCV SimpleBlobDetector from the given parameter set."""
    return cv.SimpleBlobDetector_create(params_for_blob_detection)
def define_parameters_for_blob_detection():
    """Set and return the parameters for the OpenCV SimpleBlobDetector.

    Returns a cv.SimpleBlobDetector_Params object tuned for detecting the
    LEDs in the (mostly dark) grayscale image.
    """
    params = cv.SimpleBlobDetector_Params()

    # Threshold for Convert the source image to binary images by applying thresholding
    # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive)
    # with distance thresholdStep between neighboring thresholds.
    # Since the Grayscale image is dark if only one color channel is active,
    # the Threshold values have to be set like this.
    # particularly the thresholdStep-Value has to be low
    params.minThreshold=20 # reminder: this value is set for grayscale image
    params.maxThreshold=255
    params.thresholdStep=1

    params.filterByColor=False # do not filter blobs by color

    # Filter blobs by Area
    params.filterByArea=True
    minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED
    maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED
    minArea_px2 = np.pi/4*minDiameter_px**2 # [px^2] smallest accepted blob area
    maxArea_px2 = np.pi/4*maxDiameter_px**2 # [px^2] (only used if maxArea filtering is enabled)
    params.minArea = minArea_px2 # min Area of a blob in px^2
    # params.maxArea = maxArea_px2 # max Area of a blob in px^2.
    # reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area

    # Filter by Inertia
    params.filterByInertia=False
    params.minInertiaRatio = 0.2 # [0-1]

    # Filter by Convexity
    params.filterByConvexity=False
    params.minConvexity = 0.2 # [0-1]

    # Filter by Circularity
    params.filterByCircularity=False
    params.minCircularity = 0.4 # [0-1]

    # params.minDistBetweenBlobs = minDist_px # this has no effect
    return params
def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector):
    """Detect the LED blobs in the grayscale image.

    Parameters:
        image_gray: single-channel image the blob detector runs on.
        image_bgr: original BGR image, only used for visualisation.
        detector: a configured cv.SimpleBlobDetector.

    Returns an Nx3 uint16 matrix with one row [x, y, 0] per detected LED
    (the colour column is filled in later), or None when no blob was found.
    """
    keypoints = detector.detect(image_gray)  # Detect blobs --> LEDs
    number_of_detected_leds = len(keypoints)
    if number_of_detected_leds == 0:
        print(f"No LEDs were detected")
        return None

    print(f"detected LEDs: {number_of_detected_leds}")
    # pre-allocate the result matrix: columns are x, y and (later) colour
    position_of_leds = np.zeros((number_of_detected_leds, 3), dtype=np.uint16)
    for i, k in enumerate(keypoints):
        # keypoint centres are floats; round them up to whole pixels
        position_of_leds[i, 0] = int(np.ceil(k.pt[0]))  # x position
        position_of_leds[i, 1] = int(np.ceil(k.pt[1]))  # y position

    if draw_opencv:
        # draw the keypoints on the original image;
        # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
        # circle corresponds to the size of the blob
        blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255),
                                 outImage=np.array([]), flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    if show_opencv_window:
        cv.imshow("grayscale", image_gray)
        if draw_opencv:
            # BUGFIX: 'blobs' only exists when draw_opencv is True; showing it
            # unconditionally raised UnboundLocalError for draw_opencv=False.
            cv.imshow("Detected", blobs)
    return position_of_leds
def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector):
    """Return the LED positions from the grayscale detection.

    Returns the Nx3 position matrix, or None when nothing was detected.
    """
    # The original if/else only returned the value unchanged (identity
    # branch on None), so pass the result straight through.
    return detect_LED_positions_in_grayscale(image_gray, image_bgr, detector)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Determine the colour of every detected LED and write it into the
    third column of the LED matrix.

    For each LED the per-channel mean of a small square region around its
    centre is taken directly from the BGR image (no need to split the
    channels: image_r[y, x] == image_bgr[y, x, 2]); a channel counts as
    'on' when its mean lies above threshold_color_detection.
    """
    # half edge length of the sampling square around the LED centre:
    # offset = 0 --> only the centre pixel, offset = 1 --> 9 values,
    # offset = 2 --> 25 values
    offset = 2

    # map the (blue_on, green_on, red_on) state to the colour number.
    # The all-off entry cannot occur in practice, because the blob detector
    # only reports active LEDs, but it keeps the lookup total.
    color_by_status = {
        (False, False, True): color_number_red,
        (False, True, False): color_number_green,
        (True, False, False): color_number_blue,
        (False, True, True): color_number_yellow,    # yellow = red + green
        (True, False, True): color_number_magenta,   # magenta = red + blue
        (True, True, False): color_number_cyan,      # cyan = green + blue
        (True, True, True): color_number_white,      # white = red + green + blue
        (False, False, False): color_number_off,
    }

    for led in matrix_of_LEDs:
        x_pos = led[0]  # uint16
        y_pos = led[1]  # uint16
        # slice the sampling square; +1 because the stop index is exclusive
        region = image_bgr[y_pos-offset:y_pos+offset+1,
                           x_pos-offset:x_pos+offset+1, :]
        # widen to uint16 so the summation cannot overflow uint8
        region = region.astype(np.uint16, copy=False)

        # mean of each colour channel over the region (float64)
        pixel_count = region[:, :, 0].size
        mean_blue = region[:, :, 0].sum()/pixel_count
        mean_green = region[:, :, 1].sum()/pixel_count
        mean_red = region[:, :, 2].sum()/pixel_count

        # a channel is 'active' when its mean exceeds the threshold
        status = (bool(mean_blue > threshold_color_detection),
                  bool(mean_green > threshold_color_detection),
                  bool(mean_red > threshold_color_detection))

        led[2] = color_by_status[status]  # uint16
    return matrix_of_LEDs
def detect_LEDs_with_grayscale(image_bgr, detector):
    """Detect the LEDs via the grayscale image and attach their colours.

    Returns an Nx3 matrix [x, y, colour_number], or None when no LED was
    found.
    """
    # convert the BGR image to grayscale (channel average)
    image_gray = convert_rgb_to_grayscale_average(image_bgr)

    # locate the LED blobs in the grayscale image
    position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, detector=detector)
    if position_of_LEDs is None:
        return None

    # determine the colour of every LED and write it into the matrix
    return get_color_of_leds(position_of_LEDs, image_bgr)
def lane_detection(image_bgr, detector):
    """Full lane detection: find the LEDs, then fit the lane through them.

    Returns the matrix of detected LEDs, or None when nothing was found.
    """
    print(f"Detect LEDs and color:")
    detected_LEDs = detect_LEDs_with_grayscale(image_bgr, detector)
    if detected_LEDs is None:
        return None

    # fit the lane through the detected LEDs; result values are printed
    # and drawn inside construct_lane
    dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
        construct_lane(detected_LEDs, image_bgr)
    if print_additional_info:
        print(f"Detected LEDs relative to KS_Sensor (x,y):\n{detected_LEDs}")
    return detected_LEDs
def main():
    """Run the lane detection over all PNG images in the input folder,
    measure the processing time and show the results."""
    filenames_of_images = [f for f in os.listdir(folder_path) if f.endswith('.png')]

    # initialise the blob-detection parameters once before the loop for performance
    params_for_blob_detection = define_parameters_for_blob_detection()
    detector = create_detector(params_for_blob_detection)

    for i, filename_of_image in enumerate(filenames_of_images):
        print(f"image {i+1}/{len(filenames_of_images)}:{filename_of_image}")
        full_path_of_image = os.path.join(folder_path, filename_of_image)
        image_bgr = cv.imread(full_path_of_image, cv.IMREAD_COLOR) # load original image

        # measure the pure processing time of the lane detection
        start_processing = time.perf_counter()
        detected_LEDs = lane_detection(image_bgr, detector)
        end_processing = time.perf_counter()
        time_processing = end_processing-start_processing
        time_processing = time_processing*1000 # [ms]
        time_processing = round(time_processing, 2)
        print(f'processing time: {time_processing} ms')

        #print(f"_____________________________________")
        # show images:
        # cv.imshow("Blue channel", image_b)
        # cv.imshow("Green channel", image_g)
        # cv.imshow("Red channel", image_r)

        if show_opencv_window:
            pressed_key = cv.waitKey(0) & 0xff # display and wait if a key is pressed and then continue
            if pressed_key == ord('q'): # quit with 'q'
                exit()
            cv.destroyAllWindows()


if __name__ == "__main__":
    main()

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

View File

@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 3 18:23:42 2022

@author: Kenan

Scratch script: load a previously saved matrix of detected blue LEDs and
append an extra plane of ones to it.
"""
import numpy as np

# load the saved detection result
detected_LEDs_blue=np.load('detected_LEDs_blue.npy')
# append ones via np.c_
# NOTE(review): the hard-coded shape (1,8,1) presumably matches the saved
# array - confirm before reusing this snippet
newarr = np.c_[detected_LEDs_blue, np.full((1,8,1),1)]

View File

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 19:06:33 2022
@author: Kenan
Bilder lsöchen die über einer bestimmten schutterspeed sind
"""
import sys, os

folder_path=r"U:\bwsyncshare\images_input"

# Delete every PNG whose file name contains one of the shutter-speed values
# 250, 300, ..., 2000 (i.e. images taken above a certain shutter speed).
files = [f for f in os.listdir(folder_path) if f.endswith('.png')]
for file in files:
    for ss in range(250, 2050, 50):
        if str(ss) in file:
            try:
                os.remove(os.path.join(folder_path, file))
            except OSError:
                # best effort: the file may already be gone or be in use.
                # BUGFIX: the original bare 'except:' also swallowed e.g.
                # KeyboardInterrupt; only ignore OS-level removal errors.
                pass
            break  # file handled, no need to test further shutter speeds

View File

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 27 11:30:48 2022
@author: Kenan
"""
import numpy as np

# Scratch script: distance between a point and a line in 3D.
#   line:     x = a + t*b   (support point a, direction vector b)
#   point:    p
#   distance: d = |b x (p-a)| / |b|
p = np.array([[2],[5],[-1]])   # point as a column vector
a = np.array([[2],[0],[1]])    # support point of the line
b = np.array([[2],[-1],[2]])   # direction vector of the line

# vector from the support point to p
c = p-a
# the cross product needs flat 1-D vectors, hence the [:,0] slices
cross= np.cross(b[:,0], c[:,0])
# perpendicular distance of p from the line
res = np.linalg.norm(cross)/np.linalg.norm(b)

# foot of the perpendicular: x_0 = a + t_0*b with t_0 = (c.b)/|b|^2
t_0_dot = np.dot(c[:,0], b[:,0])
t_0_norm = (np.linalg.norm(b[:,0])**2)
t_0 = t_0_dot/t_0_norm
x_0 = (a+t_0*b)[:,0]

View File

@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 4 19:02:17 2022
@author: Kenan
Skript um Geradengleichung und winkel der geraden aus einer menge von punkten zu bestimmen
"""
import numpy as np
from matplotlib import pyplot as plt

# Coordinate system: y upwards, x to the right.
# create array: column: x y radius colornumber
# NOTE(review): this first assignment is immediately overwritten below
points = np.array([[2,3,5,1],
                   [7,6,5,1]])
points = np.array([[1,1,5,1],
                   [1,2,5,1],
                   [2,2,5,1],
                   [3,1,5,1],
                   [4,1,5,1],
                   [5,1,5,1],
                   [6,1,5,1],
                   [5,2,5,1],
                   [4,3,5,1],
                   [5,5,5,1],
                   [7,5,5,1],
                   [2,6,5,1]])

x = points[:,0] # x coordinates
y = points[:,1] # y coordinates

deg = 1 # Degree of the fitting polynomial
[m, c] = np.polyfit(x,y,deg) # m*x+c = g(x)

# Angle calculation with atan2 (mathematical convention: relative to the
# x-axis, positive counter-clockwise).
x_1=1
y_1=m*x_1
alpha_atan2 = np.arctan2(y_1,x_1)*180/np.pi
print(f"alpha atan2: {round(alpha_atan2,2)}°")

# Manual angle calculation - but relative to the y-axis.
alpha = np.arctan(1/m) # angle to the y-axis in [rad]
alpha_deg = alpha * 180/np.pi # angle in [°]
print(f"alpha: {round(alpha_deg,2)}°")

# plot the points and the fitted line
f1 = plt.plot(x,y, 'o')
plt.grid(True)
x_values_to_plot = np.linspace(0,7,100)
y_fitted_line = m*x_values_to_plot+c
plt.plot(x_values_to_plot, y_fitted_line)

View File

@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 1 18:14:57 2022
@author: Kenan
Messen wie lange konvertierung in numpy dauert
"""
import numpy as np
import time
image_width=192  # [px] width of the synthetic test image
image_heigth = 160  # [px] height of the synthetic test image
number_of_colorchannels=3  # B, G, R
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image into a grayscale image.

    Algorithm: average, Y = (R+G+B)/3, truncated to uint8.
    """
    # uint16 prevents an overflow of the uint8 channel sum (255*3 > 255)
    channels = image_bgr.astype(np.uint16, copy=False)
    gray = channels[:, :, 0] + channels[:, :, 1] + channels[:, :, 2]
    # truncate back into the uint8 value range
    return (gray/3).astype(np.uint8, copy=False)
int_black = 120 # integer for black color/ no color
# synthetic, uniformly coloured test image (height x width x channels)
image_bgr= np.full(\
    (image_heigth,image_width, number_of_colorchannels), \
    int_black, dtype=np.uint8)

# measure how long one grayscale conversion takes
start_m1 = time.perf_counter()
image_gray = convert_rgb_to_grayscale_average(image_bgr)
end_m1 = time.perf_counter()
time_processing = end_m1-start_m1
time_processing = time_processing*1000 # [ms]
time_processing = round(time_processing, 2)
print(f'processing time conversion: {time_processing} ms')

View File

@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 10:36:30 2022
@author: Kenan
"""
import cv2 as cv
import numpy as np
import math as M
import os

folder_path=r"U:\bwsyncshare\images_input"

# Scratch script: run the SimpleBlobDetector over every input image and
# print/show the detected blobs.
filenames_of_images = [f for f in os.listdir(folder_path) if f.endswith('.png')]
for i, filename_of_image in enumerate(filenames_of_images):
    full_path_of_image = os.path.join(folder_path, filename_of_image)
    image_bgr = cv.imread(full_path_of_image, cv.IMREAD_COLOR) # load original image
    image_grayscale = cv.cvtColor(image_bgr, cv.COLOR_BGR2GRAY)
    image = image_bgr
    image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images

    # For better accuracy, binary images are used before finding contours --> apply treshold
    image_gray_binary = image.copy()
    perc = np.percentile(image_r, 95) # calculate 95 % percentile
    ret, tresh=cv.threshold(image_r, perc, 255, cv.THRESH_BINARY) # set pixels above the percentile to 255, the rest to 0

    cv.imshow("grayscale", image_grayscale)
    cv.imshow("red", image_r)

    # blob detector parameters (see OpenCV SimpleBlobDetector docs)
    params = cv.SimpleBlobDetector_Params()
    params.minThreshold=1
    params.maxThreshold=255
    params.thresholdStep=1
    params.filterByColor=False
    params.filterByArea=True
    params.minArea = 300
    params.filterByInertia=False
    params.filterByConvexity=False
    params.filterByCircularity=False

    detector = cv.SimpleBlobDetector_create(params)
    keypoints = detector.detect(image_grayscale)
    number_of_detected_leds = len(keypoints)

    # print centre, diameter and area of every detected blob
    for k in keypoints:
        print(k.pt[0]) # x
        print(k.pt[1]) # y
        print(k.size) # diameter
        area = M.pi/4*k.size**2
        print(area)
        print('')

    # draw the blobs with circles matching their size
    blank = np.zeros((1, 1))
    blobs = cv.drawKeypoints(image, keypoints, blank, (0, 0, 255),cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv.imshow("Blobs Using Area", blobs)
    cv.waitKey(0)
    cv.destroyAllWindows()

View File

@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 31 19:36:39 2022
@author: Kenan
"""
import numpy as np
from matplotlib import pyplot as plt

# Scratch script: translate and rotate a set of 2D points and plot each step.
points = np.array([[-2, -3, -1, 0],
                   [0, 1, -1, -2]])
x = points[0,:] # x coordinates
y = points[1,:] # y coordinates
f1 = plt.plot(x,y, 'ko') # original points in black

plt.grid(True)

alpha = -45*np.pi/180 # rotation angle [rad]
dx=-1 # translation in x
dy=-1 # translation in y
b = np.array([dx,dy])

# translation
x1 = x-dx
y1 = y-dy
plt.plot(x1,y1, 'bo') # translated points in blue
plt.grid(True)
plt.axis('equal')

# rotation
x_trafo = np.cos(alpha)*x1-np.sin(alpha)*y1
y_trafo = np.sin(alpha)*x1+np.cos(alpha)*y1
plt.plot(x_trafo,y_trafo, 'o')
plt.grid(True)

View File

@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 27 09:28:09 2022
@author: Kenan
"""
import numpy as np
from matplotlib import pyplot as plt
# Scratch: row-wise mean of LED coordinates.
# Each row holds x-values / y-values; the third column is the LED colour.
points = np.array([[1, 3, 5],
                   [2, 4, 3]])
# mean of the first two columns per row = centre of the LED positions
mean = points[:, 0:2].mean(axis=1)

View File

@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
"""
Created 05.03.2022
@author: Kenan
"""
import numpy as np
from itertools import permutations
def get_color_pattern(color_array):
    """Identify which colour pattern the detected colour sequence matches.

    The three known base patterns may appear in any order (the lane can be
    traversed in either direction), so every permutation of each pattern is
    compared with color_array.

    Returns the matching base pattern as a numpy array, or None when no
    pattern matches. The match is also printed (original behaviour).
    """
    # define color patterns
    color_pattern_01 = np.array([1, 2, 3])
    color_pattern_50 = np.array([4, 5, 6])
    color_pattern_70 = np.array([7, 8, 9])
    list_color_patterns = np.vstack((color_pattern_01, color_pattern_50, color_pattern_70))

    # check which colour pattern is detected
    for cp in list_color_patterns:
        # 6 permutations for 3 numbers: nPr(3,3)
        for permutation in permutations(cp, len(cp)):
            if np.array_equal(permutation, color_array):
                print(cp)
                # IMPROVEMENT: the original 'break' only left the inner
                # loop and the match was never handed back to the caller;
                # return the base pattern instead.
                return cp
    return None
# the third value of each row is the colour of the LED
points = np.array([[1,3,7],
[2,4,8],
[2,4,9]])
# extract the colour column and look up which base pattern it matches
color_array = points[:,2]
get_color_pattern(color_array)

View File

@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 17:08:00 2022
@author: Kenan
"""
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
path = r"U:\images_input\image.png"
def convert_rgb_to_grayscale_average(image_b, image_g, image_r):
    """Convert the three colour channels into one grayscale image.

    Algorithm: average, Y = (R+G+B)/3, truncated to uint8.
    """
    # cast each channel to uint16 before adding, otherwise the uint8 sum
    # overflows (255+255+255 > 255)
    total = (image_b.astype(np.uint16)
             + image_g.astype(np.uint16)
             + image_r.astype(np.uint16))
    # float division, then truncate back to uint8
    return (total/3).astype(np.uint8)
def convert_rgb_to_grayscale_average_v2(image_b, image_g, image_r):
    """Convert the three colour channels into one grayscale image.

    Variant 2: Y = R/3 + G/3 + B/3, computed in float64. Note that this can
    truncate differently from the integer-sum variant (e.g. 3.999... -> 3).
    """
    # multiplying by the float 1/3 promotes everything to float64
    gray_float = 1/3*image_r + 1/3*image_g + 1/3*image_b
    # truncate to uint8
    return gray_float.astype(np.uint8)
image_bgr = cv.imread(path, cv.IMREAD_COLOR) # load original image
image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images

# variant 1: integer sum, then divide
image_grayscale_avg = convert_rgb_to_grayscale_average(image_b, image_g, image_r)
plt.imshow(image_grayscale_avg, cmap = 'gray')

# variant 2: per-channel float division, then sum
image_grayscale_avg_v2 = convert_rgb_to_grayscale_average_v2(image_b, image_g, image_r)

# Test if both variants produce identical results
res = np.all(image_grayscale_avg_v2==image_grayscale_avg)
print(res) # False

# Explanation why the two variants differ:
# row = 227, col = 7 : r=2, g=10, b=0
# variant 1: (r+g+b)/3 = 12/3 = 4       --> int = 4
# variant 2: 1/3*2 + 1/3*10 + 1/3*0 = 0.6666+3.3333 = 3.9999 --> int = 3 --> FALSE
# conclusion: variant 1 (integer sum) is the better choice

View File

@ -0,0 +1,64 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 21 17:49:58 2022
@author: Kenan
"""
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
path = r"U:\bwsyncshare\image.png"

image_bgr = cv.imread(path, cv.IMREAD_COLOR) # load original image
image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images
# widen to uint16 so later channel additions cannot overflow uint8
image_r = image_r.astype(np.uint16)
image_g = image_g.astype(np.uint16)
image_b = image_b.astype(np.uint16)

img_rgb=image_bgr[:,:,::-1] # BGR -> RGB for matplotlib display

image_grayscale = cv.cvtColor(image_bgr, cv.COLOR_BGR2GRAY) # Y = 0.299 R + 0.587 G + 0.114 B
image_grayscale_KG = (image_r+image_g+image_b)/3 # simple channel average
image_grayscale_KG = image_grayscale_KG.astype(int) # quick and dirty way to convert to uint8 array

# NOTE(review): the names suggest [column, row], but numpy indexing is
# [row, column] - verify which order is actually intended here
image_column = 6
image_row = 243
image_bgr_b = image_bgr[image_column, image_row, 0]
image_b_b = image_b[image_column, image_row]

plt.imshow(img_rgb)

# get pixel info from specific color channel
# NOTE(review): shape[0] is the number of rows (height), shape[1] the number
# of columns (width) - the original comments had this inverted
print(image_b.shape[0])
print(image_b.shape[1])
#image_b[row, column]
print(image_b[230, 20])
#image_bgr[row, column, dim]
print(image_bgr[230,20,0])

# Test for addition of colors
row = 245
column = 70
print('Color addition:')
blue = image_bgr[row,column, 0]
green = image_bgr[row,column, 1]
red = image_bgr[row,column, 2]
print(f'blue: {blue}')
print(f'green: {green}')
print(f'red: {red}')
grayscale = image_grayscale[row, column]
grayscale_AVG = image_grayscale_KG[row, column]
print(f'gray Y: {grayscale}')
print(f'gray AVG: {grayscale_AVG}')

# channels were already cast to uint16 above; these casts are redundant but harmless
image_r = image_r.astype(np.uint16)
image_g = image_g.astype(np.uint16)
image_b = image_b.astype(np.uint16)
res = image_r[row, column]+image_g[row, column]+image_b[row, column]
print(f"addition of r g b: {res}")

Binary file not shown.

After

Width:  |  Height:  |  Size: 523 KiB

View File

@ -0,0 +1,2 @@
Das sind Testbilder, welche erstellt wurden.
Winkel nur geschätzt und ohne KOSY, da für die Erstellung der Bilder nicht relevant.

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.6 KiB

View File

@ -0,0 +1,156 @@
# Creation Date: 02.03.2022
# Author: Kenan Gömek
# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures.
# This script is designed for shooting manually images with 'i'
# Press 'q' to exit
# Change camera parameters with v,b,n,m,x,c,o. See code for more information.
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) --> needs to be divisible by 16 --> 416x320
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, float or fraction are valid.
BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
# the brighter the setting, the brighter the LEDs and the higher the RGB values and vice versa!
CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
SHUTTER_SPEED = 30000 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off' # fix the gains after warm-up so exposure stays constant
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
# Define Funcions
def get_frames_from_camera():
    """Capture frames continuously from the PiCamera video port and let the
    user tune the shutter speed interactively via the OpenCV window.

    Keys: q quit | v +5 µs | b +50 µs | n +500 µs | m max for framerate |
          x -500 µs | c -50 µs | o 0 (camera chooses automatically).
    """
    print('Initalise Camera...')
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE  # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION  # frame is resized on the GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            # The right shutter speed has to be set at the first initialisation
            # of the runtime: the analog and digital gains settle to it and are
            # frozen once exposure_mode is set to 'off'. They never change
            # afterwards, even if the shutter speed is changed at runtime.
            time.sleep(SLEEP_TIME)  # wait for iso/digital/analog gains to settle before fixing them
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1)  # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME)  # camera warm-up time to apply settings

            # Bugfix: track the requested shutter speed locally, starting from
            # the configured value. The original code referenced shutter_speed
            # in the key handlers before it was ever assigned (NameError on the
            # first keypress).
            shutter_speed = SHUTTER_SPEED

            print('Start caputure...')
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                start_processing = time.perf_counter()
                framenumber = frameidx + 1  # frameidx starts with 0, framenumber with 1
                image = frame.array  # raw NumPy array without JPEG encoding
                # cv.imshow("Current Frame", image)  # display the image without text
                output.truncate(0)  # clear the stream for the next frame

                # Keyboard handling. NOTE(review): cv.waitKey only delivers key
                # events while an OpenCV window is shown -- confirm a window is
                # displayed when using the interactive keys.
                pressed_key = cv.waitKey(2) & 0xff
                if pressed_key == ord('q'):
                    break
                elif pressed_key == ord('v'):  # increase shutter speed by 5
                    shutter_speed = round(shutter_speed + 5)
                    camera.shutter_speed = shutter_speed
                    time.sleep(2)  # wait until shutter speed is applied before querying exposure speed
                    exposure_speed = camera.exposure_speed
                    print(f"shutter speed set to: {shutter_speed}")
                    print(f"retrieved shutter speed: {exposure_speed}")
                elif pressed_key == ord('b'):  # increase shutter speed by 50
                    shutter_speed = round(shutter_speed + 50)
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                elif pressed_key == ord('n'):  # increase shutter speed by 500
                    shutter_speed = round(shutter_speed + 500)
                    # Bugfix: the original handler printed the new value but
                    # never applied it to the camera, unlike all other handlers.
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                elif pressed_key == ord('m'):  # maximum shutter speed for the configured framerate
                    shutter_speed = round(1 / FRAMERATE * 1e6)
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                elif pressed_key == ord('x'):  # decrease shutter speed by 500
                    shutter_speed = round(shutter_speed - 500)
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                elif pressed_key == ord('c'):  # decrease shutter speed by 50
                    shutter_speed = round(shutter_speed - 50)
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")
                elif pressed_key == ord('o'):  # 0 lets the firmware pick the shutter speed
                    shutter_speed = 0
                    camera.shutter_speed = shutter_speed
                    print(f"shutter speed set to: {shutter_speed}")

                end_processing = time.perf_counter()
                time_processing = round(end_processing - start_processing, 2)
                time_processing = time_processing * 1000
                print(f'processing time: {time_processing} ms')
# ----------------------------------------------------------------------------
# main
def main():
    """Entry point: run the interactive capture loop, then close all OpenCV windows."""
    get_frames_from_camera()
    cv.destroyAllWindows()
    print('Program finished')


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,514 @@
# Creation Date: 02.03.2022
# Author: Kenan Gömek
# This code detects the lane with HoughCircles.
# Quit program with 'q' if opencv windows is shows. Else use Ctrl+C.
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
import math as M
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
OUTPUT_RESOLUTION = (192, 144) # NOTE(review): comment said 416x320; actual value is 192x144 -- confirm intended resolution
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (1.395, 1.15) # White balance gains (red, blue) so colours are read correctly. Int, float or Fraction are valid.
BRIGHTNESS = 25 # brightness setting of the camera. Default is 50. [0-100]
# the brighter the setting, the brighter the LEDs and the higher the RGB values, and vice versa!
CONTRAST = 100 # contrast setting of the camera. Default is 0. [-100 ... 100]
SHUTTER_SPEED = 50 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off' # 'off' freezes the analog/digital gains at their settled values
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # settle time for the camera in seconds. Default: 2 s
# Define Functions
# Parameters
pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height, resolution 416x320. NOTE(review): OUTPUT_RESOLUTION above is 192x144 -- verify this scale factor
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480
# Offset of the camera sensor inside the Scooty, measured in the Scooty coordinate system (KS-Scooty)
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
# image parameters
image_heigth = OUTPUT_RESOLUTION[1] # shape[0] of a frame array
image_width = OUTPUT_RESOLUTION[0] # shape[1] of a frame array
# centre of the image = origin of the KS_0 coordinate system
[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16)
threshold_color_detection = 60 # channel mean values below this are not considered an active LED in that colour channel
# Parameters for Blob/LED Detection
minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED
maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED
# Colour codes identifying the colour channel of each detected LED in the result matrix.
# Plain ints (no strings) so the numpy matrix can stay uint16.
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane overlay and helper points
print_additional_info = False
# Parameters for HoughCircles
dp = 1 # Inverse ratio of the accumulator resolution to the image resolution. dp=1: accumulator has the same resolution as the input image; dp=2: half the width and height.
minDist_mm = 1 # [mm] minimal distance between two circles
minDist_px = int(minDist_mm*pixels_per_mm) # [px] Minimum distance between centers of detected circles. Too small: multiple neighbour circles falsely detected; too large: circles missed.
minRadius_mm = 3 # [mm] minimum radius of a circle
minRadius_px = int(minRadius_mm*pixels_per_mm) # [px] Minimum circle radius.
maxRadius_mm = 7 # [mm] maximum radius of a circle
maxRadius_px = int(maxRadius_mm*pixels_per_mm) # [px] Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, returns centers without finding the radius.
param1 = 150 # Upper threshold for the internal Canny edge detector (lower one is half of it).
# Lower this value if circles/LEDs should be detected at low shutter speeds.
# Intuitively: "gradient value between dark and white".
param2 = 5 # Accumulator threshold for circle centers at the detection stage. The smaller, the more (possibly false) circles are detected; circles with larger accumulator values are returned first.
# Increasing this threshold keeps only the best circles (largest accumulator values).
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform LED points from KS_0 (image coordinates) into KS_LED (lane coordinates).

    Translation by (dx, dy) relative to the image centre (x_0, y_0), then
    rotation by -alpha_rad; the rows are finally sorted along the lane axis.
    Returns a new int16 matrix; the input is not modified.
    """
    detected_LEDs_trafo = detected_LEDs.copy()  # copy, otherwise only a reference to the caller's matrix is modified
    detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16)  # signed type: transformed coordinates may be negative; avoids uint overflow
    x_pnts = detected_LEDs_trafo[:,0]
    y_pnts = detected_LEDs_trafo[:,1]
    # Translation into the perpendicular-foot point of the lane
    x1 = x_pnts-dx-x_0
    x_trafo=x1
    y1 = y_pnts-dy-y_0
    y_trafo = y1
    # Rotation. The sensor angle is clockwise, hence negated relative to the
    # mathematical (counter-clockwise) convention.
    # Note: assigning the float results into the int16 columns truncates them.
    x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1
    detected_LEDs_trafo[:,0] = x_trafo
    y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1
    detected_LEDs_trafo[:,1] = y_trafo
    # sort points along the lane axis: x_2/y_2 axis (KS_LED)
    detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')]
    return detected_LEDs_trafo
def construct_lane(detected_LEDs, img_bgr):
    """Fit a straight lane through the detected LED centres and derive offsets.

    Parameters:
        detected_LEDs: matrix with columns x, y, colour in KS_0 pixel coordinates.
        img_bgr: frame the optional overlay (lane line, foot point) is drawn onto.
    Returns:
        (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED):
        perpendicular-foot offsets relative to the Scooty coordinate system in
        mm, and the LED matrix transformed and sorted along the lane (KS_LED).
    """
    # Fit a line through the LED centres in KS_0.
    # DIST_L2: the simplest and fastest least-squares method (euclidean distance).
    param = 0  # not used for DIST_L2
    reps = 0.001  # sufficient accuracy for the radius (distance origin-line)
    aeps = 0.001  # sufficient accuracy for the angle
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # (x_2, y_2) is a point on the fitted line; same as the mean of the LED centres
    alpha_rad = np.arctan2(dy, dx)  # angle of the fitted line [rad]
    alpha = np.arctan2(dy, dx)*180/np.pi  # angle of the fitted line [deg]
    if print_additional_info:
        print(f"Lane: alpha:{alpha[0]}°")
    # Smallest distance from the image centre to the line.
    # Derivation (German textbook: Repetitorium Hoehere Mathematik, Wirth):
    #   line: x = a + t*b, point: OP = p, d = |b x (p-a)| / |b|
    # info: np.array()[:,0] flattens the fitLine column vectors to 1-D
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0]  # direction vector of the lane
    c = p-a
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b)  # distance [px] (diagnostic only)
    # Perpendicular foot (x_LED, y_LED): projection of the image centre onto the lane
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    if print_additional_info:
        print(f"x_LED: {x_LED}, y_LED: {y_LED}")
    # Offset (dx, dy) of the foot point relative to KS_0 (image centre)
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]")
        print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]")
    # Offset (dx, dy) of the foot point relative to the Scooty coordinate system.
    # These are the values handed back to the caller.
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]")
        print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]")
    # Transform the points to obtain their sorted positions along the lane.
    # Sorting in KS_0 can fail for near-vertical lanes (pattern e.g. 211323233),
    # therefore the transformed, sorted matrix is used from here on.
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    if print_additional_info:
        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
    #-----------------------------------
    # draw useful lines and points
    if draw_opencv:
        # lane line, extended far beyond the image in both directions
        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
        cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
        # shortest distance from KS_0 to KS_LED (the perpendicular)
        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2)
        # additional points
        cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) # pink: centre of the LED points
        cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green: perpendicular foot point
    if show_opencv_window:
        cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image to grayscale via the channel average Y = (R+G+B)/3."""
    # Widen to uint16 before summing so 255+255+255 cannot overflow uint8.
    channel_sum = (image_bgr[:, :, 0].astype(np.uint16)
                   + image_bgr[:, :, 1]
                   + image_bgr[:, :, 2])
    # Divide and narrow back to uint8 (float division truncates on the cast).
    return (channel_sum / 3).astype(np.uint8, copy=False)
def create_detector(params_for_blob_detection):
    """Instantiate an OpenCV SimpleBlobDetector configured with the given parameters."""
    return cv.SimpleBlobDetector_create(params_for_blob_detection)
def define_parameters_for_blob_detection():
    """Build SimpleBlobDetector_Params tuned for detecting LEDs.

    The averaged grayscale image is dark when only one colour channel is
    active, so binarisation starts at a low threshold and steps by 1.
    """
    params = cv.SimpleBlobDetector_Params()
    # Thresholding: binary images are produced for every threshold from
    # minThreshold (inclusive) to maxThreshold (exclusive), thresholdStep apart.
    params.minThreshold = 20   # tuned for the dark averaged-grayscale image
    params.maxThreshold = 255
    params.thresholdStep = 1   # must be small so dim LEDs survive binarisation
    # Disable every blob filter: colour, area, inertia, convexity, circularity.
    params.filterByColor = False
    params.filterByArea = False
    params.filterByInertia = False
    params.filterByConvexity = False
    params.filterByCircularity = False
    # params.minDistBetweenBlobs = minDist_px  # has no effect in OpenCV
    return params
def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector):
    """Detect LED candidates in the grayscale frame with cv.HoughCircles and draw them.

    NOTE(review): this function returns None in *every* branch, so callers never
    receive the detected positions -- presumably left over from benchmarking the
    Hough detector in isolation; confirm before reusing. The `detector`
    parameter (SimpleBlobDetector) is unused here; `9` in the prints and the
    colour column is a placeholder colour code.
    """
    start_processing = time.perf_counter()
    #keypoints = detector.detect(image_gray) # Detect blobs --> LEDs
    detected_LEDs = cv.HoughCircles(image_gray, cv.HOUGH_GRADIENT, dp=dp, minDist = minDist_px
        , param1=param1, param2=param2, minRadius=minRadius_px, maxRadius=maxRadius_px)
    end_processing = time.perf_counter()
    time_processing = end_processing-start_processing
    time_processing = time_processing*1000
    time_processing = round(time_processing, 2)
    print(f'processing time Hough: {time_processing} ms')
    # check if at least one circle was found in the image
    if detected_LEDs is not None:
        detected_LEDs = np.uint16(np.round(detected_LEDs)) # convert the (x, y) coordinates and radius of the circles to integers
        detected_LEDs = detected_LEDs[0,:] # HoughCircles wraps the result in an extra leading axis
        # append a colour column filled with the placeholder code 9
        detected_LEDs=np.hstack((detected_LEDs, np.full((detected_LEDs.shape[0],1), 9, dtype=np.uint16)))
        # matrix with columns: x, y, r, colour
        number_of_detected_LEDs = detected_LEDs.shape[0]
        print(f"detected {9} LEDs: {number_of_detected_LEDs}")
        # parameters for drawing
        line_thickness = 1
        circle_color = (0,255,0)
        vertex_offset = 2
        rectangle_color = (0,128,255) # R G B
        for (x, y, r, cn) in detected_LEDs:
            print(f"x:{x} px, y:{y} px, r:{r} px, r:{round(r*1/(pixels_per_mm),2)} mm, D: {round(2*r*1/(pixels_per_mm),2)} mm, color: {9}")
            cv.circle(image_bgr, (x, y), r, circle_color, thickness=line_thickness) # draw detected circumference of the cirle
            cv.rectangle(img=image_bgr, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
                color=rectangle_color, thickness=cv.FILLED) # mark the centre
        cv.imshow(f"HC", image_bgr)
        return None
    else:
        print(f"No LEDs were detected")
        return None
def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector):
    """Thin wrapper: return the LED position matrix, or None when nothing was found."""
    # Passing the result straight through is equivalent to the original
    # "if not None return it, else return None" branching.
    return detect_LED_positions_in_grayscale(image_gray, image_bgr, detector)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Classify the colour of every detected LED and write it into column 2.

    For each LED the mean of each BGR channel is sampled in a small window
    around its centre; a channel counts as active above
    threshold_color_detection. The (R, G, B)-active combination maps to one of
    the module-level colour codes. Returns the (mutated) input matrix.
    """
    # Half window size around the LED centre used for colour sampling:
    # 0 -> single pixel, 1 -> 3x3 region (9 px), 2 -> 5x5 region (25 px).
    offset = 0
    # Dispatch table: (red_active, green_active, blue_active) -> colour code.
    color_lookup = {
        (True,  False, False): color_number_red,
        (False, True,  False): color_number_green,
        (False, False, True):  color_number_blue,
        (True,  True,  False): color_number_yellow,   # red + green
        (True,  False, True):  color_number_magenta,  # red + blue
        (False, True,  True):  color_number_cyan,     # green + blue
        (True,  True,  True):  color_number_white,    # red + green + blue
    }
    for led in matrix_of_LEDs:
        x_pos = led[0]  # uint16
        y_pos = led[1]  # uint16
        # Sample the region around the centre; +1 because the stop index is
        # exclusive. Widen to uint16 so the sums cannot overflow uint8.
        region = image_bgr[y_pos - offset:y_pos + offset + 1,
                           x_pos - offset:x_pos + offset + 1, :].astype(np.uint16, copy=False)
        number_of_elements = region[:, :, 0].size
        mean_blue = region[:, :, 0].sum() / number_of_elements
        mean_green = region[:, :, 1].sum() / number_of_elements
        mean_red = region[:, :, 2].sum() / number_of_elements
        status = (mean_red > threshold_color_detection,
                  mean_green > threshold_color_detection,
                  mean_blue > threshold_color_detection)
        # All-channels-off cannot occur for a blob the detector reported,
        # hence color_number_off only as a defensive default.
        led[2] = color_lookup.get(status, color_number_off)
    return matrix_of_LEDs
def detect_LEDs_with_grayscale(image_bgr, detector):
    """Detect LEDs on a BGR frame: grayscale conversion, position detection,
    then colour classification.

    Returns the LED matrix (columns x, y, colour) or None if nothing was found.
    """
    image_gray = convert_rgb_to_grayscale_average(image_bgr)
    position_of_LEDs = detect_position_of_all_LEDs_grayscale(
        image_gray=image_gray, image_bgr=image_bgr, detector=detector)
    if position_of_LEDs is None:
        return None
    # Fill the third column of the matrix with the classified colour codes.
    return get_color_of_leds(position_of_LEDs, image_bgr)
def lane_detection(image_bgr, detector):
    """Run LED detection on one frame and, when LEDs were found, construct the lane.

    Returns the detected LED matrix, or None when no LEDs were detected.
    """
    print(f"Detect LEDs and color:")
    detected_LEDs = detect_LEDs_with_grayscale(image_bgr, detector)
    if detected_LEDs is None:
        return None
    dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
        construct_lane(detected_LEDs, image_bgr)
    if print_additional_info:
        print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}")
    return detected_LEDs
# Picamera
def get_frames_from_camera(detector):
    """Configure the PiCamera, then run lane detection on every captured frame.

    detector -- pre-built SimpleBlobDetector (created once, outside the loop,
    for performance). Runs until 'q' is pressed in an OpenCV window.
    """
    print('Initialise Camera...')
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            # The right shutter speed has to be set at the first initialisation
            # of the runtime: the analog and digital gains settle to it and are
            # frozen once exposure_mode is set to 'off'. They never change
            # afterwards, even if the shutter speed is changed at runtime.
            time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1) # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings
            print('Start caputure...')
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                start_processing = time.perf_counter()
                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
                image_bgr = frame.array # raw NumPy array without JPEG encoding
                output.truncate(0) # clear the stream for next frame
                # processing
                lane_detection(image_bgr, detector)
                # NOTE: cv.waitKey only delivers key events while an OpenCV
                # window is displayed; 'q' breaks out of the capture loop.
                pressed_key = cv.waitKey(2) & 0xff
                if pressed_key == ord('q'):
                    break
                end_processing = time.perf_counter()
                time_processing = round(end_processing-start_processing, 2)
                time_processing = time_processing*1000
                print(f'processing time: {time_processing} ms')
# ----------------------------------------------------------------------------
# main
def main():
    """Entry point: build the blob detector once, then run the capture loop."""
    # Creating the detector outside the frame loop avoids rebuilding it per frame.
    detector = create_detector(define_parameters_for_blob_detection())
    get_frames_from_camera(detector)
    cv.destroyAllWindows()
    print('Program finished')


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,514 @@
# Creation Date: 02.03.2022
# Author: Kenan Gömek
# This code detects the lane with Simpleblob Detector.
# Quit program with 'q' if opencv windows is shows. Else use Ctrl+C.
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
import math as M
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
# OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) --> needs to be divisible by 16 --> 416x320
OUTPUT_RESOLUTION = (192, 144)
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (1.395, 1.15) # White balance gains (red, blue) so colours are read correctly. Int, float or Fraction are valid.
BRIGHTNESS = 25 # brightness setting of the camera. Default is 50. [0-100]
# the brighter the setting, the brighter the LEDs and the higher the RGB values, and vice versa!
CONTRAST = 100 # contrast setting of the camera. Default is 0. [-100 ... 100]
SHUTTER_SPEED = 50 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off' # 'off' freezes the analog/digital gains at their settled values
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # settle time for the camera in seconds. Default: 2 s
# Parameters
pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 192x144
# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480
# Offset of the camera sensor inside the Scooty, measured in the Scooty coordinate system (KS-Scooty)
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
# image parameters
image_heigth = OUTPUT_RESOLUTION[1] # shape[0] of a frame array
image_width = OUTPUT_RESOLUTION[0] # shape[1] of a frame array
# centre of the image = origin of the KS_0 coordinate system
[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16)
threshold_color_detection = 60 # channel mean values below this are not considered an active LED in that colour channel
# Parameters for Blob/LED Detection
minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED
maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED
# Colour codes identifying the colour channel of each detected LED in the result matrix.
# Plain ints (no strings) so the numpy matrix can stay uint16.
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane overlay and helper points
print_additional_info = True
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform LED points from KS_0 (image coordinates) into KS_LED (lane coordinates).

    Translation by (dx, dy) relative to the image centre (x_0, y_0), then
    rotation by -alpha_rad; the rows are finally sorted along the lane axis.
    Returns a new int16 matrix; the input is not modified.
    """
    detected_LEDs_trafo = detected_LEDs.copy()  # copy, otherwise only a reference to the caller's matrix is modified
    detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16)  # signed type: transformed coordinates may be negative; avoids uint overflow
    x_pnts = detected_LEDs_trafo[:,0]
    y_pnts = detected_LEDs_trafo[:,1]
    # Translation into the perpendicular-foot point of the lane
    x1 = x_pnts-dx-x_0
    x_trafo=x1
    y1 = y_pnts-dy-y_0
    y_trafo = y1
    # Rotation. The sensor angle is clockwise, hence negated relative to the
    # mathematical (counter-clockwise) convention.
    # Note: assigning the float results into the int16 columns truncates them.
    x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1
    detected_LEDs_trafo[:,0] = x_trafo
    y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1
    detected_LEDs_trafo[:,1] = y_trafo
    # sort points along the lane axis: x_2/y_2 axis (KS_LED)
    detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')]
    return detected_LEDs_trafo
def construct_lane(detected_LEDs, img_bgr):
    """Fit a straight lane through the detected LED centres and derive offsets.

    Parameters:
        detected_LEDs: matrix with columns x, y, colour in KS_0 pixel coordinates.
        img_bgr: frame the optional overlay (lane line, foot point) is drawn onto.
    Returns:
        (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED):
        perpendicular-foot offsets relative to the Scooty coordinate system in
        mm, and the LED matrix transformed and sorted along the lane (KS_LED).
    """
    # Fit a line through the LED centres in KS_0.
    # DIST_L2: the simplest and fastest least-squares method (euclidean distance).
    param = 0  # not used for DIST_L2
    reps = 0.001  # sufficient accuracy for the radius (distance origin-line)
    aeps = 0.001  # sufficient accuracy for the angle
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # (x_2, y_2) is a point on the fitted line; same as the mean of the LED centres
    alpha_rad = np.arctan2(dy, dx)  # angle of the fitted line [rad]
    alpha = np.arctan2(dy, dx)*180/np.pi  # angle of the fitted line [deg]
    if print_additional_info:
        print(f"Lane: alpha:{alpha[0]}°")
    # Smallest distance from the image centre to the line.
    # Derivation (German textbook: Repetitorium Hoehere Mathematik, Wirth):
    #   line: x = a + t*b, point: OP = p, d = |b x (p-a)| / |b|
    # info: np.array()[:,0] flattens the fitLine column vectors to 1-D
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0]  # direction vector of the lane
    c = p-a
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b)  # distance [px] (diagnostic only)
    # Perpendicular foot (x_LED, y_LED): projection of the image centre onto the lane
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    if print_additional_info:
        print(f"x_LED: {x_LED}, y_LED: {y_LED}")
    # Offset (dx, dy) of the foot point relative to KS_0 (image centre)
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]")
        print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]")
    # Offset (dx, dy) of the foot point relative to the Scooty coordinate system.
    # These are the values handed back to the caller.
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]")
        print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]")
    # Transform the points to obtain their sorted positions along the lane.
    # Sorting in KS_0 can fail for near-vertical lanes (pattern e.g. 211323233),
    # therefore the transformed, sorted matrix is used from here on.
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    if print_additional_info:
        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
    #-----------------------------------
    # draw useful lines and points
    if draw_opencv:
        # lane line, extended far beyond the image in both directions
        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
        cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
        # shortest distance from KS_0 to KS_LED (the perpendicular)
        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2)
        # additional points
        cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) # pink: centre of the LED points
        cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green: perpendicular foot point
    if show_opencv_window:
        cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image to grayscale via the channel average Y = (R+G+B)/3."""
    # Widen to uint16 before summing so 255+255+255 cannot overflow uint8.
    channel_sum = (image_bgr[:, :, 0].astype(np.uint16)
                   + image_bgr[:, :, 1]
                   + image_bgr[:, :, 2])
    # Divide and narrow back to uint8 (float division truncates on the cast).
    return (channel_sum / 3).astype(np.uint8, copy=False)
def create_detector(params_for_blob_detection):
    """Instantiate an OpenCV SimpleBlobDetector configured with the given parameters."""
    return cv.SimpleBlobDetector_create(params_for_blob_detection)
def define_parameters_for_blob_detection():
    """Create the parameter set for the OpenCV SimpleBlobDetector.

    The detector binarises the source image with several thresholds, from
    minThreshold (inclusive) to maxThreshold (exclusive), spaced by
    thresholdStep. The grayscale input is dark when only a single colour
    channel is active, so the sweep must start low and step finely.
    """
    params = cv.SimpleBlobDetector_Params()

    # Threshold sweep for the internal binarisation
    params.minThreshold = 20   # tuned for the dark grayscale image
    params.maxThreshold = 255
    params.thresholdStep = 1   # a fine step is essential for dim LEDs

    # All shape/colour filters are disabled on purpose: detection quality
    # is already good without them and skipping them saves processing time.
    params.filterByColor = False
    params.filterByArea = False
    params.filterByInertia = False
    params.filterByConvexity = False
    params.filterByCircularity = False
    # params.minDistBetweenBlobs is not set — it has no effect here.
    return params
def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector):
    """Detect LED blobs in a grayscale image and return their pixel positions.

    Parameters
    ----------
    image_gray : grayscale image the blob detector runs on
    image_bgr : original colour image, only used for drawing the keypoints
    detector : configured cv.SimpleBlobDetector instance

    Returns
    -------
    (n, 3) uint16 matrix with one row [x, y, 0] per detected LED (the
    colour column is filled in later), or None when no LED was found.
    """
    start_processing = time.perf_counter()
    keypoints = detector.detect(image_gray)  # detected blobs == LEDs
    time_processing_ms = round((time.perf_counter() - start_processing) * 1000, 2)
    print(f'processing time detector: {time_processing_ms} ms')

    number_of_detected_leds = len(keypoints)
    if number_of_detected_leds == 0:
        print("No LEDs were detected")
        return None

    print(f"detected LEDs: {number_of_detected_leds}")
    # Pre-allocate the result matrix: one row [x, y, colour] per LED.
    position_of_leds = np.zeros((number_of_detected_leds, 3), dtype=np.uint16)
    for i, k in enumerate(keypoints):
        # keypoint centres are float; ceil to whole pixel coordinates
        position_of_leds[i, 0] = int(np.ceil(k.pt[0]))  # x position
        position_of_leds[i, 1] = int(np.ceil(k.pt[1]))  # y position

    if draw_opencv:
        # DRAW_RICH_KEYPOINTS scales each drawn circle to the blob size
        blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \
            outImage=np.array([]), flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # BUGFIX: imshow is now nested under draw_opencv — previously
        # show_opencv_window=True with draw_opencv=False raised a NameError
        # because `blobs` was never assigned.
        if show_opencv_window:
            cv.imshow("Detected", blobs)
    return position_of_leds
def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector):
    """Return LED positions from the blob detector, or None when none found.

    Thin pass-through wrapper around detect_LED_positions_in_grayscale;
    the previous if/else returned the same value in both branches, so it
    is collapsed to a single return.
    """
    return detect_LED_positions_in_grayscale(image_gray, image_bgr, detector)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Determine the colour of each detected LED and write it into column 2.

    For every LED the three BGR channels are averaged over a small square
    region around the blob centre; a channel counts as active when its
    average exceeds threshold_color_detection. The combination of active
    channels selects one of the module-level colour numbers.
    """
    # Half edge length of the sampling square around the LED centre:
    # offset = 0 -> 1 value, offset = 1 -> 9 values, offset = 2 -> 25 values.
    offset = 2
    # (blue active, green active, red active) -> colour number
    colour_lookup = {
        (False, False, True): color_number_red,
        (False, True, False): color_number_green,
        (True, False, False): color_number_blue,
        (False, True, True): color_number_yellow,
        (True, False, True): color_number_magenta,
        (True, True, False): color_number_cyan,
        (True, True, True): color_number_white,
        # all-inactive cannot occur (the blob detector never reports an
        # inactive LED), but keep the fall-back for completeness
        (False, False, False): color_number_off,
    }
    for led in matrix_of_LEDs:
        x_pos = led[0]  # uint16
        y_pos = led[1]  # uint16
        # Region around the centre; +1 because the stop index is exclusive.
        # Note: image_bgr[y, x, 2] is the red channel — no channel split needed.
        region = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, :]
        # widen to uint16 so the summation cannot overflow uint8
        region = region.astype(np.uint16, copy=False)
        number_of_elements = region[:, :, 0].size
        mean_blue = region[:, :, 0].sum()/number_of_elements    # float64 if not integer
        mean_green = region[:, :, 1].sum()/number_of_elements   # float64 if not integer
        mean_red = region[:, :, 2].sum()/number_of_elements     # float64 if not integer
        # a channel is active when its average exceeds the threshold
        active = (bool(mean_blue > threshold_color_detection),
                  bool(mean_green > threshold_color_detection),
                  bool(mean_red > threshold_color_detection))
        led[2] = colour_lookup[active]  # uint16
    return matrix_of_LEDs
def detect_LEDs(image_bgr, detector):
    """Detect LED positions in a BGR frame and classify their colours.

    Returns an (n, 3) uint16 matrix [x, y, colour_number], or None when
    no LED was detected. (Dead commented-out timing code removed; the
    redundant None re-check collapsed into an early return.)
    """
    # the blob detector operates on a single channel
    image_gray = convert_rgb_to_grayscale_average(image_bgr)
    # get position of leds
    position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, detector=detector)
    if position_of_LEDs is None:
        return None
    # determine the colour of each led and add it to the matrix (column 2)
    return get_color_of_leds(position_of_LEDs, image_bgr)
def lane_detection(image_bgr, detector):
    """Run LED detection on one frame and construct the lane from the result.

    Returns the detected LED matrix, or None when no LEDs were found.
    """
    print(f"Detect LEDs and color:")
    detected_LEDs = detect_LEDs(image_bgr, detector)
    if detected_LEDs is None:
        return None
    # construct_lane draws/prints as a side effect; its numeric results
    # are not used further at this call site
    dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
        construct_lane(detected_LEDs, image_bgr)
    if print_additional_info:
        print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}")
    return detected_LEDs
# PiCamera
def get_frames_from_camera(detector):
    """Continuously capture PiCamera frames and run lane detection on each.

    detector: pre-configured cv.SimpleBlobDetector, passed through to
    lane_detection(). Loops until 'q' is pressed (cv.waitKey only reacts
    while an OpenCV window is displayed) or the process is interrupted.
    The order of the camera-setting assignments below is deliberate.
    """
    # Initialise Camera
    print('Initialise Camera...')
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            # it was found that, you have to set the right shutter speed at the first initalisation of the current runtime of the program.
            # The gains (analog, digital) will adjust to this set up.
            # After the gains are fixed, they will never change! even if you change the shutter speed during the runtime.
            # To get consistent brightness values, set the right shutter speed at initalisation once.
            time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1) # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings
            # camera.start_preview() # show camera preview through PiCamera interface
            # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation.
            print('Start caputure...')
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                start_processing = time.perf_counter()
                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
                image_bgr = frame.array # raw NumPy array without JPEG encoding
                #cv.imshow("Current Frame", image) # display the image without text
                output.truncate(0) # clear the stream for next frame
                # processing
                lane_detection(image_bgr, detector)
                # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented.
                # if q is pressed, break from loop.
                pressed_key = cv.waitKey(2) & 0xff
                if pressed_key == ord('q'):
                    break
                end_processing = time.perf_counter()
                time_processing = end_processing-start_processing
                time_processing = time_processing*1000
                time_processing = round(time_processing, 2)
                print(f'processing time: {time_processing} ms')
# ----------------------------------------------------------------------------
# main
def main():
    """Program entry point: configure blob detection once, then capture."""
    # build the detector parameters a single time before the capture loop,
    # purely for performance
    blob_params = define_parameters_for_blob_detection()
    detector = create_detector(blob_params)
    get_frames_from_camera(detector)  # blocks until the capture loop ends
    cv.destroyAllWindows()
    print('Program finished')

if __name__ == "__main__":
    main()

View File

@ -0,0 +1,521 @@
# Creation Date: 02.03.2022
# Author: Kenan Gömek
# This code detects the lane with findContours.
# Quit program with 'q' if opencv windows is shows. Else use Ctrl+C.
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
import math as M
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
# OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) --> needs to be divisible by 16 --> 416x320
OUTPUT_RESOLUTION = (192, 144)
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, floar or fraction are valid.
BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
#the brighter, the brighter the LEDs and the higher the RGB values and vice versa!
CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
SHUTTER_SPEED = 50 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
# Parameters
pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 192x144
# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480
# Offset Camera Sensor in Scooty according to Scooty-KS
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
# image parameters
image_height = OUTPUT_RESOLUTION[1] # shape [0]
image_width = OUTPUT_RESOLUTION[0]# shape[1]
# calculate center of image
[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16)
threshold_color_detection = 60
# values under this will not be considered as active leds in each color channel
# see get_color_of_leds()
# Parameters for Blob/LED Detection
minDiameter_mm = 3.75 # [mm] minimum diameter of detected blob/LED. Must be minimum >0 !
# maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED
# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7
# Parameters for grayscale to binary conversion
binary_threshold = 15 # determined by testing and application
# the higher threshold is, the smaller the diameter of the led, because more high values are extracted
binary_maxval = 255 # values ofer threshold will be set to maxval
# Parameters for line fitting for lane construction
param = 0 # not used for DIST_L2
reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line).
aeps = 0.001 # Sufficient accuracy for the angle.
# Parameters for OpenCV
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane and so on
print_additional_info = True
# calculations before start
# Filter blobs by Area --> not implemented anymore, because no need, because detection is good and less time for calculation needed
# more than good trade off!
minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED
# maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED
minArea_px2 = np.pi/4*minDiameter_px**2 # min Area of a blob in px^2
# maxArea_px2 = np.pi/4*maxDiameter_px**2
# minArea = minArea_px2 # min Area of a blob in px^2
# params.maxArea = maxArea_px2 # max Area of a blob in px^2.
# reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform LED points from the image frame into the lane frame (KS-LED).

    The points are translated by (-dx-x_0, -dy-y_0) into the foot of the
    perpendicular and then rotated by -alpha_rad (the sensor angle is
    measured clockwise, i.e. negative with respect to the mathematical
    convention). Finally the points are sorted along the lane axis so
    their order matches the physical LED order.

    detected_LEDs : (n, 3) matrix [x, y, colour]
    alpha_rad     : lane angle in the image frame [rad]
    dx, dy        : offset of the perpendicular foot point from the image centre [px]
    Returns a new, sorted (n, 3) int16 matrix in KS-LED coordinates.
    """
    detected_LEDs_trafo = detected_LEDs.copy() # copy, because else it is only a pointer
    detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow
    x_pnts = detected_LEDs_trafo[:,0]
    y_pnts = detected_LEDs_trafo[:,1]
    # Translation into the foot point of the perpendicular
    x1 = x_pnts-dx-x_0
    x_trafo = x1
    y1 = y_pnts-dy-y_0
    y_trafo = y1
    # Rotation. The sensor angle is clockwise, hence negative compared to the mathematical definition.
    # NOTE: assigning the float rotation result into the int16 matrix truncates towards zero.
    x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1
    detected_LEDs_trafo[:,0] = x_trafo
    y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1
    detected_LEDs_trafo[:,1] = y_trafo
    # sort points along lane: x_2, y_2 -axis (KS_LED)
    detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')]
    return detected_LEDs_trafo
def construct_lane(detected_LEDs, img_bgr):
    """Construct the lane from the detected LED positions.

    Fits a line through the LED centres, computes the perpendicular from
    the image centre onto that line and expresses the foot point both in
    the image frame (KS_0) and the vehicle frame (KS_Scooty). Optionally
    draws the lane into img_bgr (in place) and shows it in a window.

    detected_LEDs : (n, 3) uint16 matrix [x, y, colour]
    img_bgr       : colour frame; modified in place when draw_opencv is set
    Returns (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED).
    """
    # get points
    # xy_pnts = detected_LEDs[:,0:2]
    # x_pnts = detected_LEDs[:,0]
    # y_pnts = detected_LEDs[:,1]
    # approach 2:
    # fit line through centers of LEDs in KS_0
    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)
    alpha_rad = np.arctan2(dy, dx) # calculate angle of line [rad]
    alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line [deg]
    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
    if print_additional_info:
        alpha_print = alpha[0]
        alpha_print = float("{0:.2f}".format(alpha_print))
        print(f"Alpha: {alpha_print}°")
    # get smallest distance from a point to the line
    # Calculation according to: Repetitorium Hoehere Mathematik (Wirth)
    # line : x = a + t*b
    # point: OP = p
    # d = abs(b x (p-a))/(abs(b))
    # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # direction (unit) vector of the lane
    c = p-a
    # vector magnitude: np.linalg.norm(vec)
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px]
    #print(f"d: {round(d,2)}")
    # foot of the perpendicular (x_LED, y_LED): closest point on the line to the image centre
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    if print_additional_info:
        # convert float32, round and prepare for printing string
        x_LED_print = float("{0:.2f}".format(x_LED))
        y_LED_print = float("{0:.2f}".format(y_LED))
        print(f"x_LED: {x_LED_print} [px], y_LED: {y_LED_print} [px]")
    # offset (dx, dy) of the foot point relative to KS_0 (image centre)
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    if print_additional_info:
        # convert float32, round and prepare for printing string
        dx_LED_print = float("{0:.2f}".format(dx_LED))
        dy_LED_print = float("{0:.2f}".format(dy_LED))
        dx_LED_mm_print = float("{0:.2f}".format(dx_LED_mm))
        dy_LED_mm_print = float("{0:.2f}".format(dy_LED_mm))
        print(f"dx_LED: {dx_LED_print} [px] , dy_LED: {dy_LED_print} [px]")
        print(f"dx_LED: {dx_LED_mm_print} [mm] , dy_LED: {dy_LED_mm_print} [mm]")
    # offset (dx, dy) of the foot point relative to KS_Scooty (vehicle frame)
    # these are the values that get returned
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED_scooty: {round(dx_LED_scooty,2)} [px] , dy_LED_scooty: {round(dy_LED_scooty,2)} [px]")
        print(f"dx_LED_scooty: {round(dx_LED_scooty_mm,2)} [mm] , dy_LED_scooty: {round(dy_LED_scooty_mm,2)} [mm]")
    # transform points to obtain the sorted position of the LEDs along the lane
    # with the unsorted detection a vertical LED row can yield a wrong colour
    # pattern like 211323233 -> therefore continue with this sorted version
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    if print_additional_info:
        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
    #-----------------------------------
    # draw useful lines and points
    # draw lane line
    if draw_opencv:
        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
        #print(f"pt_0: {pt_0}, pt_1: {pt_1}")
        cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
        # draw dx dy
        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> perpendicular
        # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx
        # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy
        #draw additional points
        cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points
        #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
        cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green: foot of the perpendicular
    if show_opencv_window:
        cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR colour image to a grayscale image.

    Algorithm: per-pixel channel average, Y = (B+G+R)/3. The float
    average is truncated when cast back to uint8, which matches the
    integer-sum-then-divide behaviour of the original implementation.
    """
    # np.mean accumulates in float64, so no uint8 overflow can occur
    grayscale = image_bgr.mean(axis=2, dtype=np.float64)
    return grayscale.astype(np.uint8, copy=False)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Determine the colour of each LED position and write it into column 2.

    For every LED the three BGR channels are averaged over a small square
    region around the blob centre; a channel counts as active when its
    average exceeds threshold_color_detection. The combination of active
    channels selects one of the module-level colour numbers.
    """
    # Half edge length of the sampling square around the LED centre:
    # offset = 0 -> 1 value, offset = 1 -> 9 values, offset = 2 -> 25 values.
    offset = 1
    # (blue active, green active, red active) -> colour number
    colour_lookup = {
        (False, False, True): color_number_red,
        (False, True, False): color_number_green,
        (True, False, False): color_number_blue,
        (False, True, True): color_number_yellow,
        (True, False, True): color_number_magenta,
        (True, True, False): color_number_cyan,
        (True, True, True): color_number_white,
        # all-inactive cannot occur (no inactive LED survives detection),
        # but keep the fall-back for completeness
        (False, False, False): color_number_off,
    }
    for led in matrix_of_LEDs:
        x_pos = led[0]  # uint16
        y_pos = led[1]  # uint16
        # Region around the centre; +1 because the stop index is exclusive.
        # Note: image_bgr[y, x, 2] is the red channel — no channel split needed.
        region = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, :]
        # widen to uint16 so the summation cannot overflow uint8
        region = region.astype(np.uint16, copy=False)
        number_of_elements = region[:, :, 0].size
        mean_blue = region[:, :, 0].sum()/number_of_elements    # float64 if not integer
        mean_green = region[:, :, 1].sum()/number_of_elements   # float64 if not integer
        mean_red = region[:, :, 2].sum()/number_of_elements     # float64 if not integer
        # a channel is active when its average exceeds the threshold
        active = (bool(mean_blue > threshold_color_detection),
                  bool(mean_green > threshold_color_detection),
                  bool(mean_red > threshold_color_detection))
        led[2] = colour_lookup[active]  # uint16
    return matrix_of_LEDs
def get_position_of_LEDs_contours(image_gray, image_bgr):
    """Locate LEDs as contour centroids in a grayscale image.

    The image is binarised with a fixed threshold, the extreme outer
    contours are extracted and filtered by minimum area; each remaining
    contour's centroid becomes one LED position.

    Returns an (n, 3) uint16 matrix [x, y, color_number_off] (colour is
    classified later), or None when no contour/LED was detected.

    Fixes: the loop variable `M` shadowed the module alias
    `import math as M` (renamed to `moments`); the per-iteration
    np.vstack was O(n^2) and is replaced by a single array build.
    """
    # create binary image
    ret, image_binary = cv.threshold(image_gray, binary_threshold, binary_maxval, cv.THRESH_BINARY)
    # find contours: retrieve only the extreme outer contours
    contours, hierarchy = cv.findContours(image_binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        if print_additional_info:
            print("No contours were detected")
        return None
    if draw_opencv:
        # draw on a copy — drawing on the original would later be picked
        # up as LED colour
        image_bgr_contours = image_bgr.copy()
    led_rows = []  # collect rows, build the matrix once at the end
    for cnt in contours:
        moments = cv.moments(cnt)
        area = cv.contourArea(cnt)
        # Filter contours by area. The minimum area must be > 0 !
        if area > minArea_px2:
            # centroid from moments; guard against zero division
            if moments['m00'] == 0:
                cx, cy = 0, 0
            else:
                cx = int(moments['m10']/moments['m00'])
                cy = int(moments['m01']/moments['m00'])
            led_rows.append([cx, cy, color_number_off])
            if draw_opencv:
                cv.circle(image_bgr_contours, (cx, cy), 2, (255, 255, 255), -1)  # filled white centroid marker
    number_of_detected_LEDs = len(led_rows)
    if number_of_detected_LEDs == 0:
        if print_additional_info:
            print("No LEDs were detected")
        return None
    if print_additional_info:
        print(f"detected LEDs: {number_of_detected_LEDs}")
    if draw_opencv:
        # draw all contours (-1) in white with line thickness 1
        cv.drawContours(image_bgr_contours, contours, -1, (255, 255, 255), 1)
    if show_opencv_window:
        cv.imshow("binary", image_binary)
        cv.imshow("Contours", image_bgr_contours)
    return np.array(led_rows, dtype=np.uint16)
def detect_position_of_LEDs(image_bgr):
    """Convert the frame to grayscale and return the detected LED positions
    (or None when nothing was found)."""
    image_gray = convert_rgb_to_grayscale_average(image_bgr)
    if show_opencv_window:
        cv.imshow("grayscale", image_gray)
    return get_position_of_LEDs_contours(image_gray, image_bgr)
def lane_detection(image_bgr):
    """Run the full per-frame pipeline: LED detection, colour
    classification and lane construction.

    Returns the detected LED matrix [x, y, colour], or None when no LEDs
    were found.
    """
    if print_additional_info:
        print(f"Detect LEDs and color:")
    position_of_LEDs = detect_position_of_LEDs(image_bgr)
    if position_of_LEDs is None:
        return None
    # classify the colour of every detected LED
    detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr)
    if print_additional_info:
        print(f"Detected LEDs relative to image center (x0,y0):\n{detected_LEDs}")
        print("\nContruct lane with consideration of camera offset:")
    # construct_lane draws/prints as a side effect; its numeric results
    # are not used further at this call site
    dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
        construct_lane(detected_LEDs, image_bgr)
    return detected_LEDs
# PiCamera
def get_frames_from_camera():
    """Continuously capture PiCamera frames and run lane detection on each.

    Loops until 'q' is pressed (cv.waitKey only reacts while an OpenCV
    window is displayed) or the process is interrupted. The order of the
    camera-setting assignments below is deliberate.
    """
    # Initialise Camera
    print('Initialise Camera...')
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            # it was found that, you have to set the right shutter speed at the first initalisation of the current runtime of the program.
            # The gains (analog, digital) will adjust to this set up.
            # After the gains are fixed, they will never change! even if you change the shutter speed during the runtime.
            # To get consistent brightness values, set the right shutter speed at initalisation once.
            time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1) # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings
            # camera.start_preview() # show camera preview through PiCamera interface
            # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation.
            print('Start caputure...')
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                start_processing = time.perf_counter()
                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
                image_bgr = frame.array # raw NumPy array without JPEG encoding
                #cv.imshow("Current Frame", image) # display the image without text
                output.truncate(0) # clear the stream for next frame
                # processing
                lane_detection(image_bgr)
                # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented.
                # if q is pressed, break from loop.
                pressed_key = cv.waitKey(2) & 0xff
                if pressed_key == ord('q'):
                    break
                end_processing = time.perf_counter()
                time_processing = end_processing-start_processing
                time_processing = time_processing*1000
                time_processing = round(time_processing, 2)
                print(f'processing time: {time_processing} ms')
# ----------------------------------------------------------------------------
# main
def main():
    """Program entry point: run the capture loop, then clean up windows."""
    get_frames_from_camera()  # blocks until the capture loop terminates
    cv.destroyAllWindows()
    print('Program finished')

if __name__ == "__main__":
    main()

BIN
90_ZielSW/Final/PAP.pdf Normal file

Binary file not shown.

View File

@ -0,0 +1,522 @@
# Creation Date: 02.03.2022
# Author: Kenan Gömek
# This code detects the lane.
# Quit program with 'q' if opencv windows is shows. Else use Ctrl+C.
import cv2 as cv
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import numpy as np
import math as M
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
OUTPUT_RESOLUTION = (192, 144)
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, float or fraction are valid.
BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
# the brighter, the brighter the LEDs and the higher the RGB values and vice versa!
CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
SHUTTER_SPEED = 50 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
# Parameters
pixels_per_mm = 32/24.25 # [px/mm] for 120 mm camera height for resolution: 192x144
# Offset Camera Sensor in Scooty according to Scooty-KS
x_offset_camera_mm = 0 # [mm]
y_offset_camera_mm = 0 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
# image parameters
image_height = OUTPUT_RESOLUTION[1]
image_width = OUTPUT_RESOLUTION[0]
# calculate center of image
[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16)
threshold_color_detection = 60
# values (average) under this will not be considered as active leds in each color channel
# see get_color_of_leds()-function
# Parameters for Blob/LED Detection
minDiameter_mm = 3.75 # [mm] minimum diameter of detected blob/LED. Must be minimum >0 !
# Define color numbers to identify the color channels in the matrix with all detected LEDs.
# No string, because numpy array should stay uint16
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7
# Parameters for grayscale to binary conversion
binary_threshold = 15 # determined by testing and application
# the higher threshold is, the smaller the diameter of the led, because more high values are extracted
binary_maxval = 255 # values ofer threshold will be set to maxval
# Parameters for line fitting for lane construction
param = 0 # not used for DIST_L2
reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line).
aeps = 0.001 # Sufficient accuracy for the angle.
# Parameters for OpenCV
# OpenCV windows only for demonstration
# They are not needed for functionality
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane and so on
print_additional_info = True
# calculations before start
# Filter blobs by Area --> not implemented anymore, because no need, because detection is good and less time for calculation needed
# more than good trade off!
minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED
minArea_px2 = np.pi/4*minDiameter_px**2 # min Area of a blob in px^2
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform LED points from image coordinates (KS_0) into the lane
    coordinate system KS_LED and sort them along the lane axis.

    Parameters:
        detected_LEDs: (n, 3) array with rows [x, y, color_number] in KS_0.
        alpha_rad: lane angle in rad. The sensor angle is clockwise, hence
            the negated angle in the rotation below.
        dx, dy: translation of the KS_LED origin relative to the image center.

    Returns:
        Transformed int16 copy of detected_LEDs, sorted by the lane (x) axis.
    """
    detected_LEDs_trafo = detected_LEDs.copy()  # copy: do not mutate the caller's array
    detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16)  # signed type: transformed coordinates can be negative
    x_pnts = detected_LEDs_trafo[:, 0]
    y_pnts = detected_LEDs_trafo[:, 1]
    # Translation into the foot point (x_0/y_0 are the module-level image center coordinates)
    x1 = x_pnts-dx-x_0
    y1 = y_pnts-dy-y_0
    # Rotation. The sensor angle is clockwise, i.e. negative w.r.t. the
    # mathematical convention, therefore -alpha_rad.
    # (The dead pre-assignments `x_trafo = x1` / `y_trafo = y1` of the original
    # were removed: both were overwritten before use.)
    x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1
    detected_LEDs_trafo[:, 0] = x_trafo
    y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1
    detected_LEDs_trafo[:, 1] = y_trafo
    # sort points along the lane axis: x_2, y_2 (KS_LED)
    detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')]
    return detected_LEDs_trafo
def construct_lane(detected_LEDs, img_bgr):
    """Construct the lane from the detected LEDs.

    Fits a least-squares line through the LED centers (KS_0), computes the
    perpendicular foot point of the image center on that line, derives the
    lane offsets relative to the camera and the Scooty coordinate system,
    and transforms the LEDs into the lane coordinate system KS_LED.
    Optionally draws the lane and helper geometry into img_bgr.

    Parameters:
        detected_LEDs: (n, 3) array with rows [x, y, color_number] in KS_0.
        img_bgr: BGR image to draw into (only when draw_opencv is enabled).

    Returns:
        (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED)
    """
    # get points
    # xy_pnts = detected_LEDs[:,0:2]
    # x_pnts = detected_LEDs[:,0]
    # y_pnts = detected_LEDs[:,1]
    # approach 2:
    # fit line through centers of LEDs in KS_0
    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:, 0:2], cv.DIST_L2, param, reps, aeps)
    # (x_2, y_2) is a point on the fitted line; same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)
    alpha_rad = np.arctan2(dy, dx)  # angle of the line [rad]
    alpha = np.arctan2(dy, dx)*180/np.pi  # angle of the line [deg]
    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
    if print_additional_info:
        alpha_print = alpha[0]
        alpha_print = float("{0:.2f}".format(alpha_print))
        print(f"Alpha: {alpha_print}°")
    # smallest distance between the image center and the line
    # calculation after: Repetitorium Hoehere Mathematik, Wirth
    # line:  x = a + t*b
    # point: OP = p
    # d = abs(b x (p-a))/(abs(b))
    # info: np.array()[:, 0] --> gets only an array with 1 dimension with the desired values
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:, 0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:, 0]  # direction vector of the line
    c = p-a
    # vector magnitude: np.linalg.norm(vec)
    cross = np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b)  # distance [px]
    # print(f"d: {round(d,2)}")
    # foot point of the perpendicular (x_LED, y_LED)
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    if print_additional_info:
        # convert float32, round and prepare for printing string
        x_LED_print = float("{0:.2f}".format(x_LED))
        y_LED_print = float("{0:.2f}".format(y_LED))
        print(f"x_LED: {x_LED_print} [px], y_LED: {y_LED_print} [px]")
    # offset (dx, dy) of the foot point relative to KS_0
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    if print_additional_info:
        # convert float32, round and prepare for printing string
        dx_LED_print = float("{0:.2f}".format(dx_LED))
        dy_LED_print = float("{0:.2f}".format(dy_LED))
        dx_LED_mm_print = float("{0:.2f}".format(dx_LED_mm))
        dy_LED_mm_print = float("{0:.2f}".format(dy_LED_mm))
        print(f"dx_LED: {dx_LED_print} [px] , dy_LED: {dy_LED_print} [px]")
        print(f"dx_LED: {dx_LED_mm_print} [mm] , dy_LED: {dy_LED_mm_print} [mm]")
    # offset (dx, dy) of the foot point from the image center to KS_Scooty
    # these are the values that get returned
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED_scooty: {round(dx_LED_scooty,2)} [px] , dy_LED_scooty: {round(dy_LED_scooty,2)} [px]")
        print(f"dx_LED_scooty: {round(dx_LED_scooty_mm,2)} [mm] , dy_LED_scooty: {round(dy_LED_scooty_mm,2)} [mm]")
    # transform the points to obtain the LED positions sorted along the lane;
    # with the plain detection order e.g. a vertical lane can yield patterns
    # like 211323233 -- therefore continue with the sorted version
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    if print_additional_info:
        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
    # -----------------------------------
    # draw useful lines and points
    # draw lane line
    if draw_opencv:
        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
        # print(f"pt_0: {pt_0}, pt_1: {pt_1}")
        line_color = (255, 255, 255)
        line_thickness = 1
        cv.line(img_bgr, pt_0, pt_1, line_color, line_thickness)  # draw lane
        # draw dx dy
        line_color = (0, 0, 255)
        line_thickness = 2
        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), line_color, line_thickness)  # shortest distance from KS_0 to KS_LED --> perpendicular
        # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0, 0, 255), 2) # only dx
        # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0, 0, 255), 2) # only dy
        # draw additional points
        circle_radius = 5
        circle_color = (255, 128, 255)
        circle_thickness = -1  # filled
        cv.circle(img_bgr, (int(x_2), int(y_2)), circle_radius, circle_color, circle_thickness)  # center of points
        # cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
        circle_color = (170, 255, 0)
        cv.circle(img_bgr, (int(x_LED), int(y_LED)), circle_radius, circle_color, circle_thickness)  # foot point
    if show_opencv_window:
        cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image to grayscale using the channel average.

    Algorithm: Average: Y = (R + G + B) / 3, truncated back to uint8.
    """
    # Widen to uint16 first so summing three uint8 channels cannot overflow.
    channels = image_bgr.astype(np.uint16, copy=False)
    channel_sum = channels[:, :, 0] + channels[:, :, 1] + channels[:, :, 2]
    # Division yields float; converting back to uint8 truncates the fraction.
    return (channel_sum / 3).astype(np.uint8, copy=False)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Determine the color of each detected LED and write it into the matrix.

    For every row [x_pos, y_pos, color] the average of each BGR channel in a
    small square region around (x_pos, y_pos) is computed. A channel counts as
    active if its average exceeds the module-level threshold_color_detection;
    the combination of active channels is mapped to a module-level color number
    and stored in column 2 of the row (in place).

    Parameters:
        matrix_of_LEDs: (n, 3) uint16 array with rows [x_pos, y_pos, color].
        image_bgr: uint8 BGR image the positions refer to.

    Returns:
        The same matrix_of_LEDs array with column 2 filled in.
    """
    # Note: image_r[y_pos, x_pos] == image_bgr[y_pos, x_pos, 2], so there is no
    # need to split the color channels.
    offset = 1  # half edge length of the square region around the LED center
    # offset = 0 --> only the value from the middle point of the blob/led
    # offset = 1 --> 9 values, offset = 2 --> 25 values
    # NOTE(review): for LEDs closer than `offset` px to the image border the
    # region is clipped (or empty) -- verify positions stay inside the image.
    # Map (red, green, blue) activity to a color number. All 8 combinations are
    # covered; (False, False, False) cannot occur in practice, because the blob
    # detection only yields active LEDs, but it is kept for completeness.
    color_of_statuses = {
        (True, False, False): color_number_red,
        (False, True, False): color_number_green,
        (False, False, True): color_number_blue,
        (True, True, False): color_number_yellow,   # yellow = red + green
        (True, False, True): color_number_magenta,  # magenta = red + blue
        (False, True, True): color_number_cyan,     # cyan = green + blue
        (True, True, True): color_number_white,     # white = red + green + blue
        (False, False, False): color_number_off,
    }
    for led in matrix_of_LEDs:
        x_pos = led[0]  # uint16
        y_pos = led[1]  # uint16
        # Values of the color channels in the region around the LED center.
        # +1 at the stop index, because slicing is exclusive.
        region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0]  # uint8
        region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1]  # uint8
        region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2]  # uint8
        # Average of the values. Convert dtype first to prevent integer
        # overflow during the addition inside sum().
        region_around_red_led = region_around_red_led.astype(np.uint16, copy=False)
        region_around_green_led = region_around_green_led.astype(np.uint16, copy=False)
        region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False)
        number_of_elements = region_around_blue_led.size
        value_of_red_led = region_around_red_led.sum()/number_of_elements  # float64, if not integer result
        value_of_green_led = region_around_green_led.sum()/number_of_elements  # float64, if not integer result
        value_of_blue_led = region_around_blue_led.sum()/number_of_elements  # float64, if not integer result
        # A channel is active if its average exceeds the threshold.
        # (bool(...) replaces the original `== True/False` cascades, PEP 8 E712.)
        status_red_led = bool(value_of_red_led > threshold_color_detection)
        status_green_led = bool(value_of_green_led > threshold_color_detection)
        status_blue_led = bool(value_of_blue_led > threshold_color_detection)
        # fill matrix with the color number
        led[2] = color_of_statuses[(status_red_led, status_green_led, status_blue_led)]  # uint16
    return matrix_of_LEDs
def get_position_of_LEDs_contours(image_gray, image_bgr):
    """Detect LED positions as contour centroids in the grayscale image.

    The grayscale image is binarized with the module-level binary_threshold,
    the extreme outer contours are extracted and filtered by minimum area
    (minArea_px2). For each remaining contour the centroid is computed via
    image moments.

    Parameters:
        image_gray: single-channel uint8 image.
        image_bgr: corresponding BGR image (only used for drawing when
            draw_opencv is enabled).

    Returns:
        (n, 3) uint16 array with rows [x_pos, y_pos, color_number_off],
        or None if no contour / no LED was detected.
    """
    # create binary image
    ret, image_binary = cv.threshold(image_gray, binary_threshold, binary_maxval, cv.THRESH_BINARY)
    # find contours
    contour_retrieval_algorithm = cv.RETR_EXTERNAL  # retrieves only the extreme outer contours
    contours, hierarchy = cv.findContours(image_binary, contour_retrieval_algorithm, cv.CHAIN_APPROX_SIMPLE)
    # analyse contours
    number_of_detected_contours = len(contours)
    if number_of_detected_contours == 0:
        if print_additional_info:
            print("No contours were detected")
        return None
    if draw_opencv:
        # A copy is needed to draw on, because otherwise the color of the
        # drawn circle would later be detected as the LED color.
        image_bgr_contours = image_bgr.copy()
    # Collect centroids in a Python list and convert once at the end.
    # (The original np.vstack inside the loop copied the whole array on every
    # iteration, i.e. O(n^2) overall.)
    detected_led_rows = []
    for cnt in contours:
        M = cv.moments(cnt)
        area = cv.contourArea(cnt)
        # diameter = 2*np.sqrt(area/np.pi)
        # diameter_mm = diameter*(1/pixels_per_mm)
        # Filter contours by area. The minimum area needs to be at least > 0!
        if area > minArea_px2:
            # prevent zero division when the contour has no mass
            if M['m00'] == 0:
                cx = 0
                cy = 0
            else:
                cx = int(M['m10']/M['m00'])
                cy = int(M['m01']/M['m00'])
            # color is filled in later by get_color_of_leds()
            detected_led_rows.append([cx, cy, color_number_off])
            # draw centroids
            if draw_opencv:
                radius = 2
                color = (255, 255, 255)
                thickness = -1  # filled
                cv.circle(image_bgr_contours, (cx, cy), radius, color, thickness)
    number_of_detected_LEDs = len(detected_led_rows)
    if number_of_detected_LEDs == 0:
        if print_additional_info:
            print("No LEDs were detected")
        return None
    if print_additional_info:
        print(f"detected LEDs: {number_of_detected_LEDs}")
    if draw_opencv:
        # draw contours
        contours_to_pass = -1  # pass all contours
        color_of_contour = (255, 255, 255)
        line_thickness = 1
        cv.drawContours(image_bgr_contours, contours, contours_to_pass, color_of_contour, line_thickness)
    if show_opencv_window:
        cv.imshow("binary", image_binary)
        cv.imshow("Contours", image_bgr_contours)
    return np.array(detected_led_rows, dtype=np.uint16)
def detect_position_of_LEDs(image_bgr):
    """Detect LED positions in one BGR frame.

    The frame is first reduced to an average-based grayscale image, which is
    then fed into the contour-based position detection.

    Returns:
        The (n, 3) position matrix from get_position_of_LEDs_contours, or None.
    """
    image_gray = convert_rgb_to_grayscale_average(image_bgr)
    if show_opencv_window:
        cv.imshow("grayscale", image_gray)
    return get_position_of_LEDs_contours(image_gray, image_bgr)
def lane_detection(image_bgr):
    """Run the full LED/lane pipeline on one BGR frame.

    Detects the LED positions, determines their colors and constructs the
    lane (including the camera offset).

    Returns:
        The (n, 3) matrix of detected LEDs, or None if nothing was detected.
    """
    # Detect LEDs
    if print_additional_info:
        print(f"Detect LEDs and color:")
    position_of_LEDs = detect_position_of_LEDs(image_bgr)
    # Guard clause: nothing detected, nothing to construct.
    if position_of_LEDs is None:
        return None
    # Get the color of the detected LEDs (fills column 2 in place).
    detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr)
    if print_additional_info:
        print(f"Detected LEDs relative to image center (x0,y0):\n{detected_LEDs}")
        print("\nContruct lane with consideration of camera offset:")
    # Construct the lane; the offsets are computed for their side effects
    # (printing/drawing) here, only the LED matrix is returned.
    construct_lane(detected_LEDs, image_bgr)
    return detected_LEDs
# PiCamera
def get_frames_from_camera():
    """Continuously capture frames from the PiCamera and run lane detection.

    Configures the camera (sensor mode, resolution, white balance, exposure),
    then processes frames until 'q' is pressed in an OpenCV window.
    """
    # Initialise Camera
    print('Initialise Camera...')
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE  # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION  # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            # It was found that you have to set the right shutter speed at the
            # first initialisation of the current runtime of the program.
            # The gains (analog, digital) will adjust to this setup.
            # After the gains are fixed, they will never change, even if you
            # change the shutter speed during the runtime.
            # To get consistent brightness values, set the right shutter speed
            # at initialisation once.
            time.sleep(SLEEP_TIME)  # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1)  # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME)  # camera warm-up time to apply settings
            # camera.start_preview() # show camera preview through PiCamera interface
            # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation.
            print('Start caputure...')
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                start_processing = time.perf_counter()
                framenumber = frameidx+1  # frameidx starts with 0, framenumber with 1
                image_bgr = frame.array  # raw NumPy array without JPEG encoding
                # cv.imshow("Current Frame", image) # display the image without text
                output.truncate(0)  # clear the stream for next frame
                # processing
                lane_detection(image_bgr)
                # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented.
                # if q is pressed, break from loop.
                pressed_key = cv.waitKey(2) & 0xff
                if pressed_key == ord('q'):
                    break
                end_processing = time.perf_counter()
                time_processing = end_processing-start_processing
                time_processing = time_processing*1000  # [s] --> [ms]
                time_processing = round(time_processing, 2)
                print(f'processing time: {time_processing} ms')
# ----------------------------------------------------------------------------
# main
def main():
    """Entry point: run the capture/processing loop, then clean up windows."""
    # start capturing
    get_frames_from_camera()  # start capture
    if show_opencv_window:
        cv.destroyAllWindows()  # destroy all open cv windows
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,912 @@
import cv2 as cv
import numpy as np
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import sys
import numpy as np
import math as M
from multiprocessing import Process, shared_memory
# Parameters
pixels_per_mm = 71/24.25 # [px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 # [px/mm] for 120 mm camera height for resolution: 640x480
# Offset of the camera sensor in the Scooty relative to the Scooty coordinate system
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
# image parameters
image_heigth = 320 # shape [0]
image_width = 416 # shape[1]
# calculate center of image (origin of KS_0)
[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16)
threshold_color_detection = 60 # channel averages under this value will not be considered as active leds
# Parameters for blob/LED detection
minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED
maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED
# Define color numbers to identify the color channels in the matrix with all detected LEDs.
# No string, because the numpy array should stay uint16.
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7
#----------------------------------------------------------------------
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution
OUTPUT_RESOLUTION = (320, 416) # (width, heigth)
# NOTE(review): the comment above claims (width, heigth), which would make the
# width 320 -- contradicting the 416x320 (width x heigth) values used earlier
# in this file. Verify the intended tuple order.
image_width = OUTPUT_RESOLUTION[0]
image_heigth = OUTPUT_RESOLUTION[1]
number_of_colorchannels = 3 # r, g, b
# NOTE(review): this multiplies the height by itself; a full frame size is
# presumably heigth*width*channels -- confirm before relying on size_of_frame.
size_of_frame=int(image_heigth*image_heigth*number_of_colorchannels)
frame_dimension = int(number_of_colorchannels)
AWB_MODE = 'off' # auto white balance mode
AWB_GAINS = (1.395, 1.15) # white balance gains to have colours read correctly: (red, blue). Int, float or fraction are valid.
BRIGHTNESS = 25 # brightness setting of the camera. Default is 50. [0-100]
# the brighter, the brighter the LEDs and the higher the RGB values and vice versa!
CONTRAST = 100 # contrast setting of the camera. The default value is 0. [-100 ... 100]
SHUTTER_SPEED = 50 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # time for sleep-mode for the camera in seconds. Default: 2 s
# miscellaneous parameters
max_value_of_uint64 = int((2**64) - 1) # @30 fps: conservatively calculated driving time: 1.95*1e10 years --> integer overflow not relevant
# settings for development
show_opencv_window = False
# Create the shared-memory blocks for the main process. Worker processes attach
# to these blocks by name; the NumPy arrays below are views into the buffers.
# shared memory for bools
shm_bools_pre=np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # numpy array with the flags stored in it
# [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished
# [4]: p_red_started [5]: p_green_started [6]: p_blue_started
# [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger
size_of_buffer = shm_bools_pre.nbytes
print(f"size of buffer: {size_of_buffer}") # size of buffer: 10
print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool
shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block
shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory
shm_bools[:] = shm_bools_pre[:] # copy the original data into shared memory
# print(shm_bool)
# print(shm_bools.name)
# shared memory for the frame number
shm_framenumber_pre=np.array([0], dtype=np.uint64)
size_of_buffer = shm_framenumber_pre.nbytes
print(f"size of framenumber-buffer: {size_of_buffer}") # 8
print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") # uint64
shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block
shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory
shm_framenumber[:] = shm_framenumber_pre[:] # copy the original data into shared memory
# print(shm_framenumber) # [0]
# print(shm_framenumber_create.name) # shm_framenumber
# shared memory for the red, green and blue single-channel frames
int_black = 0 # integer for black color / no color
shm_colorframes_pre = np.full(\
    (image_heigth,image_width), \
    int_black, dtype=np.uint8)
size_of_buffer = shm_colorframes_pre.nbytes
print(f"size of colorframe-buffer: {size_of_buffer}") # 133 120
print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") # uint8
shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory
shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory
shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory
shm_redframe[:] = shm_colorframes_pre[:] # copy the original data into shared memory
shm_greenframe[:] = shm_colorframes_pre[:] # copy the original data into shared memory
shm_blueframe[:] = shm_colorframes_pre[:] # copy the original data into shared memory
# shared memory for the full BGR frame
int_black = 0 # integer for black color / no color
shm_frame_pre = np.full(\
    (image_heigth,image_width, number_of_colorchannels), \
    int_black, dtype=np.uint8)
size_of_buffer = shm_frame_pre.nbytes
print(f"size of colorframe-buffer: {size_of_buffer}") # 399 360 = 416 x 320 x 3
print(f"shm_colorframes_pre dtype: {shm_frame_pre.dtype}") # uint8
shm_frame_create = shared_memory.SharedMemory(name="shm_frame", create=True, size=shm_frame_pre.nbytes) # create a new shared memory block
shm_frame = np.ndarray(shm_frame_pre.shape, dtype=shm_frame_pre.dtype, buffer=shm_frame_create.buf) # create a NumPy array backed by shared memory
shm_frame[:] = shm_frame_pre[:] # copy the original data into shared memory
# ----------------------------------------------------------------------------
# Define Functions
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform LED points from image coordinates (KS_0) into the lane
    coordinate system KS_LED and sort them along the lane axis.

    Parameters:
        detected_LEDs: (n, 3) array with rows [x, y, color_number] in KS_0.
        alpha_rad: lane angle in rad. The sensor angle is clockwise, hence
            the negated angle in the rotation below.
        dx, dy: translation of the KS_LED origin relative to the image center.

    Returns:
        Transformed int16 copy of detected_LEDs, sorted by the lane (x) axis.
    """
    detected_LEDs_trafo = detected_LEDs.copy() # copy, because otherwise it is only a reference
    detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # signed type: transformed coordinates can be negative
    x_pnts = detected_LEDs_trafo[:,0]
    y_pnts = detected_LEDs_trafo[:,1]
    # Translation (x_0/y_0 are the module-level image center coordinates)
    x1 = x_pnts-dx-x_0
    x_trafo=x1
    y1 = y_pnts-dy-y_0
    y_trafo = y1
    # Rotation. The sensor angle is clockwise, i.e. negative w.r.t. the
    # mathematical convention, therefore -alpha_rad.
    x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1
    detected_LEDs_trafo[:,0] = x_trafo
    y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1
    detected_LEDs_trafo[:,1] = y_trafo
    # sort points along the lane axis: x_2, y_2 (KS_LED)
    detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')]
    return detected_LEDs_trafo
def construct_lane(detected_LEDs, img_bgr):
    """Construct the lane from the detected LEDs.

    Fits a least-squares line through the LED centers (KS_0), computes the
    perpendicular foot point of the image center on that line, derives the
    lane offsets relative to the camera and the Scooty coordinate system,
    and transforms the LEDs into the lane coordinate system KS_LED.
    Draws the lane and helper geometry into img_bgr.

    Parameters:
        detected_LEDs: (n, 3) array with rows [x, y, color_number] in KS_0.
        img_bgr: BGR image to draw into.

    Returns:
        (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED)
    """
    # get points
    # xy_pnts = detected_LEDs[:,0:2]
    # x_pnts = detected_LEDs[:,0]
    # y_pnts = detected_LEDs[:,1]
    # approach 2:
    # fit line through centers of LEDs in KS_0
    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
    param = 0 # not used for DIST_L2
    reps = 0.001 # sufficient accuracy for the radius (distance between the coordinate origin and the line)
    aeps = 0.001 # sufficient accuracy for the angle
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # (x_2, y_2) is a point on the fitted line; same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)
    alpha_rad = np.arctan2(dy, dx) # angle of the line [rad]
    alpha = np.arctan2(dy, dx)*180/np.pi # angle of the line [deg]
    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
    print(f"Lane: alpha:{alpha[0]}°")
    # smallest distance between the image center and the line
    # calculation after: Repetitorium Hoehere Mathematik, Wirth
    # line:  x = a + t*b
    # point: OP = p
    # d = abs(b x (p-a))/(abs(b))
    # info: np.array()[:,0] --> gets only an array with 1 dimension with the desired values
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # direction vector of the line
    c = p-a
    # vector magnitude: np.linalg.norm(vec)
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px]
    #print(f"d: {round(d,2)}")
    # foot point of the perpendicular (x_LED, y_LED)
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    #print(f"x_LED: {x_LED}, y_LED: {y_LED}")
    # offset (dx, dy) of the foot point relative to KS_0
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    #print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]")
    #print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]")
    # offset (dx, dy) of the foot point from the image center to KS_Scooty
    # these are the values that get returned
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    #print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]")
    #print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]")
    # transform the points to obtain the LED positions sorted along the lane;
    # with the plain detection order e.g. a vertical lane can yield patterns
    # like 211323233 -- therefore continue with the sorted version
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    #print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
    #-----------------------------------
    # draw useful lines and points
    # draw lane line
    pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
    pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
    #print(f"pt_0: {pt_0}, pt_1: {pt_1}")
    cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
    # draw dx dy
    cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> perpendicular
    # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx
    # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy
    # draw additional points
    cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) # pink: center of points
    #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
    cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green: foot point
    #cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_b, image_g, image_r):
    """Convert separate B, G and R channel images to one grayscale image.

    Algorithm: Average: Y = (R + G + B) / 3, truncated back to uint8.
    """
    # Sum in uint16 so that adding three uint8 channels cannot overflow.
    channel_sum = sum(ch.astype(np.uint16) for ch in (image_b, image_g, image_r))
    # Division yields float; converting back to uint8 truncates the fraction.
    return (channel_sum / 3).astype(np.uint8)
def define_parameters_for_blob_detection():
    """Set up and return the parameters for cv.SimpleBlobDetector.

    Returns:
        A cv.SimpleBlobDetector_Params instance configured for detecting LEDs
        in the average-based grayscale image.
    """
    params = cv.SimpleBlobDetector_Params()
    # Thresholds used to convert the source image to binary images, applying
    # thresholding with several thresholds from minThreshold (inclusive) to
    # maxThreshold (exclusive) with distance thresholdStep between neighboring
    # thresholds.
    # Since the grayscale image is dark if only one color channel is active,
    # the threshold values have to be set like this;
    # in particular the thresholdStep value has to be low.
    params.minThreshold=20 # reminder: this value is set for the grayscale image
    params.maxThreshold=255
    params.thresholdStep=1
    params.filterByColor=False # do not filter blobs by color
    # Filter blobs by area
    params.filterByArea=False
    minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED
    maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED
    minArea_px2 = np.pi/4*minDiameter_px**2
    maxArea_px2 = np.pi/4*maxDiameter_px**2
    params.minArea = minArea_px2 # min area of a blob in px^2
    # params.maxArea = maxArea_px2 # max area of a blob in px^2.
    # reason for not filtering by maxArea: motion blur + rolling shutter --> larger area
    # Filter by inertia
    params.filterByInertia=False
    params.minInertiaRatio = 0.2 # [0-1]
    # Filter by convexity
    params.filterByConvexity=False
    params.minConvexity = 0.2 # [0-1]
    # Filter by circularity
    params.filterByCircularity=False
    params.minCircularity = 0.4 # [0-1]
    # params.minDistBetweenBlobs = minDist_px # this has no effect
    return params
def detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection):
    """Detect LED positions in the grayscale image with a simple blob detector.

    Parameters:
        image_gray: single-channel uint8 image to run the blob detector on.
        image_bgr: corresponding BGR image (used for drawing the keypoints).
        params_for_blob_detection: cv.SimpleBlobDetector_Params instance.

    Returns:
        (n, 3) uint16 array with rows [x_pos, y_pos, 0], or None if no blobs
        were detected. Column 2 (color) is filled in later.
    """
    detector = cv.SimpleBlobDetector_create(params_for_blob_detection) # set up the detector with the specified parameters
    keypoints = detector.detect(image_gray) # detect blobs --> LEDs
    number_of_detected_leds = len(keypoints)
    if number_of_detected_leds != 0:
        # print information of keypoints
        print(f"detected LEDs: {number_of_detected_leds}")
        # pre-allocate the result matrix
        number_of_rows = number_of_detected_leds
        number_of_columns = 3
        position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16)
        for i, k in enumerate(keypoints):
            x_pos = round(k.pt[0],0) # x position
            y_pos = round(k.pt[1],0) # y position
            # print(f"x: {x_pos} y: {y_pos}")
            diameter_px = round(k.size,2)
            diameter_mm = round(diameter_px*1/pixels_per_mm,2)
            # print(f"diameter [px]: {diameter_px} diameter [mm]: {diameter_mm}") # diameter
            area_px2 = round(M.pi/4*k.size**2,0) # area in px^2
            area_mm2 = round(area_px2*(1/pixels_per_mm)**2,0)
            # print(f"area [px^2]: {area_px2} area [mm^2]: {area_mm2}")
            # print('')
            # calculate parameters to transfer to the matrix
            # (x_pos/y_pos are already rounded above, so the ceil only performs
            # the conversion to int and cannot change the value)
            x_pos = int(np.ceil(x_pos))
            y_pos = int(np.ceil(y_pos))
            # fill matrix
            # position_of_leds[i,:] = [x_pos,y_pos, 0]
            position_of_leds[i,0] = x_pos
            position_of_leds[i,1] = y_pos
        # draw the keypoints on the original image
        # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of the blob
        blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \
            outImage=np.array([]), flags= cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # cv.imshow("grayscale", image_gray)
        # cv.imshow("Detected", blobs)
        return position_of_leds
    else:
        print(f"No LEDs were detected")
        return None
def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, params_for_blob_detection):
    """Detect the positions of all LEDs in the grayscale image.

    Thin wrapper around detect_LED_positions_in_grayscale.

    Returns:
        The (n, 3) position matrix, or None if no LEDs were detected.
    """
    # The original `if x is not None: return x else: return None` was a no-op
    # branch; simply pass the result through.
    return detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Determine the colour of every detected LED and store it in the matrix.

    For each LED the colour channels are averaged over a small square region
    around the LED centre. A channel whose average exceeds
    threshold_color_detection counts as active; the combination of active
    channels is mapped to one of the color_number_* constants and written
    into column 2 of matrix_of_LEDs (modified in place).

    Parameters:
        matrix_of_LEDs: (N, 3) uint16 array with rows [x_pos, y_pos, color].
        image_bgr: BGR image (uint8) in which the LEDs were detected.

    Returns:
        matrix_of_LEDs with the colour column filled in.
    """
    # is image_r[y_pos, x_pos] = image_bgr[y_pos,x_pos, 2] ? --> yes. No need to split the color channels.

    offset = 2  # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led
    # offset = 0 --> only the value from the middle point of the blob/led
    # offset = 1 --> 9 values, offset = 2 --> 25 values

    for led in matrix_of_LEDs:
        # BUGFIX: cast to Python int before subtracting. The positions are
        # uint16, and "uint16 - offset" wraps around for LEDs closer than
        # `offset` px to the top/left border, producing huge slice starts,
        # an empty region and a division by zero below. Also clamp the
        # start indices at 0 for the same reason.
        x_pos = int(led[0])
        y_pos = int(led[1])

        # get values of color channels in region around middle point of blob/led:
        # +1 at stop index, because it is not inclusive
        y_start = max(y_pos - offset, 0)
        x_start = max(x_pos - offset, 0)
        region_around_blue_led = image_bgr[y_start:y_pos + offset + 1, x_start:x_pos + offset + 1, 0]   # uint8
        region_around_green_led = image_bgr[y_start:y_pos + offset + 1, x_start:x_pos + offset + 1, 1]  # uint8
        region_around_red_led = image_bgr[y_start:y_pos + offset + 1, x_start:x_pos + offset + 1, 2]    # uint8

        # average of the values
        # convert dtype to prevent integer overflow while addition
        region_around_red_led = region_around_red_led.astype(np.uint16)
        region_around_green_led = region_around_green_led.astype(np.uint16)
        region_around_blue_led = region_around_blue_led.astype(np.uint16)
        # sum all elements in matrix and divide with number of elements
        number_of_elements = region_around_blue_led.size
        value_of_red_led = region_around_red_led.sum()/number_of_elements    # float64, if not integer result
        value_of_green_led = region_around_green_led.sum()/number_of_elements  # float64, if not integer result
        value_of_blue_led = region_around_blue_led.sum()/number_of_elements  # float64, if not integer result

        # determine which leds are active: if value > threshold --> led is active
        status_red_led = value_of_red_led > threshold_color_detection
        status_green_led = value_of_green_led > threshold_color_detection
        status_blue_led = value_of_blue_led > threshold_color_detection

        # map the combination of active channels to a colour number
        if status_red_led and not status_green_led and not status_blue_led:
            color = color_number_red        # case 1: red
        elif not status_red_led and status_green_led and not status_blue_led:
            color = color_number_green      # case 2: green
        elif not status_red_led and not status_green_led and status_blue_led:
            color = color_number_blue       # case 3: blue
        elif status_red_led and status_green_led and not status_blue_led:
            color = color_number_yellow     # case 4: yellow = red + green
        elif status_red_led and not status_green_led and status_blue_led:
            color = color_number_magenta    # case 5: magenta = red + blue
        elif not status_red_led and status_green_led and status_blue_led:
            color = color_number_cyan       # case 6: cyan = green + blue
        elif status_red_led and status_green_led and status_blue_led:
            color = color_number_white      # case 7: white = red + green + blue
        else:
            # case 8: led not active. This case can not normally occur, because
            # no inactive led can be detected by the blob-algorithm in
            # detect_LED_positions_in_grayscale.
            color = color_number_off

        # fill matrix with color
        led[2] = color  # uint16
    return matrix_of_LEDs
def detect_LEDs_with_grayscale(image_b, image_g, image_r, image_bgr, params_for_blob_detection):
    """Detect LEDs and their colours via a grayscale conversion of the frame.

    NOTE(review): convert_rgb_to_grayscale_average is called here with three
    separate colour channels — confirm the helper defined in this file
    accepts (b, g, r) rather than a single BGR image.

    Returns the (N, 3) matrix of detected LEDs, or None if none were found.
    """
    # convert rgb to grayscale image and report how long it takes
    t0 = time.perf_counter_ns()
    image_gray = convert_rgb_to_grayscale_average(image_b, image_g, image_r)
    t1 = time.perf_counter_ns()
    elapsed_ms = round((t1 - t0) * 1e-6, 2)
    print(f'processing time convert image: {elapsed_ms} ms')

    # get position of leds
    position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, params_for_blob_detection=params_for_blob_detection)
    if position_of_LEDs is None:
        return None
    # determine color of leds and add to matrix
    return get_color_of_leds(position_of_LEDs, image_bgr)
def lane_detection(image_bgr, params_for_blob_detection):
    """Run the full lane-detection pipeline on one BGR frame.

    Splits the frame into its colour channels, detects the LEDs and their
    colours, then constructs the lane from the detected LEDs.

    Returns the (N, 3) matrix of detected LEDs, or None if none were found.
    """
    # Split colour channels and get grayscale images; time the split
    t0 = time.perf_counter_ns()
    image_b, image_g, image_r = cv.split(image_bgr)
    t1 = time.perf_counter_ns()
    elapsed_ms = round((t1 - t0) * 1e-6, 2)
    print(f'processing time split image: {elapsed_ms} ms')

    # Detect LEDs
    print(f"Detect LEDs and color:")
    detected_LEDs = detect_LEDs_with_grayscale(image_b, image_g, image_r, image_bgr, params_for_blob_detection)
    if detected_LEDs is None:
        return None

    # Construct the lane. The distance values returned by construct_lane
    # are currently not used by this function's caller.
    dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
        construct_lane(detected_LEDs, image_bgr)
    return detected_LEDs
#-------------------
# Define Functions
def get_frames_from_picamera():
    """Capture frames continuously from the PiCamera into shared memory.

    For every captured frame:
      * shm_bools[0] is set True (new frame available),
      * the 1-based frame number is written to shm_framenumber[0],
      * the raw BGR frame is copied into shm_frame,
      * shm_bools[7:10] are set True to trigger the worker processes.

    The loop stops after frame 500 (performance measurement only); in real
    use it would run forever.
    """
    # newframe= shm_bools[0] # do not use this! no updted values in "newframe"
    # framenumber = shm_framenumber[0]

    # Initialise Camera
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            # The sleeps below are ordered deliberately: gains must settle
            # before exposure_mode='off' freezes them.
            time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1) # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings

            t_start= time.perf_counter() # save time for fps calculation
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                # General information:
                # - always the newest frame is recieved: processing must be faster than fps if every frame should be processed
                shm_bools[0] = True  # signal: new frame available
                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
                shm_framenumber[0] = framenumber
                #print('')
                #print(f"new frame: {framenumber}")

                #image = frame.array # raw NumPy array without JPEG encoding
                #b,g,r = cv.split(frame.array) # split colour channels of raw NumPy array without JPEG encoding
                shm_frame[:] = frame.array  # copy the whole BGR frame into shared memory
                #shm_redframe[:] = r
                #shm_greenframe[:] = g
                #shm_blueframe[:] = b
                # for better performance one can assign directly in funtion line the values to the shm_memorys: shm_red, .. , ... = cv.split(..)
                # broadcast True into elements [7], [8], [9]:
                shm_bools[7:10]=[True] # trigger the start of the processing for each colorchannel
                #print(shm_bools[7], shm_bools[8], shm_bools[9])

                #display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out) # show the frame

                output.truncate(0) # clear the stream for next frame
                if framenumber == 500: # 5 sek @ 30 fps, only for performance measuring
                    t_stop=time.perf_counter()
                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
                    break
def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
    """Overlay camera/frame information onto a copy of the image and show it.

    Optionally writes the annotated frame to the video writer `out` when
    trigger_record_OpenCV == 1. The caller's image is never modified.
    """
    img = img.copy()  # work on a copy, never on the caller's image

    # please activate only one trigger at a time
    trigger_show_brightness = 0  # 1 = calculate and show the mean image brightness
    if trigger_show_brightness == 1:
        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)

    trigger_show_max_brightness_values_of_colour_channels = 0  # 1 = show max values of the colour channels
    if trigger_show_max_brightness_values_of_colour_channels == 1:
        r_max, g_max, b_max = get_max_rgb_values(img)

    show_fourth_line = (trigger_show_brightness == 1
                        or trigger_show_max_brightness_values_of_colour_channels == 1)

    # font settings
    font = cv.FONT_HERSHEY_SIMPLEX
    fontScale = 1
    color = (255, 255, 255)  # font colour in BGR
    thickness = 1            # line thickness in px

    # text positions: (x, y) from the top-left corner; y is the text baseline
    frame_height = int(img.shape[0])
    text_start_position_X = int(0)  # start text at the left edge of the image
    text_start_position_Y = int(round(frame_height * 0.12))  # first line at 12 % of frame height
    text_linespacing = 50  # vertical spacing between two lines in px
    positions = [(text_start_position_X, text_start_position_Y + n * text_linespacing)
                 for n in range(4)]

    # define text to display
    text_line_1 = f"set ss: {shutter_speed} us"
    text_line_2 = f"ret exs: {camera_exposure_speed} us"
    text_line_3 = f"Frame: {framenumber}"
    if trigger_show_brightness == 1:
        if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"
        else:
            # NOTE(review): the factor 10e3 equals 1e4; if "pm" means
            # per mille, 1e3 was probably intended — confirm before changing.
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm"
    if trigger_show_max_brightness_values_of_colour_channels == 1:
        text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}"

    # put the text into the image (putText draws on img in place)
    for text, pos in zip((text_line_1, text_line_2, text_line_3), positions[:3]):
        cv.putText(img, text, pos, font, fontScale, color, thickness, cv.LINE_AA)
    if show_fourth_line:
        cv.putText(img, text_line_4, positions[3], font, fontScale, color, thickness, cv.LINE_AA)

    cv.imshow("Current Frame", img)  # display the image
    if trigger_record_OpenCV == 1:
        out.write(img)  # write frame to Video
def calc_arithmetic_mean_of_brightness_per_pixel(image):
    """Calculate the relative mean brightness per pixel of a BGR image.

    The brightness of a pixel is the average of its three colour channels;
    the result is normalised by the maximum uint8 value, so the return
    value lies in the range [0, 1].
    """
    # Comment: so computationally expensive that LED flashing is (almost) never
    # visible in the displayed value. Hence the max-value display exists.
    # Index the channels directly instead of cv.split: avoids copying all
    # three channel planes just to re-convert one of them.
    b = image[:, :, 0]
    g = image[:, :, 1]
    r = image[:, :, 2].astype('uint16')  # uint16, because the sum 255+255+255 = 765 > 255 overflows uint8
    # g and b are promoted to uint16 as well during the addition below
    image_heigth = r.shape[0]
    image_width = r.shape[1]
    number_of_colour_channels = 3
    arithmetic_mean_of_brightness_image = np.sum((r+g+b)/number_of_colour_channels)
    arithmetic_mean_of_brightness_per_pixel = arithmetic_mean_of_brightness_image/(image_width*image_heigth)
    max_possible_brightness = 255  # maximum possible brightness of an uint8 channel
    arithmetic_mean_of_brightness_per_pixel_relative = arithmetic_mean_of_brightness_per_pixel/max_possible_brightness
    return arithmetic_mean_of_brightness_per_pixel_relative
def get_max_rgb_values(image):
    """Return the maximum value of each colour channel (r_max, g_max, b_max) of a BGR image."""
    # OpenCV stores images as BGR; index the channels directly instead of
    # cv.split, which would copy all three channel planes.
    b_max = image[:, :, 0].max()
    g_max = image[:, :, 1].max()
    r_max = image[:, :, 2].max()
    return r_max, g_max, b_max
def create_folder_for_captures():
    """Create a timestamped folder for captured pictures in the current directory.

    Returns:
        (path_saveFolder, folder_exists): path of the created folder and
        True on success. On failure (e.g. the folder already exists because
        the script was started twice within one minute) the script exits.
    """
    now = datetime.now()
    d1 = now.strftime("%Y-%m-%d %H-%M")
    # os.path.join instead of manual string concatenation with "/"
    path_saveFolder = os.path.join(os.getcwd(), "Capture_" + d1)
    try:
        os.mkdir(path_saveFolder)
        folder_exists = True
    except OSError:
        print("Error! Ending script.")
        quit()
    return path_saveFolder, folder_exists
def do_processing():
    """Placeholder for the per-frame image processing (sleeps 1 ms to simulate work)."""
    time.sleep(0.001)
    # print("ohh i am doing high complex image analysis with computer vision")
def do_processing_frame_r(frame):
    """Log the peak intensity of the red-channel frame."""
    peak = frame.max()
    print(f"max frame color red: {peak}")
def do_processing_frame_g(frame):
    """Log the peak intensity of the green-channel frame."""
    peak = frame.max()
    print(f"max frame color green: {peak}")
def do_processing_frame_b(frame):
    """Log the peak intensity of the blue-channel frame."""
    peak = frame.max()
    print(f"max frame color blue: {peak}")
def processing_red():
    """Worker process: waits for new red-channel frames in shared memory and processes them.

    Synchronisation happens through the shared bool array "shm_bools":
      [0] newframe flag, [1] p_red_finished, [4] p_red_started,
      [7] p_red_start_trigger (set by the capture loop, reset here).
    Runs forever; the shared-memory handles are closed on KeyboardInterrupt.
    """
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script
    newframe = shm_bools[0]
    p_red_finished = shm_bools[1] # not used, but for clarity
    p_red_started = shm_bools[4]

    shm_framenumber_init = shared_memory.SharedMemory\
        (name="shm_framenumber") # Attach to existing shared memory block
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
        buffer= shm_framenumber_init.buf)
    # framenumer = shm_framenumber[0]

    shm_redframe_init = shared_memory.SharedMemory\
        (name="shm_redframe") # Attach to existing shared memory block
    shm_redframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \
        buffer= shm_redframe_init.buf)

    i=0  # only incremented on the very first processed frame (selects the start branch)
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i==0:
                last_processed_frame = framenumber

            # first start: new frame available, no channel finished yet, trigger set
            conditions_for_first_start = (i==0) and\
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[7] == True)

            conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True)
            # newframe and all color-channel-processings have to be finished

            if conditions_for_first_start == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                shm_bools[1] = False # set bool for p_red_finished to false
                #t1 = time.perf_counter_ns()
                do_processing()
                i += 1
                #print(f"first processing red finished. frame: {framenumber}")
                shm_bools[1] = True # set bool for p_red_finished to true
                shm_bools[4] = False # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}")
                shm_bools[1] = False # set bool for p_red_finished to false
                #t1 = time.perf_counter_ns()
                do_processing()
                #print(f"max frame color red: {shm_redframe.max()}")
                if show_opencv_window:
                    cv.imshow("red", shm_redframe)
                    cv.waitKey(1)
                #print(f"processing red finished. frame: {framenumber}")
                shm_bools[1] = True # set bool for p_red_finished to true
                #t2 = time.perf_counter_ns()
                #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms")
                last_processed_frame = framenumber
                shm_bools[4] = False # process ended
            # elif shm_bools[0] == False:
            #     pass
                #print(f"no new red frame")

            # image processing finished
        except KeyboardInterrupt:
            # NOTE(review): after this cleanup the while-loop continues with
            # closed handles instead of breaking — confirm intended behaviour.
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                shm_redframe_init.close()
            except FileNotFoundError:
                # Memory already destroyed
                pass
def processing_bgr():
    """Worker process: attaches to the shared BGR frame and runs lane detection on new frames.

    Synchronisation happens through the shared bool array "shm_bools":
      [0] newframe flag, [1] finished flag, [4] started flag,
      [7] start trigger (set by the capture loop, reset here).
    Runs forever; the shared-memory handles are closed on KeyboardInterrupt.
    """
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script
    newframe = shm_bools[0]
    p_red_finished = shm_bools[1] # not used, but for clarity
    p_red_started = shm_bools[4]

    shm_framenumber_init = shared_memory.SharedMemory\
        (name="shm_framenumber") # Attach to existing shared memory block
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
        buffer= shm_framenumber_init.buf)
    # framenumer = shm_framenumber[0]

    shm_frame_init = shared_memory.SharedMemory\
        (name="shm_frame") # Attach to existing shared memory block
    image_bgr = np.ndarray((image_heigth,image_width, number_of_colorchannels), dtype=np.uint8, \
        buffer= shm_frame_init.buf)  # shared BGR frame, written by the capture loop

    params_for_blob_detection = define_parameters_for_blob_detection()

    i=0  # only incremented on the very first processed frame (selects the start branch)
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i==0:
                last_processed_frame = framenumber

            # first start: new frame available, nothing finished yet, trigger set
            conditions_for_first_start = (i==0) and\
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[7] == True)

            conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True)
            # newframe and all color-channel-processings have to be finished

            if conditions_for_first_start == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                shm_bools[1] = False # set finished-flag to false
                lane_detection(image_bgr, params_for_blob_detection)
                i += 1
                shm_bools[1] = True # set finished-flag to true
                shm_bools[4] = False # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                shm_bools[1] = False # set finished-flag to false

                start_processing = time.perf_counter_ns()
                lane_detection(image_bgr, params_for_blob_detection)
                end_processing = time.perf_counter_ns()
                time_processing = end_processing-start_processing
                time_processing = time_processing*1e-6
                time_processing = round(time_processing, 2)
                print(f'processing time: {time_processing} ms')

                if show_opencv_window:
                    # NOTE(review): window title "red" is kept for compatibility,
                    # although the displayed frame is the full BGR image.
                    cv.imshow("red", image_bgr)
                    cv.waitKey(1)
                shm_bools[1] = True # set finished-flag to true
                last_processed_frame = framenumber
                shm_bools[4] = False # process ended
            # image processing finished
        except KeyboardInterrupt:
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                # BUGFIX: this function opens shm_frame_init (not
                # shm_redframe_init, which is undefined here and raised a
                # NameError during cleanup).
                shm_frame_init.close()
            except FileNotFoundError:
                # Memory already destroyed
                pass
# ----------------------------------------------------------------------------
# main
def main():
    """Spawn the processing worker, run the capture loop, then release all shared memory."""
    start = time.perf_counter()
    try:
        # create processes
        p_red = Process(target=processing_bgr)
        processes = [p_red]

        print(f"waiting 1 second to create processes")
        time.sleep(1)  # give the OS time to create the processes

        # start activity of processes
        for process in processes:
            process.start()

        # start capturing
        get_frames_from_picamera()

        print('*******************************')
        # the code below only runs when the capture loop in
        # get_frames_from_picamera breaks (performance measurement);
        # in real use this program never ends.
        for process in processes:
            process.terminate()

        # print time measuring
        end = time.perf_counter()
        print(f'Script finished in {round(end-start, 2)} s')

        # close each SharedMemory instance and unlink to release the shared memory
        # NOTE(review): the shared BGR frame block ("shm_frame") is not
        # released here — confirm and add its close()/unlink() as well.
        shm_bools_create.close()
        shm_bools_create.unlink()
        shm_framenumber_create.close()
        shm_framenumber_create.unlink()
        shm_redframe_create.close()
        shm_redframe_create.unlink()
        shm_greenframe_create.close()
        shm_greenframe_create.unlink()
        shm_blueframe_create.close()
        shm_blueframe_create.unlink()
    except KeyboardInterrupt:
        # Normally this program never gets keyboard-interrupted, but the case
        # is handled anyway: release all shared memory before exiting.
        try:
            # BUGFIX: shm_bools is the NumPy view, not the SharedMemory
            # object; close()/unlink() must be called on shm_bools_create
            # (calling them on the ndarray raised AttributeError).
            shm_bools_create.close()
            shm_bools_create.unlink()
            shm_framenumber_create.close()
            shm_framenumber_create.unlink()
            shm_redframe_create.close()
            shm_redframe_create.unlink()
            shm_greenframe_create.close()
            shm_greenframe_create.unlink()
            shm_blueframe_create.close()
            shm_blueframe_create.unlink()
        except FileNotFoundError:
            # Memory already destroyed
            pass

if __name__ == "__main__":
    main()

View File

@ -0,0 +1,905 @@
import cv2 as cv
import numpy as np
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import sys
import numpy as np
import math as M
from multiprocessing import Process, shared_memory
# Parameters
pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480

# Offset Camera Sensor in Scooty according to Scooty-KS
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]

# image parameters
image_heigth = 320 # shape [0]
image_width = 416 # shape[1]
# calculate center of image
[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16)

threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel

# Parameters for Blob/LED Detection
minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED
maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED

# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7

#----------------------------------------------------------------------
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution
OUTPUT_RESOLUTION = (320, 416) # (width, heigth)
# NOTE(review): image_width/image_heigth are redefined here from
# OUTPUT_RESOLUTION and end up swapped relative to the values above
# (width becomes 320, heigth becomes 416). The shared-memory shapes below
# use these redefined values consistently, but confirm which orientation
# is actually intended.
image_width = OUTPUT_RESOLUTION[0]
image_heigth = OUTPUT_RESOLUTION[1]
number_of_colorchannels = 3 # r, g, b
# NOTE(review): "image_heigth*image_heigth" looks like a typo for
# "image_heigth*image_width"; size_of_frame appears unused in this chunk.
size_of_frame=int(image_heigth*image_heigth*number_of_colorchannels)
frame_dimension = int(number_of_colorchannels)

AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, float or fraction are valid.
BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
# the brighter, the brighter the LEDs and the higher the RGB values and vice versa!
CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
SHUTTER_SPEED = 50 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s

# miscellaneous parameters
max_value_of_uint64 = int((2**64) - 1) # @30 fps: conservative calculated driving time: 1.95*1e10 years --> Integer overflow not relevant.

# settings for development
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane and so on
# create shared Memorys for main-process
# shared memory for bools (inter-process flags, see index legend below)
shm_bools_pre=np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # create numpy array with bools stored in it
# [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished
# [4]: p_red_started [5]: p_green_started [6]: p_blue_started
# [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger
size_of_buffer = shm_bools_pre.nbytes
print(f"size of buffer: {size_of_buffer}") # size of buffer: 10
print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool
shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block
shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory
shm_bools[:] = shm_bools_pre[:] # Copy the original data into shared memory

# print(shm_bool)
# print(shm_bools.name)

# shared memory for framenumber (single uint64 counter written by the capture loop)
shm_framenumber_pre=np.array([0], dtype=np.uint64)
size_of_buffer = shm_framenumber_pre.nbytes
print(f"size of framenumber-buffer: {size_of_buffer}") #8
print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") #uint64
shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block
shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory
shm_framenumber[:] = shm_framenumber_pre[:] # Copy the original data into shared memory
# print(shm_framenumber) # [0]
# print(shm_framenumber_create.name) # shm_framenumber

# shared memory for red, green, blue frame (single-channel planes)
# NOTE(review): the per-channel frames appear superseded by the combined
# "shm_frame" BGR block below but are still allocated — confirm they are
# still needed.
int_black = 0 # integer for black color/ no color
shm_colorframes_pre = np.full(\
    (image_heigth,image_width), \
    int_black, dtype=np.uint8)
size_of_buffer = shm_colorframes_pre.nbytes
print(f"size of colorframe-buffer: {size_of_buffer}") #133 120
print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") #uint8
shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory
shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory
shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory
shm_redframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
shm_greenframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
shm_blueframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory

# shared memory bgr frame (full 3-channel image written by the capture loop)
int_black = 0 # integer for black color/ no color
shm_frame_pre = np.full(\
    (image_heigth,image_width, number_of_colorchannels), \
    int_black, dtype=np.uint8)
size_of_buffer = shm_frame_pre.nbytes
print(f"size of colorframe-buffer: {size_of_buffer}") #399 360 = 416 x 320 x 3
print(f"shm_colorframes_pre dtype: {shm_frame_pre.dtype}") #uint8
shm_frame_create = shared_memory.SharedMemory(name="shm_frame", create=True, size=shm_frame_pre.nbytes) # create a new shared memory block
shm_frame = np.ndarray(shm_frame_pre.shape, dtype=shm_frame_pre.dtype, buffer=shm_frame_create.buf) # create a NumPy array backed by shared memory
shm_frame[:] = shm_frame_pre[:] # Copy the original data into shared memory
# ----------------------------------------------------------------------------
# Define Functions
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform LED points into the lane coordinate system KS-LED.

    Translates the points by (-dx, -dy) relative to the image centre
    (x_0, y_0), rotates them by -alpha_rad (the sensor angle is clockwise,
    i.e. negative w.r.t. the mathematical convention) and finally sorts
    them along the lane axis (x_2 of KS_LED).
    """
    transformed = detected_LEDs.copy()          # copy: do not mutate the caller's array
    transformed = transformed.astype(np.int16)  # int16 avoids uint16 overflow during the transform
    # translation relative to the image centre and the foot point
    x_shifted = transformed[:, 0] - dx - x_0
    y_shifted = transformed[:, 1] - dy - y_0
    # rotation by -alpha_rad
    cos_a = np.cos(-alpha_rad)
    sin_a = np.sin(-alpha_rad)
    transformed[:, 0] = cos_a * x_shifted - sin_a * y_shifted
    transformed[:, 1] = sin_a * x_shifted + cos_a * y_shifted
    # sort points along the lane axis: x_2, y_2 (KS_LED)
    return transformed[transformed[:, 0].argsort(kind='quicksort')]
def construct_lane(detected_LEDs, img_bgr):
    """Construct the lane from the detected LED positions.

    Fits a line through the LED centres, computes the foot of the
    perpendicular from the image centre to that line, and transforms the
    LED points into the lane coordinate system.

    Returns:
        (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED):
        offsets of the foot point relative to KS_Scooty in mm, and the
        LED matrix transformed/sorted in KS_LED.
    """
    # get points
    # xy_pnts = detected_LEDs[:,0:2]
    # x_pnts = detected_LEDs[:,0]
    # y_pnts = detected_LEDs[:,1]

    # approach 2:
    # fit line through centers of LEDs in KS_0
    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
    param = 0 # not used for DIST_L2
    reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line).
    aeps = 0.001 # Sufficient accuracy for the angle.
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)

    alpha_rad = np.arctan2(dy, dx) # angle of the fitted line [rad]
    alpha = np.arctan2(dy, dx)*180/np.pi # angle of the fitted line [deg]
    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
    print(f"Lane: alpha:{alpha[0]}°")

    # smallest distance from the image centre to the fitted line.
    # Calculation after: Repetitorium Hoehere Mathematik (Wirth):
    # line: x = a + t*b, point: OP = p, distance d = |b x (p-a)| / |b|
    # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # direction vector of the line
    c = p-a
    # vector magnitude: np.linalg.norm(vec)
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px]
    #print(f"d: {round(d,2)}")

    # foot of the perpendicular (x_LED, y_LED): a + t_0*b with t_0 = (c.b)/|b|^2
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    print(f"x_LED: {x_LED}, y_LED: {y_LED}")

    # offsets (dx, dy) of the foot point relative to KS_0 (image centre)
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]")
    print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]")

    # offsets (dx, dy) of the foot point relative to KS_Scooty
    # (these values are returned)
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]")
    print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]")

    # transform the points to obtain the sorted positions of the LEDs along
    # the lane. With unsorted detections a vertical lane can e.g. yield a
    # pattern like 211323233, hence continue with the sorted matrix.
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")

    #-----------------------------------
    # draw useful lines and points
    # draw lane line
    pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
    pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
    #print(f"pt_0: {pt_0}, pt_1: {pt_1}")
    cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
    # draw dx dy
    cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> perpendicular
    # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx
    # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy
    #draw additional points
    cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points
    #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
    cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. Foot of the perpendicular
    cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image to grayscale with the average method: Y = (R+G+B)/3."""
    # Promote the first channel to uint16 so the per-pixel sum (max 765)
    # cannot overflow uint8; the other channels are promoted automatically.
    channel_sum = image_bgr[:, :, 0].astype(np.uint16) + image_bgr[:, :, 1] + image_bgr[:, :, 2]
    # divide and convert back to uint8 (fractional part is truncated)
    return (channel_sum / 3).astype(np.uint8)
def define_parameters_for_blob_detection():
    """Build and return the cv.SimpleBlobDetector_Params used for LED detection."""
    params = cv.SimpleBlobDetector_Params()
    # Binarisation sweep: the detector thresholds the image repeatedly from
    # minThreshold (inclusive) to maxThreshold (exclusive) in steps of
    # thresholdStep. The grayscale image is dark when only one colour channel
    # is active, so the range starts low and the step must be small.
    params.minThreshold = 20  # reminder: this value is set for grayscale image
    params.maxThreshold = 255
    params.thresholdStep = 1
    # no filtering of blobs by their colour
    params.filterByColor = False
    # filter blobs by their area, derived from the physical LED diameter and camera scale
    params.filterByArea = True
    min_diameter_px = minDiameter_mm * pixels_per_mm  # [px] minimum diameter of detected blob/LED
    max_diameter_px = maxDiameter_mm * pixels_per_mm  # [px] maximum diameter of detected blob/LED
    min_area_px2 = np.pi / 4 * min_diameter_px ** 2
    max_area_px2 = np.pi / 4 * max_diameter_px ** 2  # computed for reference only (see below)
    params.minArea = min_area_px2  # min area of a blob in px^2
    # maxArea is deliberately NOT set: motion blur + rolling shutter enlarge the
    # blobs beyond the static LED size, so an upper bound would drop valid LEDs.
    # shape filters are disabled; the thresholds remain so they can be re-enabled easily
    params.filterByInertia = False
    params.minInertiaRatio = 0.2  # [0-1]
    params.filterByConvexity = False
    params.minConvexity = 0.2  # [0-1]
    params.filterByCircularity = False
    params.minCircularity = 0.4  # [0-1]
    # note: params.minDistBetweenBlobs has no effect for this detector
    return params
def detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection):
    """Detect LED blobs in the grayscale image.

    Returns an (N, 3) uint16 matrix with one row [x, y, 0] per detected LED
    (the third column is filled with the colour code later), or None when no
    blob was found. Optionally draws/shows the keypoints on the BGR image.
    """
    detector = cv.SimpleBlobDetector_create(params_for_blob_detection)  # detector with the supplied parameters
    keypoints = detector.detect(image_gray)  # every keypoint is one blob --> one LED
    number_of_detected_leds = len(keypoints)
    if number_of_detected_leds == 0:
        print(f"No LEDs were detected")
        return None
    # print information of keypoints
    print(f"detected LEDs: {number_of_detected_leds}")
    # pre-allocate the result matrix: rows = LEDs, columns = x, y, colour(=0)
    position_of_leds = np.zeros((number_of_detected_leds, 3), dtype=np.uint16)
    for row, keypoint in enumerate(keypoints):
        # keypoint centers are floats; round up like the original implementation
        position_of_leds[row, 0] = int(np.ceil(keypoint.pt[0]))  # x position
        position_of_leds[row, 1] = int(np.ceil(keypoint.pt[1]))  # y position
        if draw_opencv:
            # draw the keypoints on the original image
            # DRAW_RICH_KEYPOINTS makes the circle size correspond to the blob size
            blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255),
                                     outImage=np.array([]), flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            if show_opencv_window:
                cv.imshow("Detected", blobs)
    return position_of_leds
def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, params_for_blob_detection):
    """Return the (N, 3) position matrix of all detected LEDs, or None if none found.

    Thin wrapper around detect_LED_positions_in_grayscale. The previous
    `if x is not None: return x else: return None` was redundant — the callee
    already returns None when nothing is detected, so the result is passed
    through unchanged.
    """
    return detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Determine the colour of every detected LED and write it into column 2.

    For each LED the mean of every colour channel is taken over a small square
    region around the blob center; a channel is "active" when its mean exceeds
    the module-level threshold_color_detection. The (blue, green, red) activity
    pattern is mapped to the module-level colour codes (red, green, blue,
    yellow, magenta, cyan, white). Mutates and returns matrix_of_LEDs.

    Bug fixed: positions are stored as uint16, so `x_pos - offset` for a blob
    near the top/left border wrapped around to ~65535, producing an empty
    region and a division by zero. Positions are now cast to int and the
    slice start is clamped at 0.
    """
    # note: image_r[y, x] == image_bgr[y, x, 2], so no channel split is needed
    offset = 2  # half side length of the sampling square around the blob/LED center
    # offset = 0 --> only the middle point; offset = 1 --> 9 values; offset = 2 --> 25 values
    # mapping from (blue_active, green_active, red_active) to colour code;
    # (False, False, False) cannot occur, because an inactive LED is never
    # detected by the blob detector — it falls through to color_number_off
    color_by_status = {
        (False, False, True): color_number_red,
        (False, True, False): color_number_green,
        (True, False, False): color_number_blue,
        (False, True, True): color_number_yellow,    # yellow = red + green
        (True, False, True): color_number_magenta,   # magenta = red + blue
        (True, True, False): color_number_cyan,      # cyan = green + blue
        (True, True, True): color_number_white,      # white = red + green + blue
    }
    for led in matrix_of_LEDs:
        x_pos = int(led[0])  # cast away uint16 so the subtraction below cannot wrap
        y_pos = int(led[1])
        y_start = max(y_pos - offset, 0)  # clamp so border LEDs keep a non-empty region
        x_start = max(x_pos - offset, 0)
        # +1 at the stop index because slicing is exclusive
        region = image_bgr[y_start:y_pos + offset + 1, x_start:x_pos + offset + 1, :]
        # convert dtype to prevent integer overflow while summing uint8 values
        region = region.astype(np.uint16)
        number_of_elements = region[:, :, 0].size
        value_of_blue_led = region[:, :, 0].sum() / number_of_elements   # float64
        value_of_green_led = region[:, :, 1].sum() / number_of_elements  # float64
        value_of_red_led = region[:, :, 2].sum() / number_of_elements    # float64
        # a channel counts as active when its mean exceeds the threshold
        status_blue_led = value_of_blue_led > threshold_color_detection
        status_green_led = value_of_green_led > threshold_color_detection
        status_red_led = value_of_red_led > threshold_color_detection
        led[2] = color_by_status.get(
            (bool(status_blue_led), bool(status_green_led), bool(status_red_led)),
            color_number_off)
    return matrix_of_LEDs
def detect_LEDs_with_grayscale(image_bgr, params_for_blob_detection):
    """Detect the LEDs in a BGR frame and classify their colours.

    Returns an (N, 3) matrix [x, y, colour] or None when no LED was found.
    Also prints how long the grayscale conversion took.
    """
    # convert BGR to grayscale and time the conversion
    t_begin = time.perf_counter_ns()
    image_gray = convert_rgb_to_grayscale_average(image_bgr)
    t_end = time.perf_counter_ns()
    elapsed_ms = round((t_end - t_begin) * 1e-6, 2)  # ns -> ms, 2 decimals
    print(f'processing time grayscale_conversion: {elapsed_ms} ms')
    # locate the LEDs in the grayscale image
    position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, params_for_blob_detection=params_for_blob_detection)
    if position_of_LEDs is None:
        return None
    # determine the colour of each LED and write it into the matrix
    return get_color_of_leds(position_of_LEDs, image_bgr)
def lane_detection(image_bgr, params_for_blob_detection):
    """Run the full pipeline on one frame: detect LEDs, then construct the lane.

    Returns the detected LED matrix (positions relative to the image, colour in
    column 2) or None when no LED was found.
    """
    print(f"Detect LEDs and color:")
    detected_LEDs = detect_LEDs_with_grayscale(image_bgr, params_for_blob_detection)
    if detected_LEDs is None:
        return None
    # LEDs found --> construct the lane from them
    print(f"_____________________________________")
    print("Contruct lane")
    _dx_scooty_mm, _dy_scooty_mm, _detected_LEDs_KS_LED = construct_lane(detected_LEDs, image_bgr)
    # print result
    print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}")
    return detected_LEDs
# -------------------
# Define Funcions
def get_frames_from_picamera():
    """Capture BGR frames continuously and publish them via shared memory.

    For every frame: writes the pixel data into shm_frame, bumps the shared
    framenumber, and raises the per-channel start triggers (shm_bools[7:10])
    that the worker processes poll. Stops after 500 frames (performance
    measurement only — in real use the loop never ends).
    """
    # newframe= shm_bools[0] # do not use this! no updated values in "newframe"
    # framenumber = shm_framenumber[0]
    # Initialise Camera
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            # wait for iso gains and digital_gain and analog_gain to settle
            # before fixing the gains with exposure_mode = off
            time.sleep(SLEEP_TIME)
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1) # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings
            t_start= time.perf_counter() # save time for fps calculation
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                # General information:
                # - always the newest frame is recieved: processing must be faster than fps if every frame should be processed
                shm_bools[0] = True  # signal "new frame available"
                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
                shm_framenumber[0] = framenumber
                #print('')
                #print(f"new frame: {framenumber}")
                #image = frame.array # raw NumPy array without JPEG encoding
                #b,g,r = cv.split(frame.array) # split colour channels of raw NumPy array without JPEG encoding
                # copy the frame into the shared-memory backed array (in place)
                shm_frame[:] = frame.array
                #shm_redframe[:] = r
                #shm_greenframe[:] = g
                #shm_blueframe[:] = b
                # for better performance one can assign directly in funtion line the values to the shm_memorys: shm_red, .. , ... = cv.split(..)
                # broadcasts True into indices 7, 8, 9:
                # trigger the start of the processing for each colorchannel
                shm_bools[7:10]=[True]
                #print(shm_bools[7], shm_bools[8], shm_bools[9])
                #display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out) # show the frame
                output.truncate(0) # clear the stream for next frame
                if framenumber == 500: # 5 sek @ 30 fps, only for performance measuring
                    t_stop=time.perf_counter()
                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
                    break
def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
    """Show the frame in an OpenCV window, optionally overlaying diagnostic text
    and optionally writing the frame to the video writer `out`.

    NOTE(review): activate at most one of the two `trigger_show_*` flags —
    if both are 0, text_line_4/pos_4 are simply unused; if brightness text is
    requested the value must have been computed in the same call. Confirm the
    intended flag combinations before changing them.
    """
    img = img.copy() # make copy of image and do not modify the original image
    # please activate only one trigger once
    trigger_show_brightness = 0 # trigger for (not) calculating andshowing the brightness of the image+
    if trigger_show_brightness == 1:
        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)
    trigger_show_max_brightness_values_of_colour_channels = 0 # trigger for (not) calculating and showing max values of colour chanels
    if trigger_show_max_brightness_values_of_colour_channels == 1:
        r_max, g_max, b_max = get_max_rgb_values(img)
    font = cv.FONT_HERSHEY_SIMPLEX # font
    fontScale = 1 # fontScale
    color = (255, 255, 255) # Font colour in BGR
    thickness = 1 # Line thickness in px
    # set text position
    frame_width = int(img.shape[1])
    frame_height = int(img.shape[0])
    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height
    text_linespacing = 50 # line spacing between two strings in pixels
    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
    text_start_position_X = int(0) # start text from left edge of image
    # set position in (x,y)-coordinated from top left corner. Bottom-left corner of the text string in the image.
    pos_1 = (text_start_position_X, text_start_position_Y) # start text from 1/4 of image width
    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing) # start text from 1/4 of image width
    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing) # start text from 1/4 of image width
    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
        pos_4 = (text_start_position_X, text_start_position_Y+3*text_linespacing) # start text from 1/4 of image width
    # define text to display
    text_line_1 = f"set ss: {shutter_speed} us"
    text_line_3 = f"Frame: {framenumber}"
    text_line_2 = f"ret exs: {camera_exposure_speed} us"
    if trigger_show_brightness==1:
        if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"
        elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm"
    if trigger_show_max_brightness_values_of_colour_channels==1:
        text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}"
    # put the text into the image
    # lines 1-3 are currently disabled; only the optional line 4 is drawn
    #image_text_1 = cv.putText(img, text_line_1, pos_1, font,
    #                          fontScale, color, thickness, cv.LINE_AA)
    #image_text_2 = cv.putText(img, text_line_2, pos_2, font,
    #                          fontScale, color, thickness, cv.LINE_AA)
    #image_text_3 = cv.putText(img, text_line_3, pos_3, font,
    #                          fontScale, color, thickness, cv.LINE_AA)
    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
        image_text_4 = cv.putText(img, text_line_4, pos_4, font,
                                  fontScale, color, thickness, cv.LINE_AA)
    cv.imshow("Current Frame", img) # display the image
    if trigger_record_OpenCV == 1:
        out.write(img) # write frame to Video
def calc_arithmetic_mean_of_brightness_per_pixel(image):
    """Return the mean brightness per pixel relative to the maximum (range 0-1).

    Mittlere Helligkeit pro Pixel berechnen. The previous implementation split
    the BGR channels with cv.split, upcast one channel, summed and divided
    manually — mathematically that is exactly the arithmetic mean over all
    pixels and channels, so np.mean does the same in one C-level pass without
    the channel copies and without needing OpenCV at all.
    """
    max_possible_brightness = 255  # maximum possible value of an uint8 channel
    # np.mean on integer input accumulates in float64, so no overflow is possible
    return np.mean(image) / max_possible_brightness
def get_max_rgb_values(image):
    """Return the maximum value of each colour channel as (r_max, g_max, b_max).

    The image is in OpenCV BGR order, so channel 2 is red, 1 green, 0 blue.
    Improvement: index the channels directly (zero-copy views) instead of
    cv.split, which copies all three channels and pulls in OpenCV for a pure
    numpy reduction.
    """
    r_max = image[:, :, 2].max()
    g_max = image[:, :, 1].max()
    b_max = image[:, :, 0].max()
    return r_max, g_max, b_max
def create_folder_for_captures():
    """Create a timestamped "Capture_YYYY-MM-DD HH-MM" folder in the current
    working directory and return (absolute_path, folder_exists).

    Terminates the script (quit) when the folder cannot be created, e.g. when
    a capture folder for the same minute already exists.
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H-%M")
    # keep the forward-slash concatenation of the original (not os.path.join)
    path_saveFolder = os.getcwd() + r"/Capture_" + timestamp
    try:
        os.mkdir(path_saveFolder)
    except OSError:
        print("Error! Ending script.")
        quit()
    folder_exists = True
    return path_saveFolder, folder_exists
def do_processing():
    """Placeholder for per-frame image processing (simulates ~1 ms of work)."""
    time.sleep(0.001)
def do_processing_frame_r(frame):
    """Print the maximum pixel value of the red colour channel frame."""
    peak = frame.max()
    print(f"max frame color red: {peak}")
def do_processing_frame_g(frame):
    """Print the maximum pixel value of the green colour channel frame."""
    peak = frame.max()
    print(f"max frame color green: {peak}")
def do_processing_frame_b(frame):
    """Print the maximum pixel value of the blue colour channel frame."""
    peak = frame.max()
    print(f"max frame color blue: {peak}")
def processing_red():
    """Worker process: poll the shared-memory handshake and process the red
    colour channel whenever the capture loop raises the red start trigger.

    Shared bool layout (see main-process setup):
    [0] newframe, [1] p_red_finished, [4] p_red_started, [7] p_red_start_trigger.
    """
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script
    newframe = shm_bools[0]
    p_red_finished = shm_bools[1] # not used, but for clarity
    p_red_started = shm_bools[4]
    shm_framenumber_init = shared_memory.SharedMemory\
        (name="shm_framenumber") # Attach to existing shared memory block
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
        buffer= shm_framenumber_init.buf)
    # framenumer = shm_framenumber[0]
    shm_redframe_init = shared_memory.SharedMemory\
        (name="shm_redframe") # Attach to existing shared memory block
    shm_redframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \
        buffer= shm_redframe_init.buf)
    i=0
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i==0:
                # remember the first frame seen so "new frame" can be detected below
                last_processed_frame = framenumber
            # first start: new frame available, no channel finished yet, red trigger set
            conditions_for_first_start = (i==0) and\
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[7] == True)
            conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True)
            # newframe and all color-channel-processings have to be finished
            if conditions_for_first_start == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                shm_bools[1] = False # set bool for p_red_finished to false
                #t1 = time.perf_counter_ns()
                do_processing()
                i += 1
                #print(f"first processing red finished. frame: {framenumber}")
                shm_bools[1] = True # set bool for p_red_finished to true
                shm_bools[4] = False # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[4] = True # process started
                shm_bools[7] = False # reset trigger
                #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}")
                shm_bools[1] = False # set bool for p_red_finished to false
                #t1 = time.perf_counter_ns()
                do_processing()
                #print(f"max frame color red: {shm_redframe.max()}")
                if show_opencv_window:
                    cv.imshow("red", shm_redframe)
                    cv.waitKey(1)
                #print(f"processing red finished. frame: {framenumber}")
                shm_bools[1] = True # set bool for p_red_finished to true
                #t2 = time.perf_counter_ns()
                #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms")
                last_processed_frame = framenumber
                shm_bools[4] = False # process ended
            # elif shm_bools[0] == False:
            #     pass
                #print(f"no new red frame")
            # image processing finished
        except KeyboardInterrupt:
            # NOTE(review): after closing the shared memory the while-loop
            # continues and the next access would fail — a `break` here is
            # probably intended; confirm before changing.
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                shm_redframe_init.close()
            except FileNotFoundError:
                # Memory already destroyed
                pass
def processing_bgr():
    """Worker process: attach to the shared-memory blocks created by the main
    process and run the lane detection on every new BGR frame.

    Shared bool layout (see main-process setup):
    [0] newframe, [1] processing finished, [4] processing started,
    [7] start trigger (set by the capture loop, reset here).

    Bugs fixed:
    - the KeyboardInterrupt cleanup closed the undefined name
      `shm_redframe_init` (NameError) instead of `shm_frame_init`;
    - after cleanup the loop continued and would touch the closed shared
      memory on the next iteration — it now breaks out of the loop.
    """
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools")  # attach to existing shared memory block
    # keep the ndarray view; collapsing to a scalar would hide updates from the main process
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer=shm_bool_init.buf)
    shm_framenumber_init = shared_memory.SharedMemory(name="shm_framenumber")  # attach
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, buffer=shm_framenumber_init.buf)
    shm_frame_init = shared_memory.SharedMemory(name="shm_frame")  # attach to the BGR frame block
    image_bgr = np.ndarray((image_heigth, image_width, number_of_colorchannels), dtype=np.uint8,
                           buffer=shm_frame_init.buf)
    params_for_blob_detection = define_parameters_for_blob_detection()
    i = 0
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i == 0:
                # remember the first frame seen so "new frame" can be detected below
                last_processed_frame = framenumber
            # first start: new frame available, no channel finished yet, trigger set
            conditions_for_first_start = (i == 0) and \
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[7] == True)
            conditions_for_starting_processing = (framenumber > last_processed_frame) and (shm_bools[7] == True)
            if conditions_for_first_start == True:
                shm_bools[4] = True   # process started
                shm_bools[7] = False  # reset trigger
                shm_bools[1] = False  # processing not finished yet
                lane_detection(image_bgr, params_for_blob_detection)
                i += 1
                shm_bools[1] = True   # processing finished
                shm_bools[4] = False  # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[4] = True   # process started
                shm_bools[7] = False  # reset trigger
                shm_bools[1] = False  # processing not finished yet
                lane_detection(image_bgr, params_for_blob_detection)
                if show_opencv_window:
                    cv.imshow("red", image_bgr)
                    cv.waitKey(1)
                shm_bools[1] = True   # processing finished
                last_processed_frame = framenumber
                shm_bools[4] = False  # process ended
        except KeyboardInterrupt:
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                shm_frame_init.close()  # BUGFIX: was the undefined shm_redframe_init
            except FileNotFoundError:
                # Memory already destroyed
                pass
            break  # BUGFIX: leave the loop instead of touching closed shared memory
# ----------------------------------------------------------------------------
# main
def _cleanup_shared_memory():
    """Close and unlink every module-level SharedMemory block created at import."""
    # shm_frame_create was previously never released (resource leak)
    for shm in (shm_bools_create, shm_framenumber_create, shm_redframe_create,
                shm_greenframe_create, shm_blueframe_create, shm_frame_create):
        shm.close()
        shm.unlink()
def main():
    """Entry point: spawn the processing process, capture frames, then release
    all shared-memory blocks.

    Bugs fixed:
    - the KeyboardInterrupt path called .close()/.unlink() on `shm_bools`,
      which is a numpy array, not a SharedMemory object (AttributeError);
      it now releases `shm_bools_create`;
    - `shm_frame_create` (the BGR frame block actually used by processing_bgr)
      was never closed/unlinked on either path.
    """
    start = time.perf_counter()
    try:
        # create processes
        p_red = Process(target=processing_bgr)
        processes = [p_red]
        print(f"waiting 1 second to create processes")
        time.sleep(1)  # give the OS time to set the processes up
        # start acitivity of processes
        for process in processes:
            process.start()
        # start capturing
        get_frames_from_picamera()
        print('*******************************')
        # this below code is only executed if the loop in take_image_picamera_opencv is breaked
        # In real use case there will be no end of this program
        for process in processes:
            process.terminate()
        # print time measuring
        end = time.perf_counter()
        print(f'Script finished in {round(end-start, 2)} s')
        # close each SharedMemory instance and unlink to release the shared memory
        _cleanup_shared_memory()
    except KeyboardInterrupt:
        # Normally this prgoram never gets keyboard interrupted! But here this case is nevertheless handled
        # End Script
        try:
            # close each SharedMemory instance and unlink to release the shared memory
            _cleanup_shared_memory()
        except FileNotFoundError:
            # Memory already destroyed
            pass
# run the capture/processing pipeline only when executed as a script
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,925 @@
import cv2 as cv
import numpy as np
import picamera
from picamera.array import PiRGBArray
from fractions import Fraction
import time
from datetime import datetime
import os
import sys
import numpy as np
import math as M
from multiprocessing import Process, shared_memory
#----------------------------------------------------------------------
# Define camera settings
SENSOR_MODE = 4 # corresponding sensor mode to resolution
OUTPUT_RESOLUTION = (192, 144) # (width, heigth)
image_width = OUTPUT_RESOLUTION[0]
image_heigth = OUTPUT_RESOLUTION[1]
number_of_colorchannels = 3 # r, g, b
# BUGFIX: the frame size is width*heigth*channels; it was heigth*heigth*channels
size_of_frame = int(image_heigth*image_width*number_of_colorchannels) # [bytes] for one BGR frame
frame_dimension = int(number_of_colorchannels)
AWB_MODE = 'off' # Auto white balance mode
AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, floar or fraction are valid.
BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
#the brighter, the brighter the LEDs and the higher the RGB values and vice versa!
CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
SHUTTER_SPEED = 50 # [µs]
ISO = 320 # ISO value
EXPOSURE_MODE = 'off'
FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
# miscellaneous parameters
max_value_of_uint64 = int((2**64) - 1) # @30 fps: konservative calculated driving time: 1.95*1e10 years --> Integer overflow not relevant.
#----------------------------------------------------------------------
# Parameters
pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 192x144
# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480
# Offset Camera Sensor in Scooty according to Scooty-KS
x_offset_camera_mm = 100 # [mm]
y_offset_camera_mm = 50 # [mm]
x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
# image parameters
image_heigth = OUTPUT_RESOLUTION[1] # shape [0]
image_width = OUTPUT_RESOLUTION[0]# shape[1]
# calculate center of image (integer pixel coordinates, truncated via uint16)
[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16)
threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel
# Parameters for Blob/LED Detection
minDiameter_mm = 1.5 # [mm] minimum diameter of detected blob/LED
maxDiameter_mm = 8 # [mm] maximum diameter of detected blob/LED
# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16
color_number_off = 0
color_number_red = 1
color_number_green = 2
color_number_blue = 3
color_number_yellow = 4
color_number_magenta = 5
color_number_cyan = 6
color_number_white = 7
# debug/visualisation switches
show_opencv_window = True # show opencv window
draw_opencv = True # draw lane and so on
print_additional_info = False
# create shared Memorys for main-process
# NOTE: executed at import time — creating these blocks is a module-level side effect.
# shared memory for bools
shm_bools_pre=np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # create numpy array with bools stored in it
# [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished
# [4]: p_red_started [5]: p_green_started [6]: p_blue_started
# [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger
size_of_buffer = shm_bools_pre.nbytes
print(f"size of buffer: {size_of_buffer}") # size of buffer: 10
print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool
shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block
shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory
shm_bools[:] = shm_bools_pre[:] # Copy the original data into shared memory
# print(shm_bool)
# print(shm_bools.name)
# shared memory for framenumber
shm_framenumber_pre=np.array([0], dtype=np.uint64)
size_of_buffer = shm_framenumber_pre.nbytes
print(f"size of framenumber-buffer: {size_of_buffer}") #8
print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") #uint64
shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block
shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory
shm_framenumber[:] = shm_framenumber_pre[:] # Copy the original data into shared memory
# print(shm_framenumber) # [0]
# print(shm_framenumber_create.name) # shm_framenumber
# shared memory for red, green, blue frame (single-channel, heigth x width)
int_black = 0 # integer for black color/ no color
shm_colorframes_pre = np.full(\
    (image_heigth,image_width), \
    int_black, dtype=np.uint8)
size_of_buffer = shm_colorframes_pre.nbytes
print(f"size of colorframe-buffer: {size_of_buffer}") #133 120
print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") #uint8
shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory
shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory
shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory
shm_redframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
shm_greenframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
shm_blueframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory
# shared memory bgr frame (full 3-channel frame used by processing_bgr)
int_black = 0 # integer for black color/ no color
shm_frame_pre = np.full(\
    (image_heigth,image_width, number_of_colorchannels), \
    int_black, dtype=np.uint8)
size_of_buffer = shm_frame_pre.nbytes
print(f"size of colorframe-buffer: {size_of_buffer}") #399 360 = 416 x 320 x 3
print(f"shm_colorframes_pre dtype: {shm_frame_pre.dtype}") #uint8
shm_frame_create = shared_memory.SharedMemory(name="shm_frame", create=True, size=shm_frame_pre.nbytes) # create a new shared memory block
shm_frame = np.ndarray(shm_frame_pre.shape, dtype=shm_frame_pre.dtype, buffer=shm_frame_create.buf) # create a NumPy array backed by shared memory
shm_frame[:] = shm_frame_pre[:] # Copy the original data into shared memory
# ----------------------------------------------------------------------------
# Define Functions
def points_trafo(detected_LEDs, alpha_rad, dx, dy):
    """Transform detected LED points into the lane coordinate system KS-LED.

    Translates by (-dx-x_0, -dy-y_0), rotates by -alpha_rad (the sensor angle
    is clockwise, i.e. negative in the mathematical convention) and returns a
    new int16 matrix sorted along the lane axis x_2.
    """
    # astype copies, so the caller's matrix is never modified;
    # int16 avoids integer overflow of the uint16 input during the translation
    leds = detected_LEDs.astype(np.int16)
    # translation into the foot-point coordinate system
    x_shifted = leds[:, 0] - dx - x_0
    y_shifted = leds[:, 1] - dy - y_0
    # rotation by -alpha (result is truncated back to int16 on assignment)
    cos_a = np.cos(-alpha_rad)
    sin_a = np.sin(-alpha_rad)
    leds[:, 0] = cos_a * x_shifted - sin_a * y_shifted
    leds[:, 1] = sin_a * x_shifted + cos_a * y_shifted
    # sort points along the lane: x_2, y_2 axis (KS_LED)
    return leds[leds[:, 0].argsort(kind='quicksort')]
def construct_lane(detected_LEDs, img_bgr):
    """Fit a straight lane line through the detected LED centers.

    Args:
        detected_LEDs: (n, 3) array with one row [x, y, color] per LED,
            coordinates relative to the image center KS_0.
        img_bgr: BGR image; drawn on only when the module-level flag
            draw_opencv is set, shown when show_opencv_window is set.

    Returns:
        tuple (dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED):
        offsets [mm] of the perpendicular foot point relative to the scooter
        coordinate system, and the LEDs transformed into and sorted along
        the lane coordinate system KS_LED.

    Reads the module-level globals x_0, y_0, x_offset_camera_px,
    y_offset_camera_px, pixels_per_mm, print_additional_info,
    draw_opencv and show_opencv_window.
    """
    # Comments were partially German in the original; translated to English.
    # get points
    # xy_pnts = detected_LEDs[:,0:2]
    # x_pnts = detected_LEDs[:,0]
    # y_pnts = detected_LEDs[:,1]
    # approach 2:
    # fit line through centers of LEDs in KS_0
    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
    param = 0 # not used for DIST_L2
    reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line).
    aeps = 0.001 # Sufficient accuracy for the angle.
    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
    # (x_2, y_2) is a point on the fitted line; same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)
    alpha_rad = np.arctan2(dy, dx) # angle of the fitted line [rad]
    alpha = np.arctan2(dy, dx)*180/np.pi # angle of the fitted line [deg]
    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
    if print_additional_info:
        print(f"Lane: alpha:{alpha[0]}°")
    # smallest distance from the image center (x_0, y_0) to the fitted line.
    # Calculation following: Repetitorium Hoehere Mathematik, Wirth
    # line:  x = a + t*b
    # point: OP = p
    # d = abs(b x (p-a))/(abs(b))
    # info: np.array()[:,0] --> reduces to a 1-D array with the desired values
    p = np.array([x_0, y_0])
    a = np.array([x_2, y_2])[:,0]
    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # direction vector of the line
    c = p-a
    # vector magnitude: np.linalg.norm(vec)
    cross= np.cross(b, c)
    d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px]; currently only used by the commented debug print below
    #print(f"d: {round(d,2)}")
    # foot of the perpendicular (x_LED, y_LED): projection of p onto the line
    t_0_dot = np.dot(c, b)
    t_0_norm = (np.linalg.norm(b)**2)
    t_0 = t_0_dot/t_0_norm
    [x_LED, y_LED] = (a+t_0*b)
    if print_additional_info:
        print(f"x_LED: {x_LED}, y_LED: {y_LED}")
    # offset (dx, dy) of the foot point relative to KS_0, in px and mm
    dx_LED = x_LED - x_0
    dx_LED_mm = dx_LED*(1/pixels_per_mm)
    dy_LED = y_LED - y_0
    dy_LED_mm = dy_LED*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]")
        print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]")
    # offset (dx, dy) of the foot point relative to KS_Scooty (scooter frame);
    # these are the values that get returned
    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
    if print_additional_info:
        print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]")
        print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]")
    # transform the points to obtain their sorted position along the lane.
    # With a (near-)vertical LED row the raw detection order can be wrong
    # (e.g. pattern 211323233), hence the sorted result is used downstream
    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
    if print_additional_info:
        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
    #-----------------------------------
    # draw useful lines and points
    # draw lane line
    if draw_opencv:
        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
        #print(f"pt_0: {pt_0}, pt_1: {pt_1}")
        cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane
        # draw dx dy
        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> perpendicular
        # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx
        # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy
        #draw additional points
        cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink: center of the points
        #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
        cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green: foot of the perpendicular
    if show_opencv_window:
        cv.imshow("Lane", img_bgr)
    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
def convert_rgb_to_grayscale_average(image_bgr):
    """Convert a BGR image to grayscale using the average method.

    Algorithm: Y = (R+G+B)/3, truncated back to uint8.
    """
    # The channel mean is computed in float, so no uint8 overflow can
    # occur; casting to uint8 truncates exactly like the original
    # integer conversion did.
    gray_float = image_bgr[:, :, :3].mean(axis=2)
    return gray_float.astype(np.uint8, copy=False)
def create_detector(params_for_blob_detection):
    """Instantiate a cv.SimpleBlobDetector configured with the given params."""
    return cv.SimpleBlobDetector_create(params_for_blob_detection)
def define_parameters_for_blob_detection():
    """Build and return cv.SimpleBlobDetector_Params tuned for LED blobs.

    The grayscale image is dark when only one color channel is active,
    so the threshold range starts low and the thresholdStep is small.
    Reads the module-level globals minDiameter_mm, maxDiameter_mm and
    pixels_per_mm to derive the area limits.
    """
    params = cv.SimpleBlobDetector_Params()
    # Thresholding: binary images are created for every threshold from
    # minThreshold (inclusive) to maxThreshold (exclusive), with distance
    # thresholdStep between neighboring thresholds.
    params.minThreshold = 20   # tuned for the grayscale image
    params.maxThreshold = 255
    params.thresholdStep = 1
    params.filterByColor = False  # do not filter blobs by color
    # Area filter: derive pixel areas from the expected LED diameters [mm]
    params.filterByArea = True
    min_diameter_px = minDiameter_mm*pixels_per_mm  # [px] smallest accepted blob/LED
    max_diameter_px = maxDiameter_mm*pixels_per_mm  # [px] largest accepted blob/LED
    params.minArea = np.pi/4*min_diameter_px**2  # [px^2]
    # note: motion blur and the rolling shutter enlarge the blobs, which is
    # a reason one might drop the upper area limit entirely
    params.maxArea = np.pi/4*max_diameter_px**2  # [px^2]
    # The remaining filters are disabled; the values below only take effect
    # if the corresponding filterBy* flag is switched on.
    params.filterByInertia = False
    params.minInertiaRatio = 0.2   # [0-1]
    params.filterByConvexity = False
    params.minConvexity = 0.2      # [0-1]
    params.filterByCircularity = False
    params.minCircularity = 0.4    # [0-1]
    # params.minDistBetweenBlobs = minDist_px  # this has no effect
    return params
def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector):
    """Run the blob detector on the grayscale image and return LED positions.

    Args:
        image_gray: single-channel uint8 image the blobs are detected in.
        image_bgr: original BGR image; only used to draw the keypoints when
            the module-level flag draw_opencv is set.
        detector: a cv.SimpleBlobDetector instance.

    Returns:
        (n, 3) uint16 ndarray with one row [x, y, 0] per detected LED
        (the color column is filled later by get_color_of_leds), or
        None when no LEDs were detected.
    """
    start_processing = time.perf_counter()
    keypoints = detector.detect(image_gray)  # Detect blobs --> LEDs
    end_processing = time.perf_counter()
    time_processing = round((end_processing-start_processing)*1000, 2)  # [ms]
    print(f'processing time detector: {time_processing} ms')
    number_of_detected_leds = len(keypoints)
    if number_of_detected_leds != 0:
        print(f"detected LEDs: {number_of_detected_leds}")
        # Pre-allocate the result matrix: one row [x, y, color] per LED
        position_of_leds = np.zeros((number_of_detected_leds, 3), dtype=np.uint16)
        for i, k in enumerate(keypoints):
            # keypoint centers are float --> ceil to integer pixel positions
            position_of_leds[i, 0] = int(np.ceil(k.pt[0]))  # x position
            position_of_leds[i, 1] = int(np.ceil(k.pt[1]))  # y position
        if draw_opencv:
            # draw the keypoints on the original image;
            # DRAW_RICH_KEYPOINTS makes the circle size match the blob size
            blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \
                outImage=np.array([]), flags= cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            # bug fix: imshow is only valid when `blobs` was drawn; previously
            # show_opencv_window without draw_opencv raised a NameError
            if show_opencv_window:
                # cv.imshow("grayscale", image_gray)
                cv.imshow("Detected", blobs)
        return position_of_leds
    else:
        print("No LEDs were detected")
        return None
def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector):
    """Detect all LED positions in the grayscale image.

    Thin wrapper around detect_LED_positions_in_grayscale; returns its
    (n, 3) position matrix, or None when nothing was detected.
    """
    # The original if/else that returned the value or None unchanged was
    # redundant -- pass the result straight through.
    return detect_LED_positions_in_grayscale(image_gray, image_bgr, detector)
def get_color_of_leds(matrix_of_LEDs, image_bgr):
    """Determine the color of every detected LED and write it into column 2.

    For each LED row [x, y, color] the mean value of every BGR channel is
    sampled in a square window around the blob center; a channel counts as
    "on" when its mean exceeds the module-level threshold_color_detection.
    The on/off combination is mapped to the color_number_* constants.
    Returns the same matrix with the color column filled in.

    Note: image_bgr[y, x, 2] is identical to the red channel value, so the
    channels do not need to be split beforehand.
    """
    # Half side length of the sampling window around the blob center:
    # offset = 0 --> only the center pixel, 1 --> 3x3 = 9 px, 2 --> 5x5 = 25 px
    offset = 0
    for led in matrix_of_LEDs:
        x_pos = led[0]  # uint16
        y_pos = led[1]  # uint16
        # Slice the window once; +1 at the stop index because it is exclusive.
        # Cast to uint16 so summing uint8 values cannot overflow.
        window = image_bgr[y_pos-offset:y_pos+offset+1,
                           x_pos-offset:x_pos+offset+1, :].astype(np.uint16, copy=False)
        n_px = window.shape[0]*window.shape[1]
        # per-channel mean (float64 unless the division is exact)
        mean_blue = window[:, :, 0].sum()/n_px
        mean_green = window[:, :, 1].sum()/n_px
        mean_red = window[:, :, 2].sum()/n_px
        # a channel is considered active when its mean exceeds the threshold
        red_on = bool(mean_red > threshold_color_detection)
        green_on = bool(mean_green > threshold_color_detection)
        blue_on = bool(mean_blue > threshold_color_detection)
        # Map the (red, green, blue) activity pattern to a color code.
        # (False, False, False) cannot occur for a blob found by the detector,
        # but is mapped to color_number_off for completeness.
        color_lookup = {
            (True,  False, False): color_number_red,
            (False, True,  False): color_number_green,
            (False, False, True):  color_number_blue,
            (True,  True,  False): color_number_yellow,   # red + green
            (True,  False, True):  color_number_magenta,  # red + blue
            (False, True,  True):  color_number_cyan,     # green + blue
            (True,  True,  True):  color_number_white,    # red + green + blue
            (False, False, False): color_number_off,
        }
        led[2] = color_lookup[(red_on, green_on, blue_on)]  # uint16
    return matrix_of_LEDs
def detect_LEDs_with_grayscale(image_bgr, detector):
    """Detect LEDs and their colors via a grayscale conversion.

    Returns the (n, 3) matrix [x, y, color] of detected LEDs, or None
    when no LED was found.
    """
    # average-based grayscale conversion: Y = (R+G+B)/3
    image_gray = convert_rgb_to_grayscale_average(image_bgr)
    # locate the LED blobs in the grayscale image
    position_of_LEDs = detect_position_of_all_LEDs_grayscale(
        image_gray=image_gray, image_bgr=image_bgr, detector=detector)
    if position_of_LEDs is None:
        return None
    # annotate every LED position with its color code
    return get_color_of_leds(position_of_LEDs, image_bgr)
def lane_detection(image_bgr, detector):
    """Detect the LEDs in the frame and construct the lane through them.

    Args:
        image_bgr: BGR frame to process (may be drawn on by construct_lane).
        detector: a cv.SimpleBlobDetector instance.

    Returns:
        The (n, 3) matrix of detected LEDs [x, y, color], or None when no
        LED was found.
    """
    # Detect LEDs (f-prefix removed from the constant message -- identical output)
    print("Detect LEDs and color:")
    detected_LEDs = detect_LEDs_with_grayscale(image_bgr, detector)
    if detected_LEDs is None:
        return None
    # Construct the lane through the detected LEDs; the mm offsets are
    # computed (and printed by construct_lane) but not returned here.
    dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
        construct_lane(detected_LEDs, image_bgr)
    if print_additional_info:
        print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}")
    return detected_LEDs
# -------------------
# Define Functions
def get_frames_from_picamera():
    """Capture frames continuously from the PiCamera into shared memory.

    Configures the camera with the module-level capture constants
    (SENSOR_MODE, OUTPUT_RESOLUTION, FRAMERATE, ...), then streams BGR
    frames into the shared array shm_frame and signals the worker
    processes via shm_bools and shm_framenumber. Breaks after frame 500
    purely for fps measurement.
    """
    # newframe= shm_bools[0] # do not use this! no updated values in "newframe"
    # framenumber = shm_framenumber[0]
    # Initialise Camera
    with picamera.PiCamera() as camera:
        with PiRGBArray(camera) as output:
            # Set camera settings
            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
            camera.framerate = FRAMERATE
            camera.awb_mode = AWB_MODE
            camera.awb_gains = AWB_GAINS
            camera.iso = ISO
            camera.shutter_speed = SHUTTER_SPEED
            time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
            camera.exposure_mode = EXPOSURE_MODE
            time.sleep(1) # wait before applying brightness and contrast
            camera.brightness = BRIGHTNESS
            camera.contrast = CONTRAST
            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings
            t_start= time.perf_counter() # save time for fps calculation
            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
                # General information:
                # - always the newest frame is received: processing must be faster than fps if every frame should be processed
                shm_bools[0] = True  # signal: a new frame is available
                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
                shm_framenumber[0] = framenumber
                #print('')
                #print(f"new frame: {framenumber}")
                #image = frame.array # raw NumPy array without JPEG encoding
                #b,g,r = cv.split(frame.array) # split colour channels of raw NumPy array without JPEG encoding
                shm_frame[:] = frame.array  # publish the whole BGR frame to shared memory
                #shm_redframe[:] = r
                #shm_greenframe[:] = g
                #shm_blueframe[:] = b
                # for better performance one can assign the cv.split result directly to the shm memories in one line: shm_red, .. , ... = cv.split(..)
                shm_bools[7:10]=[True] # trigger the start of the processing for each colorchannel
                #print(shm_bools[7], shm_bools[8], shm_bools[9])
                #display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out) # show the frame
                output.truncate(0) # clear the stream for next frame
                if framenumber == 500: # 5 sek @ 30 fps, only for performance measuring
                    t_stop=time.perf_counter()
                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
                    break
def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
    """Show the frame in an OpenCV window, optionally overlaying debug text.

    Args:
        img: BGR frame to display (a copy is modified, never the original).
        shutter_speed: set shutter speed [us], used for overlay text line 1.
        framenumber: current frame number, used for overlay text line 3.
        camera_exposure_speed: exposure speed reported by the camera [us].
        trigger_record_OpenCV: when 1, the displayed frame is also written
            to the video writer `out`.
        out: cv.VideoWriter used when recording is triggered.

    NOTE(review): both overlay triggers below are hard-coded to 0 and the
    putText calls for text lines 1-3 are commented out, so currently only
    the plain frame is shown; text_line_1..3 are computed but unused.
    """
    img = img.copy() # make copy of image and do not modify the original image
    # please activate only one trigger at once
    trigger_show_brightness = 0 # trigger for (not) calculating and showing the brightness of the image
    if trigger_show_brightness == 1:
        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)
    trigger_show_max_brightness_values_of_colour_channels = 0 # trigger for (not) calculating and showing max values of colour channels
    if trigger_show_max_brightness_values_of_colour_channels == 1:
        r_max, g_max, b_max = get_max_rgb_values(img)
    font = cv.FONT_HERSHEY_SIMPLEX # font
    fontScale = 1 # fontScale
    color = (255, 255, 255) # Font colour in BGR
    thickness = 1 # Line thickness in px
    # set text position
    frame_width = int(img.shape[1])
    frame_height = int(img.shape[0])
    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height
    text_linespacing = 50 # line spacing between two strings in pixels
    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
    text_start_position_X = int(0) # start text from left edge of image
    # positions are (x,y)-coordinates from the top-left corner; each is the bottom-left corner of its text string
    pos_1 = (text_start_position_X, text_start_position_Y)
    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing)
    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing)
    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
        pos_4 = (text_start_position_X, text_start_position_Y+3*text_linespacing) # only needed for the optional 4th line
    # define text to display
    text_line_1 = f"set ss: {shutter_speed} us"
    text_line_3 = f"Frame: {framenumber}"
    text_line_2 = f"ret exs: {camera_exposure_speed} us"
    if trigger_show_brightness==1:
        if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"
        elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
            # below 1 % the value is shown scaled by 10e3 ("pm" = per mille-ish debug unit)
            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm"
    if trigger_show_max_brightness_values_of_colour_channels==1:
        text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}"
    # put the text into the image
    #image_text_1 = cv.putText(img, text_line_1, pos_1, font,
    #                          fontScale, color, thickness, cv.LINE_AA)
    #image_text_2 = cv.putText(img, text_line_2, pos_2, font,
    #                          fontScale, color, thickness, cv.LINE_AA)
    #image_text_3 = cv.putText(img, text_line_3, pos_3, font,
    #                          fontScale, color, thickness, cv.LINE_AA)
    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
        image_text_4 = cv.putText(img, text_line_4, pos_4, font,
                                fontScale, color, thickness, cv.LINE_AA)
    cv.imshow("Current Frame", img) # display the image
    if trigger_record_OpenCV == 1:
        out.write(img) # write frame to Video
def calc_arithmetic_mean_of_brightness_per_pixel(image):
    """Return the mean brightness per pixel relative to the uint8 maximum.

    The result lies in [0, 1]; 1.0 means every pixel is fully white.
    Note: this is computationally expensive, so brief flashes are barely
    visible in the displayed value -- hence the alternative per-channel
    maxima display in display_image_with_text.
    """
    b, g, r = cv.split(image)  # OpenCV works in BGR; the image is captured in BGR as well
    # promote one channel to uint16 so 255+255+255 (=765) cannot overflow;
    # numpy then promotes the whole sum accordingly
    r = r.astype('uint16')
    image_heigth = r.shape[0]
    image_width = r.shape[1]
    number_of_colour_channels = 3
    brightness_sum = np.sum((r+g+b)/number_of_colour_channels)
    mean_brightness_per_pixel = brightness_sum/(image_width*image_heigth)
    max_possible_brightness = 255  # maximum value of a uint8 pixel
    return mean_brightness_per_pixel/max_possible_brightness
def get_max_rgb_values(image):
    """Return the maxima of the three colour channels as (r_max, g_max, b_max)."""
    # OpenCV works in BGR; the image is captured in BGR as well
    blue, green, red = cv.split(image)
    return red.max(), green.max(), blue.max()
def create_folder_for_captures():
    """Create a timestamped folder for captured pictures in the CWD.

    Returns (path_saveFolder, folder_exists). If the folder cannot be
    created (e.g. it already exists), the script is terminated via quit().
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H-%M")
    path_saveFolder = os.getcwd() + r"/Capture_" + timestamp
    try:
        os.mkdir(path_saveFolder)
    except OSError:
        # creation failed -> abort the whole script, as the original did
        print("Error! Ending script.")
        quit()
    # reaching this point means mkdir succeeded
    return path_saveFolder, True
def do_processing():
    """Placeholder for per-frame image processing work; sleeps 1 ms."""
    # stand-in for "high complex image analysis with computer vision"
    time.sleep(0.001)
def do_processing_frame_r(frame):
    """Print the maximum value of the red colour channel frame."""
    peak_value = frame.max()
    print(f"max frame color red: {peak_value}")
def do_processing_frame_g(frame):
    """Print the maximum value of the green colour channel frame."""
    peak_value = frame.max()
    print(f"max frame color green: {peak_value}")
def do_processing_frame_b(frame):
    """Print the maximum value of the blue colour channel frame."""
    peak_value = frame.max()
    print(f"max frame color blue: {peak_value}")
def processing_red():
    """Worker process: run dummy processing on the red colour channel.

    Attaches to the shared memory blocks created by the main process and
    loops, waiting for the capture loop to signal a new frame.

    shm_bools protocol (indices used here):
        [0] new frame available, [1] red processing finished,
        [2]/[3] green/blue finished, [4] red processing started,
        [7] trigger: start red processing.
    """
    # Attach to the existing shared memory blocks. Keep the ndarray views
    # (not copied scalars) so writes reach the shared buffers.
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools")
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer=shm_bool_init.buf)
    shm_framenumber_init = shared_memory.SharedMemory(name="shm_framenumber")
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, buffer=shm_framenumber_init.buf)
    shm_redframe_init = shared_memory.SharedMemory(name="shm_redframe")
    shm_redframe = np.ndarray((image_heigth, image_width), dtype=np.uint8,
                              buffer=shm_redframe_init.buf)
    i = 0
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i == 0:
                last_processed_frame = framenumber
            # first start: a new frame is flagged, no channel has finished
            # processing yet, and the red trigger is set
            conditions_for_first_start = (i == 0) and \
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[7] == True)
            # regular start: a newer frame arrived and the red trigger is set
            conditions_for_starting_processing = (framenumber > last_processed_frame) and (shm_bools[7] == True)
            if conditions_for_first_start == True:
                shm_bools[4] = True   # process started
                shm_bools[7] = False  # reset trigger
                shm_bools[1] = False  # red processing not finished
                do_processing()
                i += 1
                shm_bools[1] = True   # red processing finished
                shm_bools[4] = False  # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[4] = True   # process started
                shm_bools[7] = False  # reset trigger
                shm_bools[1] = False  # red processing not finished
                do_processing()
                if show_opencv_window:
                    cv.imshow("red", shm_redframe)
                    cv.waitKey(1)
                shm_bools[1] = True   # red processing finished
                last_processed_frame = framenumber
                shm_bools[4] = False  # process ended
        except KeyboardInterrupt:
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                shm_redframe_init.close()
            except FileNotFoundError:
                pass  # memory already destroyed
            # bug fix: without this break the loop continued with closed
            # shared-memory views after the interrupt
            break
def processing_bgr():
    """Worker process: run the LED lane detection on the shared BGR frame.

    Attaches to the shared memory blocks created by the main process and
    loops, running lane_detection whenever the capture loop signals a new
    frame via shm_bools (same flag protocol as processing_red).
    """
    # Attach to the existing shared memory blocks. Keep the ndarray views
    # (not copied scalars) so writes reach the shared buffers.
    shm_bool_init = shared_memory.SharedMemory(name="shm_bools")
    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer=shm_bool_init.buf)
    shm_framenumber_init = shared_memory.SharedMemory(name="shm_framenumber")
    shm_framenumber = np.ndarray((1,), dtype=np.uint64, buffer=shm_framenumber_init.buf)
    shm_frame_init = shared_memory.SharedMemory(name="shm_frame")
    image_bgr = np.ndarray((image_heigth, image_width, number_of_colorchannels), dtype=np.uint8,
                           buffer=shm_frame_init.buf)
    # blob detector is created once, outside the loop
    params_for_blob_detection = define_parameters_for_blob_detection()
    detector = create_detector(params_for_blob_detection)
    i = 0
    while True:
        try:
            framenumber = shm_framenumber[0]
            if i == 0:
                last_processed_frame = framenumber
            # first start: a new frame is flagged, no channel has finished
            # processing yet, and the trigger is set
            conditions_for_first_start = (i == 0) and \
                (shm_bools[0] == True) and \
                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
                and (shm_bools[7] == True)
            # regular start: a newer frame arrived and the trigger is set
            conditions_for_starting_processing = (framenumber > last_processed_frame) and (shm_bools[7] == True)
            if conditions_for_first_start == True:
                print("first start")
                shm_bools[4] = True   # process started
                shm_bools[7] = False  # reset trigger
                shm_bools[1] = False  # processing not finished
                # NOTE(review): the shared frame may already hold a newer image
                # by the time this runs -- processing is slower than capture
                lane_detection(image_bgr, detector)
                i += 1
                shm_bools[1] = True   # processing finished
                shm_bools[4] = False  # process ended
            elif conditions_for_starting_processing == True:
                shm_bools[4] = True   # process started
                shm_bools[7] = False  # reset trigger
                shm_bools[1] = False  # processing not finished
                cv.imshow("AAA", image_bgr)
                lane_detection(image_bgr, detector)
                if show_opencv_window:
                    cv.waitKey(1)
                shm_bools[1] = True   # processing finished
                last_processed_frame = framenumber
                shm_bools[4] = False  # process ended
        except KeyboardInterrupt:
            try:
                shm_bool_init.close()
                shm_framenumber_init.close()
                # bug fix: the original closed shm_redframe_init, a name that
                # does not exist in this function (NameError); the block
                # attached here is shm_frame_init
                shm_frame_init.close()
            except FileNotFoundError:
                pass  # memory already destroyed
            # bug fix: without this break the loop continued with closed
            # shared-memory views after the interrupt
            break
# ----------------------------------------------------------------------------
# main
def main():
    """Start the worker process, capture frames, then release shared memory.

    Relies on the module-level SharedMemory objects shm_bools_create,
    shm_framenumber_create, shm_redframe_create, shm_greenframe_create and
    shm_blueframe_create created elsewhere in this file.
    """
    start = time.perf_counter()
    try:
        # create the processing worker(s)
        p_bgr = Process(target=processing_bgr)
        processes = [p_bgr]
        print("waiting 1 second to create processes")
        time.sleep(1)  # give the OS time to set up the processes
        # start activity of the processes
        for process in processes:
            process.start()
        # start capturing; blocks until the capture loop breaks
        # (frame 500, performance measurement) -- in real use there
        # would be no end of this program
        get_frames_from_picamera()
        print('*******************************')
        for process in processes:
            process.terminate()
        # print time measuring
        end = time.perf_counter()
        print(f'Script finished in {round(end-start, 2)} s')
        # close each SharedMemory instance and unlink to release the shared memory
        shm_bools_create.close()
        shm_bools_create.unlink()
        shm_framenumber_create.close()
        shm_framenumber_create.unlink()
        shm_redframe_create.close()
        shm_redframe_create.unlink()
        shm_greenframe_create.close()
        shm_greenframe_create.unlink()
        shm_blueframe_create.close()
        shm_blueframe_create.unlink()
    except KeyboardInterrupt:
        # Normally this program never gets keyboard interrupted, but the
        # case is handled nevertheless: release the shared memory.
        try:
            # bug fix: the original called close()/unlink() on the ndarray
            # view shm_bools instead of the SharedMemory object
            shm_bools_create.close()
            shm_bools_create.unlink()
            shm_framenumber_create.close()
            shm_framenumber_create.unlink()
            shm_redframe_create.close()
            shm_redframe_create.unlink()
            shm_greenframe_create.close()
            shm_greenframe_create.unlink()
            shm_blueframe_create.close()
            shm_blueframe_create.unlink()
        except FileNotFoundError:
            pass  # memory already destroyed

if __name__ == "__main__":
    main()

BIN
Installationsanleitung.docx Normal file

Binary file not shown.

BIN
Installationsanleitung.pdf Normal file

Binary file not shown.